From 9fea5d2f009f51d49944a382bfab141dff6999bc Mon Sep 17 00:00:00 2001
From: fern-api <115122769+fern-api[bot]@users.noreply.github.com>
Date: Tue, 29 Oct 2024 02:32:32 +0000
Subject: [PATCH 01/70] Release 0.8.9
---
README.md | 4 +-
reference.md | 1425 +++++++++--------
src/humanloop/__init__.py | 6 +
src/humanloop/core/client_wrapper.py | 18 +-
src/humanloop/evaluations/client.py | 111 +-
src/humanloop/flows/client.py | 8 +-
src/humanloop/prompts/client.py | 4 +-
src/humanloop/requests/__init__.py | 2 +
.../requests/evaluation_log_response.py | 1 -
src/humanloop/requests/run_stats_response.py | 4 +-
.../requests/version_specification.py | 37 +
src/humanloop/types/__init__.py | 4 +
.../types/evaluation_log_response.py | 12 +-
src/humanloop/types/logs_association_type.py | 5 +
src/humanloop/types/overall_stats.py | 4 +-
src/humanloop/types/run_stats_response.py | 4 +-
src/humanloop/types/version_specification.py | 48 +
17 files changed, 994 insertions(+), 703 deletions(-)
create mode 100644 src/humanloop/requests/version_specification.py
create mode 100644 src/humanloop/types/logs_association_type.py
create mode 100644 src/humanloop/types/version_specification.py
diff --git a/README.md b/README.md
index 97779deb..6dae7103 100644
--- a/README.md
+++ b/README.md
@@ -41,7 +41,7 @@ client.prompts.log(
messages=[{"role": "user", "content": "What really happened at Roswell?"}],
inputs={"person": "Trump"},
created_at=datetime.datetime.fromisoformat(
- "2024-07-19 00:29:35.178000+00:00",
+ "2024-07-18 23:29:35.178000+00:00",
),
provider_latency=6.5931549072265625,
output_message={
@@ -88,7 +88,7 @@ async def main() -> None:
],
inputs={"person": "Trump"},
created_at=datetime.datetime.fromisoformat(
- "2024-07-19 00:29:35.178000+00:00",
+ "2024-07-18 23:29:35.178000+00:00",
),
provider_latency=6.5931549072265625,
output_message={
diff --git a/reference.md b/reference.md
index 948512c8..b08f0261 100644
--- a/reference.md
+++ b/reference.md
@@ -1,5 +1,7 @@
# Reference
+
## Prompts
+
client.prompts.log(...)
-
@@ -21,6 +23,7 @@ Instead of targeting an existing version explicitly, you can instead pass in
Prompt details in the request body. In this case, we will check if the details correspond
to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
when you are storing or deriving your Prompt details in code.
+
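To illustrate, here is a minimal sketch of logging against inline Prompt details rather than an existing version (the path, model, and template values are hypothetical):

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Humanloop matches these Prompt details to an existing version,
# or creates a new version if no match is found.
client.prompts.log(
    path="personas/questioner",  # hypothetical path
    prompt={
        "model": "gpt-4",
        "template": [{"role": "system", "content": "You are {{person}}."}],
    },
    messages=[{"role": "user", "content": "What really happened at Roswell?"}],
    output="Nothing out of the ordinary happened at Roswell.",
)
```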
@@ -56,7 +59,7 @@ client.prompts.log(
messages=[{"role": "user", "content": "What really happened at Roswell?"}],
inputs={"person": "Trump"},
created_at=datetime.datetime.fromisoformat(
- "2024-07-19 00:29:35.178000+00:00",
+ "2024-07-18 23:29:35.178000+00:00",
),
provider_latency=6.5931549072265625,
output_message={
@@ -71,6 +74,7 @@ client.prompts.log(
)
```
+
@@ -85,7 +89,7 @@ client.prompts.log(
**version_id:** `typing.Optional[str]` — A specific Version ID of the Prompt to log to.
-
+
@@ -93,7 +97,7 @@ client.prompts.log(
**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
-
+
@@ -101,7 +105,7 @@ client.prompts.log(
**run_id:** `typing.Optional[str]` — Unique identifier for the Run to associate the Log to.
-
+
@@ -109,7 +113,7 @@ client.prompts.log(
**path:** `typing.Optional[str]` — Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
-
+
@@ -117,7 +121,7 @@ client.prompts.log(
**id:** `typing.Optional[str]` — ID for an existing Prompt.
-
+
@@ -125,7 +129,7 @@ client.prompts.log(
**output_message:** `typing.Optional[ChatMessageParams]` — The message returned by the provider.
-
+
@@ -133,7 +137,7 @@ client.prompts.log(
**prompt_tokens:** `typing.Optional[int]` — Number of tokens in the prompt used to generate the output.
-
+
@@ -141,7 +145,7 @@ client.prompts.log(
**output_tokens:** `typing.Optional[int]` — Number of tokens in the output generated by the model.
-
+
@@ -149,7 +153,7 @@ client.prompts.log(
**prompt_cost:** `typing.Optional[float]` — Cost in dollars associated with the tokens in the prompt.
-
+
@@ -157,7 +161,7 @@ client.prompts.log(
**output_cost:** `typing.Optional[float]` — Cost in dollars associated with the tokens in the output.
-
+
@@ -165,7 +169,7 @@ client.prompts.log(
**finish_reason:** `typing.Optional[str]` — Reason the generation finished.
-
+
@@ -173,21 +177,22 @@ client.prompts.log(
**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — The messages passed to the provider chat endpoint.
-
+
-
-**tool_choice:** `typing.Optional[PromptLogRequestToolChoiceParams]`
+**tool_choice:** `typing.Optional[PromptLogRequestToolChoiceParams]`
+
+Controls how the model uses tools. The following options are supported:
-Controls how the model uses tools. The following options are supported:
-- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
-- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
-- `'required'` means the model can decide to call one or more of the provided tools.
+- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+- `'required'` means the model can decide to call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': }}` forces the model to use the named function.
-
+
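As a sketch, forcing a specific tool by name looks like this (the Prompt ID and function name are hypothetical):

```python
client.prompts.log(
    id="pr_1234567890",  # hypothetical Prompt ID
    messages=[{"role": "user", "content": "What's the weather in Paris?"}],
    # Force the model to call the named function instead of replying directly.
    tool_choice={"type": "function", "function": {"name": "get_weather"}},
)
```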
@@ -195,7 +200,7 @@ Controls how the model uses tools. The following options are supported:
**prompt:** `typing.Optional[PromptKernelRequestParams]` — Details of your Prompt. A new Prompt version will be created if the provided details are new.
-
+
@@ -203,7 +208,7 @@ Controls how the model uses tools. The following options are supported:
**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
-
+
@@ -211,7 +216,7 @@ Controls how the model uses tools. The following options are supported:
**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
-
+
@@ -219,15 +224,15 @@ Controls how the model uses tools. The following options are supported:
**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
-
+
-
-**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
-
+**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
+
@@ -235,7 +240,7 @@ Controls how the model uses tools. The following options are supported:
**error:** `typing.Optional[str]` — Error message if the log is an error.
-
+
@@ -243,7 +248,7 @@ Controls how the model uses tools. The following options are supported:
**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
-
+
@@ -251,7 +256,7 @@ Controls how the model uses tools. The following options are supported:
**stdout:** `typing.Optional[str]` — Captured log and debug statements.
-
+
@@ -259,7 +264,7 @@ Controls how the model uses tools. The following options are supported:
**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider.
-
+
@@ -267,7 +272,7 @@ Controls how the model uses tools. The following options are supported:
**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received from the provider.
-
+
@@ -275,7 +280,7 @@ Controls how the model uses tools. The following options are supported:
**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
-
+
@@ -283,7 +288,7 @@ Controls how the model uses tools. The following options are supported:
**source:** `typing.Optional[str]` — Identifies where the model was called from.
-
+
@@ -291,7 +296,7 @@ Controls how the model uses tools. The following options are supported:
**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
-
+
@@ -299,7 +304,7 @@ Controls how the model uses tools. The following options are supported:
**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs with Evaluations. If provided, Humanloop will automatically associate this Log with Evaluations that require a Log for this Datapoint-Version pair.
-
+
@@ -307,7 +312,7 @@ Controls how the model uses tools. The following options are supported:
**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
-
+
@@ -315,7 +320,7 @@ Controls how the model uses tools. The following options are supported:
**user:** `typing.Optional[str]` — End-user ID related to the Log.
-
+
@@ -323,7 +328,7 @@ Controls how the model uses tools. The following options are supported:
**prompt_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated with.
-
+
@@ -331,7 +336,7 @@ Controls how the model uses tools. The following options are supported:
**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
-
+
@@ -339,13 +344,12 @@ Controls how the model uses tools. The following options are supported:
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -365,6 +369,7 @@ Controls how the model uses tools. The following options are supported:
Update a Log.
Update the details of a Log with the given ID.
+
@@ -390,6 +395,7 @@ client.prompts.update_log(
)
```
+
@@ -404,7 +410,7 @@ client.prompts.update_log(
**id:** `str` — Unique identifier for Prompt.
-
+
@@ -412,7 +418,7 @@ client.prompts.update_log(
**log_id:** `str` — Unique identifier for the Log.
-
+
@@ -420,7 +426,7 @@ client.prompts.update_log(
**output_message:** `typing.Optional[ChatMessageParams]` — The message returned by the provider.
-
+
@@ -428,7 +434,7 @@ client.prompts.update_log(
**prompt_tokens:** `typing.Optional[int]` — Number of tokens in the prompt used to generate the output.
-
+
@@ -436,7 +442,7 @@ client.prompts.update_log(
**output_tokens:** `typing.Optional[int]` — Number of tokens in the output generated by the model.
-
+
@@ -444,7 +450,7 @@ client.prompts.update_log(
**prompt_cost:** `typing.Optional[float]` — Cost in dollars associated with the tokens in the prompt.
-
+
@@ -452,7 +458,7 @@ client.prompts.update_log(
**output_cost:** `typing.Optional[float]` — Cost in dollars associated with the tokens in the output.
-
+
@@ -460,7 +466,7 @@ client.prompts.update_log(
**finish_reason:** `typing.Optional[str]` — Reason the generation finished.
-
+
@@ -468,21 +474,22 @@ client.prompts.update_log(
**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — The messages passed to the provider chat endpoint.
-
+
-
-**tool_choice:** `typing.Optional[PromptLogUpdateRequestToolChoiceParams]`
+**tool_choice:** `typing.Optional[PromptLogUpdateRequestToolChoiceParams]`
+
+Controls how the model uses tools. The following options are supported:
-Controls how the model uses tools. The following options are supported:
-- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
-- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
-- `'required'` means the model can decide to call one or more of the provided tools.
+- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+- `'required'` means the model can decide to call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': }}` forces the model to use the named function.
-
+
@@ -490,15 +497,15 @@ Controls how the model uses tools. The following options are supported:
**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
-
+
-
-**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
-
+**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
+
@@ -506,7 +513,7 @@ Controls how the model uses tools. The following options are supported:
**error:** `typing.Optional[str]` — Error message if the log is an error.
-
+
@@ -514,7 +521,7 @@ Controls how the model uses tools. The following options are supported:
**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
-
+
@@ -522,7 +529,7 @@ Controls how the model uses tools. The following options are supported:
**stdout:** `typing.Optional[str]` — Captured log and debug statements.
-
+
@@ -530,7 +537,7 @@ Controls how the model uses tools. The following options are supported:
**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider.
-
+
@@ -538,7 +545,7 @@ Controls how the model uses tools. The following options are supported:
**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received from the provider.
-
+
@@ -546,7 +553,7 @@ Controls how the model uses tools. The following options are supported:
**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
-
+
@@ -554,7 +561,7 @@ Controls how the model uses tools. The following options are supported:
**source:** `typing.Optional[str]` — Identifies where the model was called from.
-
+
@@ -562,7 +569,7 @@ Controls how the model uses tools. The following options are supported:
**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
-
+
@@ -570,7 +577,7 @@ Controls how the model uses tools. The following options are supported:
**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
-
+
@@ -578,7 +585,7 @@ Controls how the model uses tools. The following options are supported:
**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
-
+
@@ -586,13 +593,12 @@ Controls how the model uses tools. The following options are supported:
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -621,6 +627,7 @@ Instead of targeting an existing version explicitly, you can instead pass in
Prompt details in the request body. In this case, we will check if the details correspond
to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
when you are storing or deriving your Prompt details in code.
+
@@ -696,6 +703,7 @@ for chunk in response:
yield chunk
```
+
@@ -710,7 +718,7 @@ for chunk in response:
**version_id:** `typing.Optional[str]` — A specific Version ID of the Prompt to log to.
-
+
@@ -718,7 +726,7 @@ for chunk in response:
**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
-
+
@@ -726,7 +734,7 @@ for chunk in response:
**path:** `typing.Optional[str]` — Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
-
+
@@ -734,7 +742,7 @@ for chunk in response:
**id:** `typing.Optional[str]` — ID for an existing Prompt.
-
+
@@ -742,21 +750,22 @@ for chunk in response:
**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — The messages passed to the provider chat endpoint.
-
+
-
-**tool_choice:** `typing.Optional[PromptsCallStreamRequestToolChoiceParams]`
+**tool_choice:** `typing.Optional[PromptsCallStreamRequestToolChoiceParams]`
+
+Controls how the model uses tools. The following options are supported:
-Controls how the model uses tools. The following options are supported:
-- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
-- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
-- `'required'` means the model can decide to call one or more of the provided tools.
+- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+- `'required'` means the model can decide to call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': }}` forces the model to use the named function.
-
+
@@ -764,7 +773,7 @@ Controls how the model uses tools. The following options are supported:
**prompt:** `typing.Optional[PromptKernelRequestParams]` — Details of your Prompt. A new Prompt version will be created if the provided details are new.
-
+
@@ -772,7 +781,7 @@ Controls how the model uses tools. The following options are supported:
**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
-
+
@@ -780,7 +789,7 @@ Controls how the model uses tools. The following options are supported:
**source:** `typing.Optional[str]` — Identifies where the model was called from.
-
+
@@ -788,7 +797,7 @@ Controls how the model uses tools. The following options are supported:
**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
-
+
@@ -796,7 +805,7 @@ Controls how the model uses tools. The following options are supported:
**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
-
+
@@ -804,7 +813,7 @@ Controls how the model uses tools. The following options are supported:
**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
-
+
@@ -812,7 +821,7 @@ Controls how the model uses tools. The following options are supported:
**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs with Evaluations. If provided, Humanloop will automatically associate this Log with Evaluations that require a Log for this Datapoint-Version pair.
-
+
@@ -820,7 +829,7 @@ Controls how the model uses tools. The following options are supported:
**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
-
+
@@ -828,7 +837,7 @@ Controls how the model uses tools. The following options are supported:
**user:** `typing.Optional[str]` — End-user ID related to the Log.
-
+
@@ -836,7 +845,7 @@ Controls how the model uses tools. The following options are supported:
**prompts_call_stream_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated with.
-
+
@@ -844,7 +853,7 @@ Controls how the model uses tools. The following options are supported:
**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
-
+
@@ -852,7 +861,7 @@ Controls how the model uses tools. The following options are supported:
**provider_api_keys:** `typing.Optional[ProviderApiKeysParams]` — API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
-
+
@@ -860,7 +869,7 @@ Controls how the model uses tools. The following options are supported:
**num_samples:** `typing.Optional[int]` — The number of generations.
-
+
@@ -868,7 +877,7 @@ Controls how the model uses tools. The following options are supported:
**return_inputs:** `typing.Optional[bool]` — Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
-
+
@@ -876,7 +885,7 @@ Controls how the model uses tools. The following options are supported:
**logprobs:** `typing.Optional[int]` — Include the log probabilities of the top n tokens in the provider_response.
-
+
@@ -884,7 +893,7 @@ Controls how the model uses tools. The following options are supported:
**suffix:** `typing.Optional[str]` — The suffix that comes after a completion of inserted text. Useful for completions that act like inserts.
-
+
@@ -892,13 +901,12 @@ Controls how the model uses tools. The following options are supported:
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -927,6 +935,7 @@ Instead of targeting an existing version explicitly, you can instead pass in
Prompt details in the request body. In this case, we will check if the details correspond
to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
when you are storing or deriving your Prompt details in code.
+
@@ -954,6 +963,7 @@ client.prompts.call(
)
```
+
@@ -968,7 +978,7 @@ client.prompts.call(
**version_id:** `typing.Optional[str]` — A specific Version ID of the Prompt to log to.
-
+
@@ -976,7 +986,7 @@ client.prompts.call(
**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
-
+
@@ -984,7 +994,7 @@ client.prompts.call(
**path:** `typing.Optional[str]` — Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
-
+
@@ -992,7 +1002,7 @@ client.prompts.call(
**id:** `typing.Optional[str]` — ID for an existing Prompt.
-
+
@@ -1000,21 +1010,22 @@ client.prompts.call(
**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — The messages passed to the provider chat endpoint.
-
+
-
-**tool_choice:** `typing.Optional[PromptsCallRequestToolChoiceParams]`
+**tool_choice:** `typing.Optional[PromptsCallRequestToolChoiceParams]`
-Controls how the model uses tools. The following options are supported:
-- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
-- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
-- `'required'` means the model can decide to call one or more of the provided tools.
+Controls how the model uses tools. The following options are supported:
+
+- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+- `'required'` means the model can decide to call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': }}` forces the model to use the named function.
-
+
@@ -1022,7 +1033,7 @@ Controls how the model uses tools. The following options are supported:
**prompt:** `typing.Optional[PromptKernelRequestParams]` — Details of your Prompt. A new Prompt version will be created if the provided details are new.
-
+
@@ -1030,7 +1041,7 @@ Controls how the model uses tools. The following options are supported:
**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
-
+
@@ -1038,7 +1049,7 @@ Controls how the model uses tools. The following options are supported:
**source:** `typing.Optional[str]` — Identifies where the model was called from.
-
+
@@ -1046,7 +1057,7 @@ Controls how the model uses tools. The following options are supported:
**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
-
+
@@ -1054,7 +1065,7 @@ Controls how the model uses tools. The following options are supported:
**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
-
+
@@ -1062,7 +1073,7 @@ Controls how the model uses tools. The following options are supported:
**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
-
+
@@ -1070,7 +1081,7 @@ Controls how the model uses tools. The following options are supported:
**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs with Evaluations. If provided, Humanloop will automatically associate this Log with Evaluations that require a Log for this Datapoint-Version pair.
-
+
@@ -1078,7 +1089,7 @@ Controls how the model uses tools. The following options are supported:
**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
-
+
@@ -1086,7 +1097,7 @@ Controls how the model uses tools. The following options are supported:
**user:** `typing.Optional[str]` — End-user ID related to the Log.
-
+
@@ -1094,7 +1105,7 @@ Controls how the model uses tools. The following options are supported:
**prompts_call_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated with.
-
+
@@ -1102,7 +1113,7 @@ Controls how the model uses tools. The following options are supported:
**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
-
+
@@ -1110,7 +1121,7 @@ Controls how the model uses tools. The following options are supported:
**provider_api_keys:** `typing.Optional[ProviderApiKeysParams]` — API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
-
+
@@ -1118,7 +1129,7 @@ Controls how the model uses tools. The following options are supported:
**num_samples:** `typing.Optional[int]` — The number of generations.
-
+
@@ -1126,7 +1137,7 @@ Controls how the model uses tools. The following options are supported:
**return_inputs:** `typing.Optional[bool]` — Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
-
+
@@ -1134,7 +1145,7 @@ Controls how the model uses tools. The following options are supported:
**logprobs:** `typing.Optional[int]` — Include the log probabilities of the top n tokens in the provider_response.
-
+
@@ -1142,7 +1153,7 @@ Controls how the model uses tools. The following options are supported:
**suffix:** `typing.Optional[str]` — The suffix that comes after a completion of inserted text. Useful for completions that act like inserts.
-
+
@@ -1150,13 +1161,12 @@ Controls how the model uses tools. The following options are supported:
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -1174,6 +1184,7 @@ Controls how the model uses tools. The following options are supported:
Get a list of all Prompts.
+
@@ -1203,6 +1214,7 @@ for page in response.iter_pages():
yield page
```
+
@@ -1217,7 +1229,7 @@ for page in response.iter_pages():
**page:** `typing.Optional[int]` — Page number for pagination.
-
+
@@ -1225,7 +1237,7 @@ for page in response.iter_pages():
**size:** `typing.Optional[int]` — Page size for pagination. Number of Prompts to fetch.
-
+
@@ -1233,7 +1245,7 @@ for page in response.iter_pages():
**name:** `typing.Optional[str]` — Case-insensitive filter for Prompt name.
-
+
@@ -1241,7 +1253,7 @@ for page in response.iter_pages():
**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Prompt. This filter matches against both the email addresses and names of users.
-
+
@@ -1249,7 +1261,7 @@ for page in response.iter_pages():
**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Prompts by.
-
+
@@ -1257,7 +1269,7 @@ for page in response.iter_pages():
**order:** `typing.Optional[SortOrder]` — Direction to sort by.
-
+
@@ -1265,13 +1277,12 @@ for page in response.iter_pages():
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -1295,6 +1306,7 @@ Prompts are identified by the `ID` or their `path`. The parameters (i.e. the pro
If you provide a commit message, then the new version will be committed;
otherwise it will be uncommitted. If you try to commit an already committed version,
an exception will be raised.
+
@@ -1337,6 +1349,7 @@ client.prompts.upsert(
)
```
+
@@ -1351,7 +1364,7 @@ client.prompts.upsert(
**model:** `str` — The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
-
+
@@ -1359,7 +1372,7 @@ client.prompts.upsert(
**path:** `typing.Optional[str]` — Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
-
+
@@ -1367,7 +1380,7 @@ client.prompts.upsert(
**id:** `typing.Optional[str]` — ID for an existing Prompt.
-
+
@@ -1375,22 +1388,22 @@ client.prompts.upsert(
**endpoint:** `typing.Optional[ModelEndpoints]` — The provider model endpoint used.
-
+
-
-**template:** `typing.Optional[PromptRequestTemplateParams]`
+**template:** `typing.Optional[PromptRequestTemplateParams]`
-The template contains the main structure and instructions for the model, including input variables for dynamic values.
+The template contains the main structure and instructions for the model, including input variables for dynamic values.
For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
-For completion models, provide a prompt template as a string.
+For completion models, provide a prompt template as a string.
Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
-
+
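For example, a ChatTemplate for a chat model might look like the following sketch (path and message contents are hypothetical):

```python
client.prompts.upsert(
    path="qa/assistant",  # hypothetical path
    model="gpt-4",
    # A list of messages; `{{topic}}` is substituted from `inputs` at call time.
    template=[
        {"role": "system", "content": "You are an expert on {{topic}}."},
        {"role": "user", "content": "Summarise recent developments in {{topic}}."},
    ],
)
```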
@@ -1398,7 +1411,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**provider:** `typing.Optional[ModelProviders]` — The company providing the underlying model service.
-
+
@@ -1406,7 +1419,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**max_tokens:** `typing.Optional[int]` — The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt.
-
+
@@ -1414,7 +1427,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**temperature:** `typing.Optional[float]` — What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
-
+
@@ -1422,7 +1435,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**top_p:** `typing.Optional[float]` — An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
-
+
@@ -1430,7 +1443,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**stop:** `typing.Optional[PromptRequestStopParams]` — The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
-
+
@@ -1438,7 +1451,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**presence_penalty:** `typing.Optional[float]` — Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
-
+
@@ -1446,7 +1459,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**frequency_penalty:** `typing.Optional[float]` — Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
-
+
@@ -1454,7 +1467,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**other:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Other parameter values to be passed to the provider call.
-
+
@@ -1462,7 +1475,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**seed:** `typing.Optional[int]` — If specified, model will make a best effort to sample deterministically, but it is not guaranteed.
-
+
@@ -1470,7 +1483,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**response_format:** `typing.Optional[ResponseFormatParams]` — The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
-
+
@@ -1478,7 +1491,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**tools:** `typing.Optional[typing.Sequence[ToolFunctionParams]]` — The tool specification that the model can choose to call if Tool calling is supported.
-
+
@@ -1486,7 +1499,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**linked_tools:** `typing.Optional[typing.Sequence[str]]` — The IDs of the Tools in your organization that the model can choose to call if Tool calling is supported. The default deployed version of that tool is called.
-
+
@@ -1494,7 +1507,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**attributes:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used.
-
+
@@ -1502,7 +1515,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**commit_message:** `typing.Optional[str]` — Message describing the changes made.
-
+
@@ -1510,13 +1523,12 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -1537,6 +1549,7 @@ Retrieve the Prompt with the given ID.
By default, the deployed version of the Prompt is returned. Use the query parameters
`version_id` or `environment` to target a specific version of the Prompt.
+
@@ -1561,6 +1574,7 @@ client.prompts.get(
)
```
+
@@ -1575,7 +1589,7 @@ client.prompts.get(
**id:** `str` — Unique identifier for Prompt.
-
+
@@ -1583,7 +1597,7 @@ client.prompts.get(
**version_id:** `typing.Optional[str]` — A specific Version ID of the Prompt to retrieve.
-
+
@@ -1591,7 +1605,7 @@ client.prompts.get(
**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
-
+
@@ -1599,13 +1613,12 @@ client.prompts.get(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -1623,6 +1636,7 @@ client.prompts.get(
Delete the Prompt with the given ID.
+
@@ -1647,6 +1661,7 @@ client.prompts.delete(
)
```
+
@@ -1661,7 +1676,7 @@ client.prompts.delete(
**id:** `str` — Unique identifier for Prompt.
-
+
@@ -1669,13 +1684,12 @@ client.prompts.delete(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -1693,6 +1707,7 @@ client.prompts.delete(
Move the Prompt to a different path or change the name.
+
@@ -1718,6 +1733,7 @@ client.prompts.move(
)
```
+
@@ -1732,7 +1748,7 @@ client.prompts.move(
**id:** `str` — Unique identifier for Prompt.
-
+
@@ -1740,7 +1756,7 @@ client.prompts.move(
**path:** `typing.Optional[str]` — Path of the Prompt including the Prompt name, which is used as a unique identifier.
-
+
@@ -1748,7 +1764,7 @@ client.prompts.move(
**name:** `typing.Optional[str]` — Name of the Prompt.
-
+
@@ -1756,13 +1772,12 @@ client.prompts.move(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -1780,6 +1795,7 @@ client.prompts.move(
Get a list of all the versions of a Prompt.
+
@@ -1805,6 +1821,7 @@ client.prompts.list_versions(
)
```
+
@@ -1819,7 +1836,7 @@ client.prompts.list_versions(
**id:** `str` — Unique identifier for Prompt.
-
+
@@ -1827,7 +1844,7 @@ client.prompts.list_versions(
**status:** `typing.Optional[VersionStatus]` — Filter versions by status: 'uncommitted', 'committed'. If no status is provided, all versions are returned.
-
+
@@ -1835,7 +1852,7 @@ client.prompts.list_versions(
**evaluator_aggregates:** `typing.Optional[bool]` — Whether to include Evaluator aggregate results for the versions in the response.
-
+
@@ -1843,13 +1860,12 @@ client.prompts.list_versions(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -1869,6 +1885,7 @@ client.prompts.list_versions(
Commit a version of the Prompt with a commit message.
If the version is already committed, an exception will be raised.
+
@@ -1895,6 +1912,7 @@ client.prompts.commit(
)
```
+
@@ -1909,7 +1927,7 @@ client.prompts.commit(
**id:** `str` — Unique identifier for Prompt.
-
+
@@ -1917,7 +1935,7 @@ client.prompts.commit(
**version_id:** `str` — Unique identifier for the specific version of the Prompt.
-
+
@@ -1925,7 +1943,7 @@ client.prompts.commit(
**commit_message:** `str` — Message describing the changes made.
-
+
@@ -1933,13 +1951,12 @@ client.prompts.commit(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -1960,6 +1977,7 @@ Deploy Prompt to an Environment.
Set the deployed version for the specified Environment. This Prompt
will be used for calls made to the Prompt in this Environment.
+
@@ -1986,6 +2004,7 @@ client.prompts.set_deployment(
)
```
+
@@ -2000,7 +2019,7 @@ client.prompts.set_deployment(
**id:** `str` — Unique identifier for Prompt.
-
+
@@ -2008,7 +2027,7 @@ client.prompts.set_deployment(
**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
-
+
@@ -2016,7 +2035,7 @@ client.prompts.set_deployment(
**version_id:** `str` — Unique identifier for the specific version of the Prompt.
-
+
@@ -2024,13 +2043,12 @@ client.prompts.set_deployment(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -2051,6 +2069,7 @@ Remove deployed Prompt from the Environment.
Remove the deployed version for the specified Environment. This Prompt
will no longer be used for calls made to the Prompt in this Environment.
+
@@ -2076,6 +2095,7 @@ client.prompts.remove_deployment(
)
```
+
@@ -2090,7 +2110,7 @@ client.prompts.remove_deployment(
**id:** `str` — Unique identifier for Prompt.
-
+
@@ -2098,7 +2118,7 @@ client.prompts.remove_deployment(
**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
-
+
@@ -2106,13 +2126,12 @@ client.prompts.remove_deployment(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -2130,6 +2149,7 @@ client.prompts.remove_deployment(
List all Environments and their deployed versions for the Prompt.
+
@@ -2154,6 +2174,7 @@ client.prompts.list_environments(
)
```
+
@@ -2168,7 +2189,7 @@ client.prompts.list_environments(
**id:** `str` — Unique identifier for Prompt.
-
+
@@ -2176,13 +2197,12 @@ client.prompts.list_environments(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -2203,6 +2223,7 @@ Activate and deactivate Evaluators for monitoring the Prompt.
An activated Evaluator will automatically be run on all new Logs
within the Prompt for monitoring purposes.
+
@@ -2228,6 +2249,7 @@ client.prompts.update_monitoring(
)
```
+
@@ -2241,8 +2263,8 @@ client.prompts.update_monitoring(
-
-**id:** `str`
-
+**id:** `str`
+
@@ -2252,7 +2274,7 @@ client.prompts.update_monitoring(
**activate:** `typing.Optional[
typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]
]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
-
+
@@ -2262,7 +2284,7 @@ client.prompts.update_monitoring(
**deactivate:** `typing.Optional[
typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]
]` — Evaluators to deactivate. These will not be run on new Logs.
-
+
@@ -2270,18 +2292,18 @@ client.prompts.update_monitoring(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
## Tools
+
client.tools.log(...)
-
@@ -2303,6 +2325,7 @@ Instead of targeting an existing version explicitly, you can instead pass in
Tool details in the request body. In this case, we will check if the details correspond
to an existing version of the Tool. If they do not, we will create a new version. This is helpful
when you are storing or deriving your Tool details in code.
+
@@ -2343,6 +2366,7 @@ client.tools.log(
)
```
+
@@ -2357,7 +2381,7 @@ client.tools.log(
**version_id:** `typing.Optional[str]` — A specific Version ID of the Tool to log to.
-
+
@@ -2365,7 +2389,7 @@ client.tools.log(
**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
-
+
@@ -2373,7 +2397,7 @@ client.tools.log(
**path:** `typing.Optional[str]` — Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
-
+
@@ -2381,7 +2405,7 @@ client.tools.log(
**id:** `typing.Optional[str]` — ID for an existing Tool.
-
+
@@ -2389,7 +2413,7 @@ client.tools.log(
**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
-
+
@@ -2397,7 +2421,7 @@ client.tools.log(
**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
-
+
@@ -2405,15 +2429,15 @@ client.tools.log(
**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
-
+
-
-**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
-
+**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
+
@@ -2421,7 +2445,7 @@ client.tools.log(
**error:** `typing.Optional[str]` — Error message if the log is an error.
-
+
@@ -2429,7 +2453,7 @@ client.tools.log(
**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
-
+
@@ -2437,7 +2461,7 @@ client.tools.log(
**stdout:** `typing.Optional[str]` — Captured log and debug statements.
-
+
@@ -2445,7 +2469,7 @@ client.tools.log(
**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider.
-
+
@@ -2453,7 +2477,7 @@ client.tools.log(
**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received from the provider.
-
+
@@ -2461,7 +2485,7 @@ client.tools.log(
**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
-
+
@@ -2469,7 +2493,7 @@ client.tools.log(
**source:** `typing.Optional[str]` — Identifies where the model was called from.
-
+
@@ -2477,7 +2501,7 @@ client.tools.log(
**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
-
+
@@ -2485,7 +2509,7 @@ client.tools.log(
**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs with Evaluations. If provided, Humanloop will automatically associate this Log with Evaluations that require a Log for this Datapoint-Version pair.
-
+
@@ -2493,7 +2517,7 @@ client.tools.log(
**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
-
+
@@ -2501,7 +2525,7 @@ client.tools.log(
**user:** `typing.Optional[str]` — End-user ID related to the Log.
-
+
@@ -2509,7 +2533,7 @@ client.tools.log(
**tool_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated with.
-
+
@@ -2517,7 +2541,7 @@ client.tools.log(
**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
-
+
@@ -2525,7 +2549,7 @@ client.tools.log(
**tool:** `typing.Optional[ToolKernelRequestParams]` — Details of your Tool. A new Tool version will be created if the provided details are new.
-
+
@@ -2533,13 +2557,12 @@ client.tools.log(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -2559,6 +2582,7 @@ client.tools.log(
Update a Log.
Update the details of a Log with the given ID.
+
@@ -2584,6 +2608,7 @@ client.tools.update(
)
```
+
@@ -2598,7 +2623,7 @@ client.tools.update(
**id:** `str` — Unique identifier for Tool.
-
+
@@ -2606,7 +2631,7 @@ client.tools.update(
**log_id:** `str` — Unique identifier for the Log.
-
+
@@ -2614,15 +2639,15 @@ client.tools.update(
**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
-
+
-
-**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
-
+**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
+
@@ -2630,7 +2655,7 @@ client.tools.update(
**error:** `typing.Optional[str]` — Error message if the log is an error.
-
+
@@ -2638,7 +2663,7 @@ client.tools.update(
**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
-
+
@@ -2646,7 +2671,7 @@ client.tools.update(
**stdout:** `typing.Optional[str]` — Captured log and debug statements.
-
+
@@ -2654,7 +2679,7 @@ client.tools.update(
**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider.
-
+
@@ -2662,7 +2687,7 @@ client.tools.update(
**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received from the provider.
-
+
@@ -2670,7 +2695,7 @@ client.tools.update(
**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
-
+
@@ -2678,7 +2703,7 @@ client.tools.update(
**source:** `typing.Optional[str]` — Identifies where the model was called from.
-
+
@@ -2686,7 +2711,7 @@ client.tools.update(
**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
-
+
@@ -2694,7 +2719,7 @@ client.tools.update(
**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
-
+
@@ -2702,7 +2727,7 @@ client.tools.update(
**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
-
+
@@ -2710,13 +2735,12 @@ client.tools.update(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -2734,6 +2758,7 @@ client.tools.update(
Get a list of all Tools.
+
@@ -2763,6 +2788,7 @@ for page in response.iter_pages():
yield page
```
+
@@ -2777,7 +2803,7 @@ for page in response.iter_pages():
**page:** `typing.Optional[int]` — Page offset for pagination.
-
+
@@ -2785,7 +2811,7 @@ for page in response.iter_pages():
**size:** `typing.Optional[int]` — Page size for pagination. Number of Tools to fetch.
-
+
@@ -2793,7 +2819,7 @@ for page in response.iter_pages():
**name:** `typing.Optional[str]` — Case-insensitive filter for Tool name.
-
+
@@ -2801,7 +2827,7 @@ for page in response.iter_pages():
**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Tool. This filter matches against both the email addresses and names of users.
-
+
@@ -2809,7 +2835,7 @@ for page in response.iter_pages():
**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Tools by.
-
+
@@ -2817,7 +2843,7 @@ for page in response.iter_pages():
**order:** `typing.Optional[SortOrder]` — Direction to sort by.
-
+
@@ -2825,13 +2851,12 @@ for page in response.iter_pages():
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -2855,6 +2880,7 @@ Tools are identified by the `ID` or their `path`. The name, description and para
If you provide a commit message, then the new version will be committed;
otherwise it will be uncommitted. If you try to commit an already committed version,
an exception will be raised.
+
@@ -2889,6 +2915,7 @@ client.tools.upsert(
)
```
+
@@ -2903,7 +2930,7 @@ client.tools.upsert(
**path:** `typing.Optional[str]` — Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
-
+
@@ -2911,7 +2938,7 @@ client.tools.upsert(
**id:** `typing.Optional[str]` — ID for an existing Tool.
-
+
@@ -2919,7 +2946,7 @@ client.tools.upsert(
**function:** `typing.Optional[ToolFunctionParams]` — Callable function specification of the Tool shown to the model for tool calling.
-
+
@@ -2927,7 +2954,7 @@ client.tools.upsert(
**source_code:** `typing.Optional[str]` — Code source of the Tool.
-
+
@@ -2935,7 +2962,7 @@ client.tools.upsert(
**setup_values:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Values needed to set up the Tool, defined in JSON Schema format: https://json-schema.org/
-
+
@@ -2943,7 +2970,7 @@ client.tools.upsert(
**attributes:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Additional fields to describe the Tool. Helpful to separate Tool versions from each other with details on how they were created or used.
-
+
@@ -2951,7 +2978,7 @@ client.tools.upsert(
**tool_type:** `typing.Optional[FilesToolType]` — Type of Tool.
-
+
@@ -2959,7 +2986,7 @@ client.tools.upsert(
**commit_message:** `typing.Optional[str]` — Message describing the changes made.
-
+
@@ -2967,13 +2994,12 @@ client.tools.upsert(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -2994,6 +3020,7 @@ Retrieve the Tool with the given ID.
By default, the deployed version of the Tool is returned. Use the query parameters
`version_id` or `environment` to target a specific version of the Tool.
+
@@ -3018,6 +3045,7 @@ client.tools.get(
)
```
+
@@ -3032,7 +3060,7 @@ client.tools.get(
**id:** `str` — Unique identifier for Tool.
-
+
@@ -3040,7 +3068,7 @@ client.tools.get(
**version_id:** `typing.Optional[str]` — A specific Version ID of the Tool to retrieve.
-
+
@@ -3048,7 +3076,7 @@ client.tools.get(
**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
-
+
@@ -3056,13 +3084,12 @@ client.tools.get(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -3080,6 +3107,7 @@ client.tools.get(
Delete the Tool with the given ID.
+
@@ -3104,6 +3132,7 @@ client.tools.delete(
)
```
+
@@ -3118,7 +3147,7 @@ client.tools.delete(
**id:** `str` — Unique identifier for Tool.
-
+
@@ -3126,13 +3155,12 @@ client.tools.delete(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -3150,6 +3178,7 @@ client.tools.delete(
Move the Tool to a different path or change the name.
+
@@ -3175,6 +3204,7 @@ client.tools.move(
)
```
+
@@ -3189,7 +3219,7 @@ client.tools.move(
**id:** `str` — Unique identifier for Tool.
-
+
@@ -3197,7 +3227,7 @@ client.tools.move(
**path:** `typing.Optional[str]` — Path of the Tool including the Tool name, which is used as a unique identifier.
-
+
@@ -3205,7 +3235,7 @@ client.tools.move(
**name:** `typing.Optional[str]` — Name of the Tool, which is used as a unique identifier.
-
+
@@ -3213,13 +3243,12 @@ client.tools.move(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -3237,6 +3266,7 @@ client.tools.move(
Get a list of all the versions of a Tool.
+
@@ -3262,6 +3292,7 @@ client.tools.list_versions(
)
```
+
@@ -3276,7 +3307,7 @@ client.tools.list_versions(
**id:** `str` — Unique identifier for the Tool.
-
+
@@ -3284,7 +3315,7 @@ client.tools.list_versions(
**status:** `typing.Optional[VersionStatus]` — Filter versions by status: 'uncommitted', 'committed'. If no status is provided, all versions are returned.
-
+
@@ -3292,7 +3323,7 @@ client.tools.list_versions(
**evaluator_aggregates:** `typing.Optional[bool]` — Whether to include Evaluator aggregate results for the versions in the response.
-
+
@@ -3300,13 +3331,12 @@ client.tools.list_versions(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -3326,6 +3356,7 @@ client.tools.list_versions(
Commit a version of the Tool with a commit message.
If the version is already committed, an exception will be raised.
+
@@ -3352,6 +3383,7 @@ client.tools.commit(
)
```
+
@@ -3366,7 +3398,7 @@ client.tools.commit(
**id:** `str` — Unique identifier for Tool.
-
+
@@ -3374,7 +3406,7 @@ client.tools.commit(
**version_id:** `str` — Unique identifier for the specific version of the Tool.
-
+
@@ -3382,7 +3414,7 @@ client.tools.commit(
**commit_message:** `str` — Message describing the changes made.
-
+
@@ -3390,13 +3422,12 @@ client.tools.commit(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -3417,6 +3448,7 @@ Deploy Tool to an Environment.
Set the deployed version for the specified Environment. This Tool
will be used for calls made to the Tool in this Environment.
+
@@ -3443,6 +3475,7 @@ client.tools.set_deployment(
)
```
+
@@ -3457,7 +3490,7 @@ client.tools.set_deployment(
**id:** `str` — Unique identifier for Tool.
-
+
@@ -3465,7 +3498,7 @@ client.tools.set_deployment(
**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
-
+
@@ -3473,7 +3506,7 @@ client.tools.set_deployment(
**version_id:** `str` — Unique identifier for the specific version of the Tool.
-
+
@@ -3481,13 +3514,12 @@ client.tools.set_deployment(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -3508,6 +3540,7 @@ Remove deployed Tool from the Environment.
Remove the deployed version for the specified Environment. This Tool
will no longer be used for calls made to the Tool in this Environment.
+
@@ -3533,6 +3566,7 @@ client.tools.remove_deployment(
)
```
+
@@ -3547,7 +3581,7 @@ client.tools.remove_deployment(
**id:** `str` — Unique identifier for Tool.
-
+
@@ -3555,7 +3589,7 @@ client.tools.remove_deployment(
**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
-
+
@@ -3563,13 +3597,12 @@ client.tools.remove_deployment(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -3587,6 +3620,7 @@ client.tools.remove_deployment(
List all Environments and their deployed versions for the Tool.
+
@@ -3611,6 +3645,7 @@ client.tools.list_environments(
)
```
+
@@ -3625,7 +3660,7 @@ client.tools.list_environments(
**id:** `str` — Unique identifier for Tool.
-
+
@@ -3633,13 +3668,12 @@ client.tools.list_environments(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -3660,6 +3694,7 @@ Activate and deactivate Evaluators for monitoring the Tool.
An activated Evaluator will automatically be run on all new Logs
within the Tool for monitoring purposes.
+
@@ -3685,6 +3720,7 @@ client.tools.update_monitoring(
)
```
+
@@ -3698,8 +3734,8 @@ client.tools.update_monitoring(
-
-**id:** `str`
-
+**id:** `str`
+
@@ -3709,7 +3745,7 @@ client.tools.update_monitoring(
**activate:** `typing.Optional[
typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]
]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
-
+
@@ -3719,7 +3755,7 @@ client.tools.update_monitoring(
**deactivate:** `typing.Optional[
typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]
]` — Evaluators to deactivate. These will not be run on new Logs.
-
+
@@ -3727,18 +3763,18 @@ client.tools.update_monitoring(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
## Datasets
+
client.datasets.list(...)
-
@@ -3752,6 +3788,7 @@ client.tools.update_monitoring(
-
List all Datasets.
+
@@ -3781,6 +3818,7 @@ for page in response.iter_pages():
yield page
```
+
@@ -3795,7 +3833,7 @@ for page in response.iter_pages():
**page:** `typing.Optional[int]` — Page offset for pagination.
-
+
@@ -3803,7 +3841,7 @@ for page in response.iter_pages():
**size:** `typing.Optional[int]` — Page size for pagination. Number of Datasets to fetch.
-
+
@@ -3811,7 +3849,7 @@ for page in response.iter_pages():
**name:** `typing.Optional[str]` — Case-insensitive filter for Dataset name.
-
+
@@ -3819,7 +3857,7 @@ for page in response.iter_pages():
**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Dataset. This filter matches against both email address and name of users.
-
+
@@ -3827,7 +3865,7 @@ for page in response.iter_pages():
**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Datasets by
-
+
@@ -3835,7 +3873,7 @@ for page in response.iter_pages():
**order:** `typing.Optional[SortOrder]` — Direction to sort by.
-
+
@@ -3843,13 +3881,12 @@ for page in response.iter_pages():
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -3883,6 +3920,7 @@ an exception will be raised.
Humanloop also deduplicates Datapoints. If you try to add a Datapoint that already
exists, it will be ignored. If you intentionally want to add a duplicate Datapoint,
you can add a unique identifier to the Datapoint's inputs such as `{_dedupe_id: <unique id>}`.
+
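A sketch of the deduplication workaround described above, assuming Datapoint params accept an `inputs` dict; the path and commit message are placeholders.

```python
import uuid

from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Tag the Datapoint's inputs with a unique marker so it is not
# deduplicated against an identical existing Datapoint.
client.datasets.upsert(
    path="evals/questions",  # placeholder path
    datapoints=[
        {
            "inputs": {
                "question": "Why is the sky blue?",
                "_dedupe_id": str(uuid.uuid4()),
            }
        }
    ],
    action="add",
    commit_message="Intentionally add a duplicate Datapoint",
)
```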
@@ -3932,6 +3970,7 @@ client.datasets.upsert(
)
```
+
@@ -3946,7 +3985,7 @@ client.datasets.upsert(
**datapoints:** `typing.Sequence[CreateDatapointRequestParams]` — The Datapoints to create this Dataset version with. Modify the `action` field to determine how these Datapoints are used.
-
+
@@ -3954,7 +3993,7 @@ client.datasets.upsert(
**version_id:** `typing.Optional[str]` — ID of the specific Dataset version to base the created Version on. Only used when `action` is `"add"` or `"remove"`.
-
+
@@ -3962,7 +4001,7 @@ client.datasets.upsert(
**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed Version to base the created Version on. Only used when `action` is `"add"` or `"remove"`.
-
+
@@ -3970,7 +4009,7 @@ client.datasets.upsert(
**path:** `typing.Optional[str]` — Path of the Dataset, including the name. This locates the Dataset in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
-
+
@@ -3978,23 +4017,23 @@ client.datasets.upsert(
**id:** `typing.Optional[str]` — ID for an existing Dataset.
-
+
-
-**action:** `typing.Optional[UpdateDatesetAction]`
+**action:** `typing.Optional[UpdateDatesetAction]`
The action to take with the provided Datapoints.
- - If `"set"`, the created version will only contain the Datapoints provided in this request.
- - If `"add"`, the created version will contain the Datapoints provided in this request in addition to the Datapoints in the target version.
- - If `"remove"`, the created version will contain the Datapoints in the target version except for the Datapoints provided in this request.
+- If `"set"`, the created version will only contain the Datapoints provided in this request.
+- If `"add"`, the created version will contain the Datapoints provided in this request in addition to the Datapoints in the target version.
+- If `"remove"`, the created version will contain the Datapoints in the target version except for the Datapoints provided in this request.
If `"add"` or `"remove"`, one of the `version_id` or `environment` query parameters may be provided.
-
+
@@ -4002,7 +4041,7 @@ If `"add"` or `"remove"`, one of the `version_id` or `environment` query paramet
**attributes:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Additional fields to describe the Dataset. Helpful to separate Dataset versions from each other with details on how they were created or used.
-
+
@@ -4010,7 +4049,7 @@ If `"add"` or `"remove"`, one of the `version_id` or `environment` query paramet
**commit_message:** `typing.Optional[str]` — Message describing the changes made. If provided, a committed version of the Dataset is created. Otherwise, an uncommitted version is created.
-
+
@@ -4018,13 +4057,12 @@ If `"add"` or `"remove"`, one of the `version_id` or `environment` query paramet
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -4050,6 +4088,7 @@ retrieve Datapoints for a large Dataset.
By default, the deployed version of the Dataset is returned. Use the query parameters
`version_id` or `environment` to target a specific version of the Dataset.
+
@@ -4076,6 +4115,7 @@ client.datasets.get(
)
```
+
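A minimal sketch of targeting the deployed version versus pinning a specific version; the IDs are placeholders.

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Deployed version (the default behaviour).
deployed = client.datasets.get(id="ds_1234567890")

# Pin to an explicit version instead, without inlining Datapoints.
pinned = client.datasets.get(
    id="ds_1234567890",
    version_id="dsv_0987654321",
    include_datapoints=False,
)
```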
@@ -4090,7 +4130,7 @@ client.datasets.get(
**id:** `str` — Unique identifier for Dataset.
-
+
@@ -4098,7 +4138,7 @@ client.datasets.get(
**version_id:** `typing.Optional[str]` — A specific Version ID of the Dataset to retrieve.
-
+
@@ -4106,7 +4146,7 @@ client.datasets.get(
**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
-
+
@@ -4114,7 +4154,7 @@ client.datasets.get(
**include_datapoints:** `typing.Optional[bool]` — If set to `true`, include all Datapoints in the response. Defaults to `false`. Consider using the paginated List Datapoints endpoint instead.
-
+
@@ -4122,13 +4162,12 @@ client.datasets.get(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -4146,6 +4185,7 @@ client.datasets.get(
Delete the Dataset with the given ID.
+
@@ -4170,6 +4210,7 @@ client.datasets.delete(
)
```
+
@@ -4184,7 +4225,7 @@ client.datasets.delete(
**id:** `str` — Unique identifier for Dataset.
-
+
@@ -4192,13 +4233,12 @@ client.datasets.delete(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -4216,6 +4256,7 @@ client.datasets.delete(
Move the Dataset to a different path or change the name.
+
@@ -4240,6 +4281,7 @@ client.datasets.move(
)
```
+
@@ -4254,7 +4296,7 @@ client.datasets.move(
**id:** `str` — Unique identifier for Dataset.
-
+
@@ -4262,7 +4304,7 @@ client.datasets.move(
**path:** `typing.Optional[str]` — Path of the Dataset including the Dataset name, which is used as a unique identifier.
-
+
@@ -4270,7 +4312,7 @@ client.datasets.move(
**name:** `typing.Optional[str]` — Name of the Dataset, which is used as a unique identifier.
-
+
@@ -4278,13 +4320,12 @@ client.datasets.move(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -4302,6 +4343,7 @@ client.datasets.move(
List all Datapoints for the Dataset with the given ID.
+
@@ -4332,6 +4374,7 @@ for page in response.iter_pages():
yield page
```
+
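Since the pagination examples in this reference are truncated, here is a fuller sketch, assuming each page yielded by `iter_pages()` is itself iterable over Datapoints; the Dataset ID is a placeholder.

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

response = client.datasets.list_datapoints(
    id="ds_1234567890",
    size=100,
)
# Iterate lazily over all pages rather than fetching everything at once.
for page in response.iter_pages():
    for datapoint in page:  # assumes pages are iterable over items
        print(datapoint)
```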
@@ -4346,7 +4389,7 @@ for page in response.iter_pages():
**id:** `str` — Unique identifier for Dataset.
-
+
@@ -4354,7 +4397,7 @@ for page in response.iter_pages():
**version_id:** `typing.Optional[str]` — A specific Version ID of the Dataset to retrieve.
-
+
@@ -4362,7 +4405,7 @@ for page in response.iter_pages():
**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
-
+
@@ -4370,7 +4413,7 @@ for page in response.iter_pages():
**page:** `typing.Optional[int]` — Page number for pagination.
-
+
@@ -4378,7 +4421,7 @@ for page in response.iter_pages():
**size:** `typing.Optional[int]` — Page size for pagination. Number of Datapoints to fetch.
-
+
@@ -4386,13 +4429,12 @@ for page in response.iter_pages():
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -4410,6 +4452,7 @@ for page in response.iter_pages():
Get a list of the versions for a Dataset.
+
@@ -4435,6 +4478,7 @@ client.datasets.list_versions(
)
```
+
@@ -4449,7 +4493,7 @@ client.datasets.list_versions(
**id:** `str` — Unique identifier for Dataset.
-
+
@@ -4457,7 +4501,7 @@ client.datasets.list_versions(
**status:** `typing.Optional[VersionStatus]` — Filter versions by status: 'uncommitted', 'committed'. If no status is provided, all versions are returned.
-
+
@@ -4465,7 +4509,7 @@ client.datasets.list_versions(
**include_datapoints:** `typing.Optional[typing.Literal["latest_committed"]]` — If set to 'latest_committed', include the Datapoints for the latest committed version. Defaults to `None`.
-
+
@@ -4473,13 +4517,12 @@ client.datasets.list_versions(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -4499,6 +4542,7 @@ client.datasets.list_versions(
Commit a version of the Dataset with a commit message.
If the version is already committed, an exception will be raised.
+
@@ -4525,6 +4569,7 @@ client.datasets.commit(
)
```
+
@@ -4539,7 +4584,7 @@ client.datasets.commit(
**id:** `str` — Unique identifier for Dataset.
-
+
@@ -4547,7 +4592,7 @@ client.datasets.commit(
**version_id:** `str` — Unique identifier for the specific version of the Dataset.
-
+
@@ -4555,7 +4600,7 @@ client.datasets.commit(
**commit_message:** `str` — Message describing the changes made.
-
+
@@ -4563,13 +4608,12 @@ client.datasets.commit(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -4594,6 +4638,7 @@ If either `version_id` or `environment` is provided, the new version will be bas
with the Datapoints from the CSV file added to the existing Datapoints in the version.
If neither `version_id` nor `environment` is provided, the new version will be based on the version
of the Dataset that is deployed to the default Environment.
+
@@ -4619,6 +4664,7 @@ client.datasets.upload_csv(
)
```
+
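A sketch of uploading a CSV from disk. It assumes `core.File` accepts a binary file object; the Dataset ID and filename are placeholders.

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Base the new version on whatever is deployed to the default
# Environment (neither version_id nor environment is passed).
with open("datapoints.csv", "rb") as f:  # assumes a binary file object is accepted
    client.datasets.upload_csv(
        id="ds_1234567890",
        file=f,
        commit_message="Import Datapoints from CSV",
    )
```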
@@ -4633,17 +4679,17 @@ client.datasets.upload_csv(
**id:** `str` — Unique identifier for the Dataset
-
+
-
-**file:** `from __future__ import annotations
+**file:** `from __future__ import annotations
core.File` — See core.File for more documentation
-
+
@@ -4651,7 +4697,7 @@ core.File` — See core.File for more documentation
**commit_message:** `str` — Commit message for the new Dataset version.
-
+
@@ -4659,7 +4705,7 @@ core.File` — See core.File for more documentation
**version_id:** `typing.Optional[str]` — ID of the specific Dataset version to base the created Version on.
-
+
@@ -4667,7 +4713,7 @@ core.File` — See core.File for more documentation
**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed Version to base the created Version on.
-
+
@@ -4675,13 +4721,12 @@ core.File` — See core.File for more documentation
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -4701,6 +4746,7 @@ core.File` — See core.File for more documentation
Deploy Dataset to Environment.
Set the deployed version for the specified Environment.
+
@@ -4727,6 +4773,7 @@ client.datasets.set_deployment(
)
```
+
@@ -4741,7 +4788,7 @@ client.datasets.set_deployment(
**id:** `str` — Unique identifier for Dataset.
-
+
@@ -4749,7 +4796,7 @@ client.datasets.set_deployment(
**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
-
+
@@ -4757,7 +4804,7 @@ client.datasets.set_deployment(
**version_id:** `str` — Unique identifier for the specific version of the Dataset.
-
+
@@ -4765,13 +4812,12 @@ client.datasets.set_deployment(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -4791,6 +4837,7 @@ client.datasets.set_deployment(
Remove deployed Dataset from Environment.
Remove the deployed version for the specified Environment.
+
@@ -4816,6 +4863,7 @@ client.datasets.remove_deployment(
)
```
+
@@ -4830,7 +4878,7 @@ client.datasets.remove_deployment(
**id:** `str` — Unique identifier for Dataset.
-
+
@@ -4838,7 +4886,7 @@ client.datasets.remove_deployment(
**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
-
+
@@ -4846,13 +4894,12 @@ client.datasets.remove_deployment(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -4870,6 +4917,7 @@ client.datasets.remove_deployment(
List all Environments and their deployed versions for the Dataset.
+
@@ -4894,6 +4942,7 @@ client.datasets.list_environments(
)
```
+
@@ -4908,7 +4957,7 @@ client.datasets.list_environments(
**id:** `str` — Unique identifier for Dataset.
-
+
@@ -4916,18 +4965,18 @@ client.datasets.list_environments(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
## Evaluators
+
client.evaluators.log(...)
-
@@ -4943,6 +4992,7 @@ client.datasets.list_environments(
Submit Evaluator judgment for an existing Log.
Creates a new Log. The evaluated Log will be set as the parent of the created Log.
+
@@ -4967,6 +5017,7 @@ client.evaluators.log(
)
```
+
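A minimal sketch of submitting a judgment against an existing Log. The IDs and path are placeholders, and passing a boolean `judgment` assumes the Evaluator's return type is boolean.

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# The evaluated Log becomes the parent of the created Evaluator Log.
client.evaluators.log(
    parent_id="log_1234567890",
    path="evals/accuracy",  # placeholder Evaluator path
    judgment=True,  # assumes a boolean-returning Evaluator
    marked_completed=True,
)
```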
@@ -4981,7 +5032,7 @@ client.evaluators.log(
**parent_id:** `str` — Identifier of the evaluated Log. The newly created Log will have this one set as parent.
-
+
@@ -4989,7 +5040,7 @@ client.evaluators.log(
**version_id:** `typing.Optional[str]` — ID of the Evaluator version to log against.
-
+
@@ -4997,7 +5048,7 @@ client.evaluators.log(
**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
-
+
@@ -5005,7 +5056,7 @@ client.evaluators.log(
**path:** `typing.Optional[str]` — Path of the Evaluator, including the name. This locates the Evaluator in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
-
+
@@ -5013,7 +5064,7 @@ client.evaluators.log(
**id:** `typing.Optional[str]` — ID for an existing Evaluator.
-
+
@@ -5021,7 +5072,7 @@ client.evaluators.log(
**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
-
+
@@ -5029,7 +5080,7 @@ client.evaluators.log(
**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
-
+
@@ -5037,15 +5088,15 @@ client.evaluators.log(
**output:** `typing.Optional[str]` — Generated output from the LLM. Only populated for LLM Evaluator Logs.
-
+
-
-**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
-
+**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
+
@@ -5053,7 +5104,7 @@ client.evaluators.log(
**error:** `typing.Optional[str]` — Error message if the log is an error.
-
+
@@ -5061,7 +5112,7 @@ client.evaluators.log(
**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
-
+
@@ -5069,7 +5120,7 @@ client.evaluators.log(
**stdout:** `typing.Optional[str]` — Captured log and debug statements.
-
+
@@ -5077,7 +5128,7 @@ client.evaluators.log(
**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to the provider. Only populated for LLM Evaluator Logs.
-
+
@@ -5085,7 +5136,7 @@ client.evaluators.log(
**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received from the provider. Only populated for LLM Evaluator Logs.
-
+
@@ -5093,7 +5144,7 @@ client.evaluators.log(
**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
-
+
@@ -5101,7 +5152,7 @@ client.evaluators.log(
**source:** `typing.Optional[str]` — Identifies where the model was called from.
-
+
@@ -5109,7 +5160,7 @@ client.evaluators.log(
**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
-
+
@@ -5117,7 +5168,7 @@ client.evaluators.log(
**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
-
+
@@ -5125,7 +5176,7 @@ client.evaluators.log(
**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
-
+
@@ -5133,7 +5184,7 @@ client.evaluators.log(
**user:** `typing.Optional[str]` — End-user ID related to the Log.
-
+
@@ -5141,7 +5192,7 @@ client.evaluators.log(
**create_evaluator_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to.
-
+
@@ -5149,7 +5200,7 @@ client.evaluators.log(
**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
-
+
@@ -5157,7 +5208,7 @@ client.evaluators.log(
**judgment:** `typing.Optional[CreateEvaluatorLogRequestJudgmentParams]` — Evaluator assessment of the Log.
-
+
@@ -5165,15 +5216,15 @@ client.evaluators.log(
**marked_completed:** `typing.Optional[bool]` — Whether the Log has been manually marked as completed by a user.
-
+
-
-**spec:** `typing.Optional[CreateEvaluatorLogRequestSpecParams]`
-
+**spec:** `typing.Optional[CreateEvaluatorLogRequestSpecParams]`
+
@@ -5181,13 +5232,12 @@ client.evaluators.log(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -5205,6 +5255,7 @@ client.evaluators.log(
Get a list of all Evaluators.
+
@@ -5234,6 +5285,7 @@ for page in response.iter_pages():
yield page
```
+
@@ -5248,7 +5300,7 @@ for page in response.iter_pages():
**page:** `typing.Optional[int]` — Page offset for pagination.
-
+
@@ -5256,7 +5308,7 @@ for page in response.iter_pages():
**size:** `typing.Optional[int]` — Page size for pagination. Number of Evaluators to fetch.
-
+
@@ -5264,7 +5316,7 @@ for page in response.iter_pages():
**name:** `typing.Optional[str]` — Case-insensitive filter for Evaluator name.
-
+
@@ -5272,7 +5324,7 @@ for page in response.iter_pages():
**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Evaluator. This filter matches against both email address and name of users.
-
+
@@ -5280,7 +5332,7 @@ for page in response.iter_pages():
**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Evaluators by
-
+
@@ -5288,7 +5340,7 @@ for page in response.iter_pages():
**order:** `typing.Optional[SortOrder]` — Direction to sort by.
-
+
@@ -5296,13 +5348,12 @@ for page in response.iter_pages():
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -5326,6 +5377,7 @@ Evaluators are identified by the `ID` or their `path`. The spec provided determi
If you provide a commit message, then the new version will be committed;
otherwise it will be uncommitted. If you try to commit an already committed version,
an exception will be raised.
+
@@ -5357,6 +5409,7 @@ client.evaluators.upsert(
)
```
+
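A sketch of upserting a code Evaluator. The `spec` fields shown here (`evaluator_type`, `arguments_type`, `return_type`, `code`) are an assumption about the shape of `EvaluatorRequestSpecParams`, not confirmed by this reference; the path is a placeholder.

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

client.evaluators.upsert(
    path="evals/length-check",  # placeholder path
    spec={
        # Assumed spec fields for a Python code Evaluator.
        "evaluator_type": "python",
        "arguments_type": "target_free",
        "return_type": "boolean",
        "code": "def evaluate(log):\n    return len(log['output']) < 500\n",
    },
    commit_message="Add length check evaluator",
)
```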
@@ -5370,8 +5423,8 @@ client.evaluators.upsert(
-
-**spec:** `EvaluatorRequestSpecParams`
-
+**spec:** `EvaluatorRequestSpecParams`
+
@@ -5379,7 +5432,7 @@ client.evaluators.upsert(
**path:** `typing.Optional[str]` — Path of the Evaluator, including the name. This locates the Evaluator in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
-
+
@@ -5387,7 +5440,7 @@ client.evaluators.upsert(
**id:** `typing.Optional[str]` — ID for an existing Evaluator.
-
+
@@ -5395,7 +5448,7 @@ client.evaluators.upsert(
**commit_message:** `typing.Optional[str]` — Message describing the changes made.
-
+
@@ -5403,13 +5456,12 @@ client.evaluators.upsert(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -5430,6 +5482,7 @@ Retrieve the Evaluator with the given ID.
By default, the deployed version of the Evaluator is returned. Use the query parameters
`version_id` or `environment` to target a specific version of the Evaluator.
+
@@ -5454,6 +5507,7 @@ client.evaluators.get(
)
```
+
@@ -5468,7 +5522,7 @@ client.evaluators.get(
**id:** `str` — Unique identifier for Evaluator.
-
+
@@ -5476,7 +5530,7 @@ client.evaluators.get(
**version_id:** `typing.Optional[str]` — A specific Version ID of the Evaluator to retrieve.
-
+
@@ -5484,7 +5538,7 @@ client.evaluators.get(
**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
-
+
@@ -5492,13 +5546,12 @@ client.evaluators.get(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -5516,6 +5569,7 @@ client.evaluators.get(
Delete the Evaluator with the given ID.
+
@@ -5540,6 +5594,7 @@ client.evaluators.delete(
)
```
+
@@ -5554,7 +5609,7 @@ client.evaluators.delete(
**id:** `str` — Unique identifier for Evaluator.
-
+
@@ -5562,13 +5617,12 @@ client.evaluators.delete(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -5586,6 +5640,7 @@ client.evaluators.delete(
Move the Evaluator to a different path or change the name.
+
@@ -5611,6 +5666,7 @@ client.evaluators.move(
)
```
+
@@ -5625,7 +5681,7 @@ client.evaluators.move(
**id:** `str` — Unique identifier for Evaluator.
-
+
@@ -5633,7 +5689,7 @@ client.evaluators.move(
**path:** `typing.Optional[str]` — Path of the Evaluator including the Evaluator name, which is used as a unique identifier.
-
+
@@ -5641,7 +5697,7 @@ client.evaluators.move(
**name:** `typing.Optional[str]` — Name of the Evaluator, which is used as a unique identifier.
-
+
@@ -5649,13 +5705,12 @@ client.evaluators.move(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -5673,6 +5728,7 @@ client.evaluators.move(
Get a list of all the versions of an Evaluator.
+
@@ -5697,6 +5753,7 @@ client.evaluators.list_versions(
)
```
+
@@ -5711,7 +5768,7 @@ client.evaluators.list_versions(
**id:** `str` — Unique identifier for the Evaluator.
-
+
@@ -5719,7 +5776,7 @@ client.evaluators.list_versions(
**status:** `typing.Optional[VersionStatus]` — Filter versions by status: 'uncommitted', 'committed'. If no status is provided, all versions are returned.
-
+
@@ -5727,7 +5784,7 @@ client.evaluators.list_versions(
**evaluator_aggregates:** `typing.Optional[bool]` — Whether to include Evaluator aggregate results for the versions in the response
-
+
@@ -5735,13 +5792,12 @@ client.evaluators.list_versions(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -5761,6 +5817,7 @@ client.evaluators.list_versions(
Commit a version of the Evaluator with a commit message.
If the version is already committed, an exception will be raised.
+
@@ -5787,6 +5844,7 @@ client.evaluators.commit(
)
```
+
@@ -5801,7 +5859,7 @@ client.evaluators.commit(
**id:** `str` — Unique identifier for Evaluator.
-
+
@@ -5809,7 +5867,7 @@ client.evaluators.commit(
**version_id:** `str` — Unique identifier for the specific version of the Evaluator.
-
+
@@ -5817,7 +5875,7 @@ client.evaluators.commit(
**commit_message:** `str` — Message describing the changes made.
-
+
@@ -5825,13 +5883,12 @@ client.evaluators.commit(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -5852,6 +5909,7 @@ Deploy Evaluator to an Environment.
Set the deployed version for the specified Environment. This Evaluator
will be used for calls made to the Evaluator in this Environment.
+
@@ -5878,6 +5936,7 @@ client.evaluators.set_deployment(
)
```
+
@@ -5892,7 +5951,7 @@ client.evaluators.set_deployment(
**id:** `str` — Unique identifier for Evaluator.
-
+
@@ -5900,7 +5959,7 @@ client.evaluators.set_deployment(
**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
-
+
@@ -5908,7 +5967,7 @@ client.evaluators.set_deployment(
**version_id:** `str` — Unique identifier for the specific version of the Evaluator.
-
+
@@ -5916,13 +5975,12 @@ client.evaluators.set_deployment(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -5943,6 +6001,7 @@ Remove deployed Evaluator from the Environment.
Remove the deployed version for the specified Environment. This Evaluator
will no longer be used for calls made to the Evaluator in this Environment.
+
@@ -5968,6 +6027,7 @@ client.evaluators.remove_deployment(
)
```
+
@@ -5982,7 +6042,7 @@ client.evaluators.remove_deployment(
**id:** `str` — Unique identifier for Evaluator.
-
+
@@ -5990,7 +6050,7 @@ client.evaluators.remove_deployment(
**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
-
+
@@ -5998,13 +6058,12 @@ client.evaluators.remove_deployment(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -6022,6 +6081,7 @@ client.evaluators.remove_deployment(
List all Environments and their deployed versions for the Evaluator.
+
@@ -6046,6 +6106,7 @@ client.evaluators.list_environments(
)
```
+
@@ -6060,7 +6121,7 @@ client.evaluators.list_environments(
**id:** `str` — Unique identifier for Evaluator.
-
+
@@ -6068,13 +6129,12 @@ client.evaluators.list_environments(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -6095,6 +6155,7 @@ Activate and deactivate Evaluators for monitoring the Evaluator.
An activated Evaluator will automatically be run on all new Logs
within the Evaluator for monitoring purposes.
+
@@ -6119,6 +6180,7 @@ client.evaluators.update_monitoring(
)
```
+
@@ -6132,8 +6194,8 @@ client.evaluators.update_monitoring(
-
-**id:** `str`
-
+**id:** `str`
+
@@ -6143,7 +6205,7 @@ client.evaluators.update_monitoring(
**activate:** `typing.Optional[
typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]
]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
-
+
@@ -6153,7 +6215,7 @@ client.evaluators.update_monitoring(
**deactivate:** `typing.Optional[
typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]
]` — Evaluators to deactivate. These will not be run on new Logs.
-
+
@@ -6161,18 +6223,18 @@ client.evaluators.update_monitoring(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
## Flows
+
client.flows.log(...)
-
@@ -6189,6 +6251,7 @@ Log to a Flow.
You can use query parameters `version_id`, or `environment`, to target
an existing version of the Flow. Otherwise, the default deployed version will be chosen.
+
@@ -6233,14 +6296,15 @@ client.flows.log(
output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.",
trace_status="incomplete",
start_time=datetime.datetime.fromisoformat(
- "2024-07-08 22:40:35+00:00",
+ "2024-07-08 21:40:35+00:00",
),
end_time=datetime.datetime.fromisoformat(
- "2024-07-08 22:40:39+00:00",
+ "2024-07-08 21:40:39+00:00",
),
)
```
+
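To illustrate the trace workflow end to end, a sketch that opens an incomplete Trace, nests a child Log under it, and then marks it complete (which triggers monitoring Evaluators). It assumes the log response exposes an `id` attribute; the path is a placeholder.

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Open the Trace; leave it incomplete so child Logs can be added.
trace = client.flows.log(
    path="flows/triage",  # placeholder path
    inputs={"question": "Chest pains"},
    trace_status="incomplete",
)

# Nest a child Log under the Trace (assumes the response has `id`).
client.flows.log(
    path="flows/triage",
    trace_parent_id=trace.id,
    output="Checked symptoms against guidelines.",
)

# Completing the Trace triggers any monitoring Evaluators; inputs and
# output (or error) must be provided to mark it complete.
client.flows.update_log(
    log_id=trace.id,
    trace_status="complete",
    inputs={"question": "Chest pains"},
    output="Immediate medical attention is required.",
)
```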
@@ -6255,7 +6319,7 @@ client.flows.log(
**version_id:** `typing.Optional[str]` — A specific Version ID of the Flow to log to.
-
+
@@ -6263,7 +6327,7 @@ client.flows.log(
**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
-
+
@@ -6271,7 +6335,7 @@ client.flows.log(
**run_id:** `typing.Optional[str]` — Unique identifier for the Run to associate the Log to.
-
+
@@ -6279,7 +6343,7 @@ client.flows.log(
**path:** `typing.Optional[str]` — Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
-
+
@@ -6287,7 +6351,7 @@ client.flows.log(
**id:** `typing.Optional[str]` — ID for an existing Flow.
-
+
@@ -6295,7 +6359,7 @@ client.flows.log(
**start_time:** `typing.Optional[dt.datetime]` — The start time of the Trace. Will be updated if a child Log with an earlier start time is added.
-
+
@@ -6303,7 +6367,7 @@ client.flows.log(
**end_time:** `typing.Optional[dt.datetime]` — The end time of the Trace. Will be updated if a child Log with a later end time is added.
-
+
@@ -6311,15 +6375,15 @@ client.flows.log(
**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
-
+
-
-**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
-
+**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
+
@@ -6327,7 +6391,7 @@ client.flows.log(
**error:** `typing.Optional[str]` — Error message if the log is an error.
-
+
@@ -6335,7 +6399,7 @@ client.flows.log(
**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
-
+
@@ -6343,7 +6407,7 @@ client.flows.log(
**stdout:** `typing.Optional[str]` — Captured log and debug statements.
-
+
@@ -6351,7 +6415,7 @@ client.flows.log(
**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to the provider.
-
+
@@ -6359,7 +6423,7 @@ client.flows.log(
**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received from the provider.
-
+
@@ -6367,7 +6431,7 @@ client.flows.log(
**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
-
+
@@ -6375,7 +6439,7 @@ client.flows.log(
**source:** `typing.Optional[str]` — Identifies where the model was called from.
-
+
@@ -6383,7 +6447,7 @@ client.flows.log(
**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
-
+
@@ -6391,7 +6455,7 @@ client.flows.log(
**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
-
+
@@ -6399,7 +6463,7 @@ client.flows.log(
**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
-
+
@@ -6407,7 +6471,7 @@ client.flows.log(
**user:** `typing.Optional[str]` — End-user ID related to the Log.
-
+
@@ -6415,7 +6479,7 @@ client.flows.log(
**flow_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to.
-
+
@@ -6423,7 +6487,7 @@ client.flows.log(
**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
-
+
@@ -6431,7 +6495,7 @@ client.flows.log(
**log_id:** `typing.Optional[str]` — The identifier for the Log. If not specified, a default ID will be generated. This allows additional Logs to be appended to the Trace without waiting for Humanloop to return an ID.
-
+
@@ -6439,7 +6503,7 @@ client.flows.log(
**flow:** `typing.Optional[FlowKernelRequestParams]` — Flow used to generate the Trace.
-
+
@@ -6447,7 +6511,7 @@ client.flows.log(
**trace_status:** `typing.Optional[TraceStatus]` — Status of the Trace. When a Trace is marked as `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Traces. If you do not intend to add more Logs to the Trace after creation, set this to `complete`.
-
+
@@ -6455,13 +6519,12 @@ client.flows.log(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -6482,6 +6545,7 @@ Retrieve the Flow with the given ID.
By default, the deployed version of the Flow is returned. Use the query parameters
`version_id` or `environment` to target a specific version of the Flow.
+
@@ -6506,6 +6570,7 @@ client.flows.get(
)
```
+
@@ -6520,7 +6585,7 @@ client.flows.get(
**id:** `str` — Unique identifier for Flow.
-
+
@@ -6528,7 +6593,7 @@ client.flows.get(
**version_id:** `typing.Optional[str]` — A specific Version ID of the Flow to retrieve.
-
+
@@ -6536,7 +6601,7 @@ client.flows.get(
**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
-
+
@@ -6544,13 +6609,12 @@ client.flows.get(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -6568,6 +6632,7 @@ client.flows.get(
Delete the Flow with the given ID.
+
@@ -6592,6 +6657,7 @@ client.flows.delete(
)
```
+
@@ -6606,7 +6672,7 @@ client.flows.delete(
**id:** `str` — Unique identifier for Flow.
-
+
@@ -6614,13 +6680,12 @@ client.flows.delete(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -6638,6 +6703,7 @@ client.flows.delete(
Move the Flow to a different path or change the name.
+
@@ -6663,6 +6729,7 @@ client.flows.move(
)
```
+
@@ -6677,7 +6744,7 @@ client.flows.move(
**id:** `str` — Unique identifier for Flow.
-
+
@@ -6685,7 +6752,7 @@ client.flows.move(
**path:** `typing.Optional[str]` — Path of the Flow including the Flow name, which is used as a unique identifier.
-
+
@@ -6693,7 +6760,7 @@ client.flows.move(
**name:** `typing.Optional[str]` — Name of the Flow.
-
+
@@ -6701,7 +6768,7 @@ client.flows.move(
**directory_id:** `typing.Optional[str]` — Unique identifier for the Directory to move Flow to. Starts with `dir_`.
-
+
@@ -6709,13 +6776,12 @@ client.flows.move(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -6733,6 +6799,7 @@ client.flows.move(
Get a list of Flows.
+
@@ -6762,6 +6829,7 @@ for page in response.iter_pages():
yield page
```
+
@@ -6776,7 +6844,7 @@ for page in response.iter_pages():
**page:** `typing.Optional[int]` — Page number for pagination.
-
+
@@ -6784,7 +6852,7 @@ for page in response.iter_pages():
**size:** `typing.Optional[int]` — Page size for pagination. Number of Flows to fetch.
-
+
@@ -6792,7 +6860,7 @@ for page in response.iter_pages():
**name:** `typing.Optional[str]` — Case-insensitive filter for Flow name.
-
+
@@ -6800,7 +6868,7 @@ for page in response.iter_pages():
**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Flow. This filter matches against both email address and name of users.
-
+
@@ -6808,7 +6876,7 @@ for page in response.iter_pages():
**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Flows by
-
+
@@ -6816,7 +6884,7 @@ for page in response.iter_pages():
**order:** `typing.Optional[SortOrder]` — Direction to sort by.
-
+
@@ -6824,13 +6892,12 @@ for page in response.iter_pages():
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -6854,6 +6921,7 @@ Flows can also be identified by the `ID` or their `path`.
If you provide a commit message, then the new version will be committed;
otherwise it will be uncommitted. If you try to commit an already committed version,
an exception will be raised.
+
@@ -6891,6 +6959,7 @@ client.flows.upsert(
)
```
+
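A minimal sketch of upserting a Flow version keyed by its `attributes`; the path and attribute values are placeholders.

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# The attributes object identifies the Flow Version: upserting the
# same attributes again resolves to the existing version.
client.flows.upsert(
    path="flows/triage",  # placeholder path
    attributes={"retriever": "bm25", "model": "gpt-4o"},
    commit_message="Switch retriever to BM25",
)
```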
@@ -6905,7 +6974,7 @@ client.flows.upsert(
**attributes:** `typing.Dict[str, typing.Optional[typing.Any]]` — A key-value object identifying the Flow Version.
-
+
@@ -6913,7 +6982,7 @@ client.flows.upsert(
**path:** `typing.Optional[str]` — Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
-
+
@@ -6921,7 +6990,7 @@ client.flows.upsert(
**id:** `typing.Optional[str]` — ID for an existing Flow.
-
+
@@ -6929,7 +6998,7 @@ client.flows.upsert(
**commit_message:** `typing.Optional[str]` — Message describing the changes made.
-
+
@@ -6937,13 +7006,12 @@ client.flows.upsert(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -6964,6 +7032,7 @@ Update the status, inputs, output of a Flow Log.
Marking a Flow Log as complete will trigger any monitoring Evaluators to run.
Inputs and output (or error) must be provided in order to mark it as complete.
+
@@ -6993,6 +7062,7 @@ client.flows.update_log(
)
```
+
@@ -7007,7 +7077,7 @@ client.flows.update_log(
**log_id:** `str` — Unique identifier of the Flow Log.
-
+
@@ -7015,7 +7085,7 @@ client.flows.update_log(
**trace_status:** `TraceStatus` — Status of the Trace. When a Trace is marked as `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on completed Traces.
-
+
@@ -7023,7 +7093,7 @@ client.flows.update_log(
**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the Flow Log.
-
+
@@ -7031,7 +7101,7 @@ client.flows.update_log(
**output:** `typing.Optional[str]` — The output of the Flow Log. Provide None to unset the existing `output` value. Provide either this or `error`.
-
+
@@ -7039,7 +7109,7 @@ client.flows.update_log(
**error:** `typing.Optional[str]` — The error message of the Flow Log. Provide None to unset the existing `error` value. Provide either this or `output`.
-
+
@@ -7047,13 +7117,12 @@ client.flows.update_log(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -7071,6 +7140,7 @@ client.flows.update_log(
Get a list of all the versions of a Flow.
+
@@ -7096,6 +7166,7 @@ client.flows.list_versions(
)
```
+
@@ -7110,7 +7181,7 @@ client.flows.list_versions(
**id:** `str` — Unique identifier for Flow.
-
+
@@ -7118,7 +7189,7 @@ client.flows.list_versions(
**status:** `typing.Optional[VersionStatus]` — Filter versions by status: 'uncommitted', 'committed'. If no status is provided, all versions are returned.
-
+
@@ -7126,7 +7197,7 @@ client.flows.list_versions(
**evaluator_aggregates:** `typing.Optional[bool]` — Whether to include Evaluator aggregate results for the versions in the response
-
+
@@ -7134,13 +7205,12 @@ client.flows.list_versions(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -7160,6 +7230,7 @@ client.flows.list_versions(
Commit a version of the Flow with a commit message.
If the version is already committed, an exception will be raised.
+
@@ -7186,6 +7257,7 @@ client.flows.commit(
)
```
+
@@ -7200,7 +7272,7 @@ client.flows.commit(
**id:** `str` — Unique identifier for Flow.
-
+
@@ -7208,7 +7280,7 @@ client.flows.commit(
**version_id:** `str` — Unique identifier for the specific version of the Flow.
-
+
@@ -7216,7 +7288,7 @@ client.flows.commit(
**commit_message:** `str` — Message describing the changes made.
-
+
@@ -7224,13 +7296,12 @@ client.flows.commit(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -7251,6 +7322,7 @@ Deploy Flow to an Environment.
Set the deployed version for the specified Environment. This Flow
will be used for calls made to the Flow in this Environment.
+
@@ -7277,6 +7349,7 @@ client.flows.set_deployment(
)
```
+
@@ -7291,7 +7364,7 @@ client.flows.set_deployment(
**id:** `str` — Unique identifier for Flow.
-
+
@@ -7299,7 +7372,7 @@ client.flows.set_deployment(
**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
-
+
@@ -7307,7 +7380,7 @@ client.flows.set_deployment(
**version_id:** `str` — Unique identifier for the specific version of the Flow.
-
+
@@ -7315,13 +7388,12 @@ client.flows.set_deployment(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -7342,6 +7414,7 @@ Remove deployed Flow from the Environment.
Remove the deployed version for the specified Environment. This Flow
will no longer be used for calls made to the Flow in this Environment.
+
@@ -7367,6 +7440,7 @@ client.flows.remove_deployment(
)
```
+
@@ -7381,7 +7455,7 @@ client.flows.remove_deployment(
**id:** `str` — Unique identifier for Flow.
-
+
@@ -7389,7 +7463,7 @@ client.flows.remove_deployment(
**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
-
+
@@ -7397,13 +7471,12 @@ client.flows.remove_deployment(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -7421,6 +7494,7 @@ client.flows.remove_deployment(
List all Environments and their deployed versions for the Flow.
+
@@ -7445,6 +7519,7 @@ client.flows.list_environments(
)
```
+
@@ -7459,7 +7534,7 @@ client.flows.list_environments(
**id:** `str` — Unique identifier for Flow.
-
+
@@ -7467,13 +7542,12 @@ client.flows.list_environments(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -7494,6 +7568,7 @@ Activate and deactivate Evaluators for monitoring the Flow.
An activated Evaluator will automatically be run on all new "completed" Logs
within the Flow for monitoring purposes.
+
@@ -7519,6 +7594,7 @@ client.flows.update_monitoring(
)
```
+
@@ -7532,8 +7608,8 @@ client.flows.update_monitoring(
-
-**id:** `str`
-
+**id:** `str`
+
@@ -7543,7 +7619,7 @@ client.flows.update_monitoring(
**activate:** `typing.Optional[
typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]
]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
-
+
@@ -7553,7 +7629,7 @@ client.flows.update_monitoring(
**deactivate:** `typing.Optional[
typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]
]` — Evaluators to deactivate. These will not be run on new Logs.
-
+
@@ -7561,18 +7637,18 @@ client.flows.update_monitoring(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
## Directories
+
client.directories.list()
-
@@ -7586,6 +7662,7 @@ client.flows.update_monitoring(
-
Retrieve a list of all Directories.
+
@@ -7608,6 +7685,7 @@ client = Humanloop(
client.directories.list()
```
+
@@ -7622,13 +7700,12 @@ client.directories.list()
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -7646,6 +7723,7 @@ client.directories.list()
Creates a Directory.
+
@@ -7668,6 +7746,7 @@ client = Humanloop(
client.directories.create()
```
+
@@ -7682,7 +7761,7 @@ client.directories.create()
**name:** `typing.Optional[str]` — Name of the directory to create.
-
+
@@ -7690,7 +7769,7 @@ client.directories.create()
**parent_id:** `typing.Optional[str]` — ID of the parent directory. Starts with `dir_`.
-
+
@@ -7698,7 +7777,7 @@ client.directories.create()
**path:** `typing.Optional[str]` — Path to create the directory in, relative to the root directory. If the path does not exist, it will be created. Includes name, e.g. `path/to/directory`.
-
+
@@ -7706,13 +7785,12 @@ client.directories.create()
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -7730,6 +7808,7 @@ client.directories.create()
Fetches a directory by ID.
+
@@ -7754,6 +7833,7 @@ client.directories.get(
)
```
+
@@ -7768,7 +7848,7 @@ client.directories.get(
**id:** `str` — String ID of directory. Starts with `dir_`.
-
+
@@ -7776,13 +7856,12 @@ client.directories.get(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -7802,6 +7881,7 @@ client.directories.get(
Delete the Directory with the given ID.
The Directory must be empty (i.e. contain no Directories or Files).
+
@@ -7826,6 +7906,7 @@ client.directories.delete(
)
```
+
@@ -7840,7 +7921,7 @@ client.directories.delete(
**id:** `str` — Unique identifier for Directory. Starts with `dir_`.
-
+
@@ -7848,13 +7929,12 @@ client.directories.delete(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -7872,6 +7952,7 @@ client.directories.delete(
Update the Directory with the given ID.
+
@@ -7896,6 +7977,7 @@ client.directories.update(
)
```
+
@@ -7910,7 +7992,7 @@ client.directories.update(
**id:** `str` — Unique identifier for Directory. Starts with `dir_`.
-
+
@@ -7918,7 +8000,7 @@ client.directories.update(
**name:** `typing.Optional[str]` — Name to set for the directory.
-
+
@@ -7926,7 +8008,7 @@ client.directories.update(
**parent_id:** `typing.Optional[str]` — ID of the parent directory. Specify this to move directories. Starts with `dir_`.
-
+
@@ -7934,7 +8016,7 @@ client.directories.update(
**path:** `typing.Optional[str]` — Path to move the directory to, relative to the root directory. Specify this to move directories. Includes name, e.g. `path/to/directory`.
-
+
@@ -7942,18 +8024,18 @@ client.directories.update(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
## Files
+
client.files.list(...)
-
@@ -7967,6 +8049,7 @@ client.directories.update(
-
Get a paginated list of files.
+
@@ -7989,6 +8072,7 @@ client = Humanloop(
client.files.list()
```
+
@@ -8003,7 +8087,7 @@ client.files.list()
**page:** `typing.Optional[int]` — Page offset for pagination.
-
+
@@ -8011,7 +8095,7 @@ client.files.list()
**size:** `typing.Optional[int]` — Page size for pagination. Number of files to fetch.
-
+
@@ -8019,7 +8103,7 @@ client.files.list()
**name:** `typing.Optional[str]` — Case-insensitive filter for file name.
-
+
@@ -8027,7 +8111,7 @@ client.files.list()
**type:** `typing.Optional[typing.Union[FileType, typing.Sequence[FileType]]]` — List of file types to filter for.
-
+
@@ -8035,7 +8119,7 @@ client.files.list()
**environment:** `typing.Optional[str]` — Case-sensitive filter for files with a deployment in the specified environment. Requires the environment name.
-
+
@@ -8043,7 +8127,7 @@ client.files.list()
**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort files by
-
+
@@ -8051,7 +8135,7 @@ client.files.list()
**order:** `typing.Optional[SortOrder]` — Direction to sort by.
-
+
@@ -8059,18 +8143,18 @@ client.files.list()
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
## Evaluations
+
client.evaluations.list(...)
-
@@ -8084,6 +8168,7 @@ client.files.list()
-
Retrieve a list of Evaluations for the specified File.
+
@@ -8114,6 +8199,7 @@ for page in response.iter_pages():
yield page
```
+
@@ -8128,7 +8214,7 @@ for page in response.iter_pages():
**file_id:** `str` — Filter by File ID. Only Evaluations for the specified File will be returned.
-
+
@@ -8136,7 +8222,7 @@ for page in response.iter_pages():
**page:** `typing.Optional[int]` — Page number for pagination.
-
+
@@ -8144,7 +8230,7 @@ for page in response.iter_pages():
**size:** `typing.Optional[int]` — Page size for pagination. Number of Evaluations to fetch.
-
+
@@ -8152,13 +8238,12 @@ for page in response.iter_pages():
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -8180,6 +8265,7 @@ Create an Evaluation.
Create a new Evaluation by specifying the File to evaluate, and a name
for the Evaluation.
You can then add Runs to this Evaluation using the `POST /evaluations/{id}/runs` endpoint.
+
@@ -8204,6 +8290,7 @@ client.evaluations.create(
)
```
+
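A sketch of creating an Evaluation against a File. The `{"id": ...}` file reference and `{"version_id": ...}` evaluator item are assumptions about the `FileRequestParams` and evaluators item shapes; the IDs are placeholders.

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

evaluation = client.evaluations.create(
    name="rag-quality-october",
    file={"id": "pr_1234567890"},  # assumed FileRequestParams shape
    evaluators=[{"version_id": "evv_1234567890"}],  # assumed item shape
)
```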
@@ -8218,7 +8305,7 @@ client.evaluations.create(
**evaluators:** `typing.Sequence[CreateEvaluationRequestEvaluatorsItemParams]` — The Evaluators used to evaluate.
-
+
@@ -8226,7 +8313,7 @@ client.evaluations.create(
**file:** `typing.Optional[FileRequestParams]` — The File to associate with the Evaluation. This File contains the Logs you're evaluating.
-
+
@@ -8234,7 +8321,7 @@ client.evaluations.create(
**name:** `typing.Optional[str]` — Name of the Evaluation to help identify it. Must be unique within the associated File.
-
+
@@ -8242,13 +8329,12 @@ client.evaluations.create(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -8268,6 +8354,7 @@ client.evaluations.create(
Add Evaluators to an Evaluation.
The Evaluators will be run on the Logs generated for the Evaluation.
+
@@ -8293,6 +8380,7 @@ client.evaluations.add_evaluators(
)
```
+
@@ -8307,7 +8395,7 @@ client.evaluations.add_evaluators(
**id:** `str` — Unique identifier for Evaluation.
-
+
@@ -8315,7 +8403,7 @@ client.evaluations.add_evaluators(
**evaluators:** `typing.Sequence[AddEvaluatorsRequestEvaluatorsItemParams]` — The Evaluators to add to this Evaluation.
-
+
@@ -8323,13 +8411,12 @@ client.evaluations.add_evaluators(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -8349,6 +8436,7 @@ client.evaluations.add_evaluators(
Remove an Evaluator from an Evaluation.
The Evaluator will no longer be run on the Logs in the Evaluation.
+
@@ -8374,6 +8462,7 @@ client.evaluations.remove_evaluator(
)
```
+
@@ -8388,7 +8477,7 @@ client.evaluations.remove_evaluator(
**id:** `str` — Unique identifier for Evaluation.
-
+
@@ -8396,7 +8485,7 @@ client.evaluations.remove_evaluator(
**evaluator_version_id:** `str` — Unique identifier for Evaluator Version.
-
+
@@ -8404,13 +8493,12 @@ client.evaluations.remove_evaluator(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -8434,6 +8522,7 @@ such as its name.
To get the Runs associated with the Evaluation, use the `GET /evaluations/{id}/runs` endpoint.
To retrieve stats for the Evaluation, use the `GET /evaluations/{id}/stats` endpoint.
+
@@ -8458,6 +8547,7 @@ client.evaluations.get(
)
```
+
@@ -8472,7 +8562,7 @@ client.evaluations.get(
**id:** `str` — Unique identifier for Evaluation.
-
+
@@ -8480,13 +8570,12 @@ client.evaluations.get(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -8506,6 +8595,7 @@ client.evaluations.get(
Delete an Evaluation.
The Runs and Evaluators in the Evaluation will not be deleted.
+
@@ -8530,6 +8620,7 @@ client.evaluations.delete(
)
```
+
@@ -8544,7 +8635,7 @@ client.evaluations.delete(
**id:** `str` — Unique identifier for Evaluation.
-
+
@@ -8552,13 +8643,12 @@ client.evaluations.delete(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -8576,6 +8666,7 @@ client.evaluations.delete(
List all Runs for an Evaluation.
+
@@ -8600,6 +8691,7 @@ client.evaluations.list_runs_for_evaluation(
)
```
+
@@ -8614,7 +8706,7 @@ client.evaluations.list_runs_for_evaluation(
**id:** `str` — Unique identifier for Evaluation.
-
+
@@ -8622,13 +8714,12 @@ client.evaluations.list_runs_for_evaluation(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -8659,6 +8750,7 @@ referencing a datapoint in the specified Dataset will be associated with the Run
To keep updated on the progress of the Run, you can poll the Run using
the `GET /evaluations/{id}/runs` endpoint and check its status.
+
@@ -8683,6 +8775,7 @@ client.evaluations.create_run(
)
```
+
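A sketch of kicking off an orchestrated Run and polling it, per the note above. The `{"version_id": ...}` dataset/version shapes and the polled response's `runs[...].status` attribute are assumptions; the IDs are placeholders.

```python
import time

from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

run = client.evaluations.create_run(
    id="ev_1234567890",
    dataset={"version_id": "dsv_1234567890"},  # assumed shape
    version={"version_id": "prv_1234567890"},  # assumed shape
    orchestrated=True,
)

# Poll until every Run has left the pending/running states.
while True:
    runs = client.evaluations.list_runs_for_evaluation(id="ev_1234567890")
    statuses = {r.status for r in runs.runs}  # assumed response shape
    if statuses <= {"completed", "cancelled"}:
        break
    time.sleep(5)
```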
@@ -8697,7 +8790,7 @@ client.evaluations.create_run(
**id:** `str` — Unique identifier for Evaluation.
-
+
@@ -8705,7 +8798,7 @@ client.evaluations.create_run(
**dataset:** `typing.Optional[CreateRunRequestDatasetParams]` — Dataset to use in this Run.
-
+
@@ -8713,7 +8806,7 @@ client.evaluations.create_run(
**version:** `typing.Optional[CreateRunRequestVersionParams]` — Version to use in this Run.
-
+
@@ -8721,7 +8814,7 @@ client.evaluations.create_run(
**orchestrated:** `typing.Optional[bool]` — Whether the Run is orchestrated by Humanloop. If `True`, Humanloop will generate Logs for the Run; `dataset` and `version` must be provided. If `False`, a log for the Prompt/Tool should be submitted by the user via the API.
-
+
@@ -8729,7 +8822,7 @@ client.evaluations.create_run(
**use_existing_logs:** `typing.Optional[bool]` — If `True`, the Run will be initialized with existing Logs associated with the Dataset and Version. If `False`, the Run will be initialized with no Logs. Can only be set to `True` when both `dataset` and `version` are provided.
-
+
@@ -8737,13 +8830,12 @@ client.evaluations.create_run(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -8764,6 +8856,7 @@ Add an existing Run to the specified Evaluation.
This is useful if you want to compare the Runs in this Evaluation with an existing Run
that exists within another Evaluation.
+
@@ -8789,6 +8882,7 @@ client.evaluations.add_existing_run(
)
```
+
@@ -8803,7 +8897,7 @@ client.evaluations.add_existing_run(
**id:** `str` — Unique identifier for Evaluation.
-
+
@@ -8811,7 +8905,7 @@ client.evaluations.add_existing_run(
**run_id:** `str` — Unique identifier for Run.
-
+
@@ -8819,13 +8913,12 @@ client.evaluations.add_existing_run(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -8846,6 +8939,7 @@ Remove a Run from an Evaluation.
The Logs and Versions used in the Run will not be deleted.
If this Run is used in any other Evaluations, it will still be available in those Evaluations.
+
@@ -8871,6 +8965,7 @@ client.evaluations.remove_run(
)
```
+
@@ -8885,7 +8980,7 @@ client.evaluations.remove_run(
**id:** `str` — Unique identifier for Evaluation.
-
+
@@ -8893,7 +8988,7 @@ client.evaluations.remove_run(
**run_id:** `str` — Unique identifier for Run.
-
+
@@ -8901,13 +8996,12 @@ client.evaluations.remove_run(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -8928,6 +9022,7 @@ Update an Evaluation Run.
Specify `control=true` to use this Run as the control Run for the Evaluation.
You can cancel a running/pending Run, or mark a Run that uses external or human Evaluators as completed.
+
@@ -8953,6 +9048,7 @@ client.evaluations.update_evaluation_run(
)
```
+
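Two short sketches of the operations described above: promoting a Run to the control, and cancelling a Run that is still pending or running (placeholder IDs).

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Use this Run as the control; other Runs' stats are compared to it.
client.evaluations.update_evaluation_run(
    id="ev_1234567890",
    run_id="rn_1234567890",
    control=True,
)

# Cancel a Run that is currently pending or running.
client.evaluations.update_evaluation_run(
    id="ev_1234567890",
    run_id="rn_0987654321",
    status="cancelled",
)
```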
@@ -8967,7 +9063,7 @@ client.evaluations.update_evaluation_run(
**id:** `str` — Unique identifier for Evaluation.
-
+
@@ -8975,7 +9071,7 @@ client.evaluations.update_evaluation_run(
**run_id:** `str` — Unique identifier for Run.
-
+
@@ -8983,7 +9079,7 @@ client.evaluations.update_evaluation_run(
**control:** `typing.Optional[bool]` — If `True`, this Run will be used as the control in the Evaluation. Stats for other Runs will be compared to this Run. This will replace any existing control Run.
-
+
@@ -8991,7 +9087,7 @@ client.evaluations.update_evaluation_run(
**status:** `typing.Optional[EvaluationStatus]` — Used to set the Run to `cancelled` or `completed`. Can only be used if the Run is currently `pending` or `running`.
-
+
@@ -8999,13 +9095,12 @@ client.evaluations.update_evaluation_run(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -9023,6 +9118,7 @@ client.evaluations.update_evaluation_run(
Add the specified Logs to a Run.
+
@@ -9049,6 +9145,7 @@ client.evaluations.add_logs_to_run(
)
```
+
@@ -9063,7 +9160,7 @@ client.evaluations.add_logs_to_run(
**id:** `str` — Unique identifier for Evaluation.
-
+
@@ -9071,7 +9168,7 @@ client.evaluations.add_logs_to_run(
**run_id:** `str` — Unique identifier for Run.
-
+
@@ -9079,7 +9176,7 @@ client.evaluations.add_logs_to_run(
**log_ids:** `typing.Sequence[str]` — The IDs of the Logs to add to the Run.
-
+
@@ -9087,13 +9184,12 @@ client.evaluations.add_logs_to_run(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -9114,6 +9210,7 @@ Get Evaluation Stats.
Retrieve aggregate stats for the specified Evaluation. This includes the number of generated Logs for each Run and the
corresponding Evaluator statistics (such as the mean and percentiles).
+
@@ -9138,6 +9235,7 @@ client.evaluations.get_stats(
)
```
+
@@ -9152,7 +9250,7 @@ client.evaluations.get_stats(
**id:** `str` — Unique identifier for Evaluation.
-
+
@@ -9160,13 +9258,12 @@ client.evaluations.get_stats(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -9186,6 +9283,7 @@ client.evaluations.get_stats(
Get the Logs associated to a specific Evaluation.
This returns the Logs associated to all Runs within the Evaluation.
+
@@ -9210,6 +9308,7 @@ client.evaluations.get_logs(
)
```
+
@@ -9224,7 +9323,7 @@ client.evaluations.get_logs(
**id:** `str` — String ID of evaluation. Starts with `ev_` or `evr_`.
-
+
@@ -9232,7 +9331,7 @@ client.evaluations.get_logs(
**page:** `typing.Optional[int]` — Page number for pagination.
-
+
@@ -9240,7 +9339,7 @@ client.evaluations.get_logs(
**size:** `typing.Optional[int]` — Page size for pagination. Number of Logs to fetch.
-
+
@@ -9248,18 +9347,18 @@ client.evaluations.get_logs(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
## Logs
+
client.logs.list(...)
-
@@ -9273,6 +9372,7 @@ client.evaluations.get_logs(
-
List all Logs for the given filter criteria.
+
@@ -9303,6 +9403,7 @@ for page in response.iter_pages():
yield page
```
+
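A sketch combining several of the filters documented below. The File ID is a placeholder, the dates are arbitrary, and iterating a page for its Logs is an assumption about the pager's interface.

```python
import datetime

from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

response = client.logs.list(
    file_id="pr_1234567890",
    search="Roswell",  # matched against inputs and output
    start_date=datetime.datetime(2024, 7, 1),
    size=50,
)
for page in response.iter_pages():
    for log in page:  # assumes pages are iterable over items
        print(log)
```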
@@ -9317,7 +9418,7 @@ for page in response.iter_pages():
**file_id:** `str` — Unique identifier for the File to list Logs for.
-
+
@@ -9325,7 +9426,7 @@ for page in response.iter_pages():
**page:** `typing.Optional[int]` — Page number for pagination.
-
+
@@ -9333,7 +9434,7 @@ for page in response.iter_pages():
**size:** `typing.Optional[int]` — Page size for pagination. Number of Logs to fetch.
-
+
@@ -9341,7 +9442,7 @@ for page in response.iter_pages():
**version_id:** `typing.Optional[str]` — If provided, only Logs belonging to the specified Version will be returned.
-
+
@@ -9349,7 +9450,7 @@ for page in response.iter_pages():
**version_status:** `typing.Optional[VersionStatus]` — If provided, only Logs belonging to Versions with the specified status will be returned.
-
+
@@ -9357,7 +9458,7 @@ for page in response.iter_pages():
**search:** `typing.Optional[str]` — If provided, only Logs that contain the provided string in their inputs and output will be returned.
-
+
@@ -9365,7 +9466,7 @@ for page in response.iter_pages():
**metadata_search:** `typing.Optional[str]` — If provided, only Logs that contain the provided string in their metadata will be returned.
-
+
@@ -9373,7 +9474,7 @@ for page in response.iter_pages():
**start_date:** `typing.Optional[dt.datetime]` — If provided, only Logs created after the specified date will be returned.
-
+
@@ -9381,7 +9482,7 @@ for page in response.iter_pages():
**end_date:** `typing.Optional[dt.datetime]` — If provided, only Logs created before the specified date will be returned.
-
+
@@ -9389,7 +9490,7 @@ for page in response.iter_pages():
**include_parent:** `typing.Optional[bool]` — If true, include the full parent Log in the response. Only applicable when retrieving Evaluator Logs.
-
+
@@ -9397,7 +9498,7 @@ for page in response.iter_pages():
**in_trace_filter:** `typing.Optional[typing.Union[bool, typing.Sequence[bool]]]` — If true, return Logs that are associated to a Trace; if false, return Logs that are not associated to a Trace.
-
+
@@ -9405,13 +9506,12 @@ for page in response.iter_pages():
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -9429,6 +9529,7 @@ for page in response.iter_pages():
Delete Logs with the given IDs.
+
@@ -9453,6 +9554,7 @@ client.logs.delete(
)
```
+
@@ -9467,7 +9569,7 @@ client.logs.delete(
**id:** `typing.Optional[typing.Union[str, typing.Sequence[str]]]` — Unique identifiers for the Logs to delete.
-
+
@@ -9475,13 +9577,12 @@ client.logs.delete(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
@@ -9499,6 +9600,7 @@ client.logs.delete(
Retrieve the Log with the given ID.
+
@@ -9523,6 +9625,7 @@ client.logs.get(
)
```
+
@@ -9537,7 +9640,7 @@ client.logs.get(
**id:** `str` — Unique identifier for Log.
-
+
@@ -9545,14 +9648,12 @@ client.logs.get(
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
+
-
-
diff --git a/src/humanloop/__init__.py b/src/humanloop/__init__.py
index 4ce861b0..780246da 100644
--- a/src/humanloop/__init__.py
+++ b/src/humanloop/__init__.py
@@ -85,6 +85,7 @@
ListTools,
LlmEvaluatorRequest,
LogResponse,
+ LogsAssociationType,
ModelEndpoints,
ModelProviders,
MonitoringEvaluatorEnvironmentRequest,
@@ -151,6 +152,7 @@
VersionIdResponse,
VersionIdResponseVersion,
VersionReferenceResponse,
+ VersionSpecification,
VersionStatsResponse,
VersionStatsResponseEvaluatorVersionStatsItem,
VersionStatus,
@@ -309,6 +311,7 @@
VersionIdResponseParams,
VersionIdResponseVersionParams,
VersionReferenceResponseParams,
+ VersionSpecificationParams,
VersionStatsResponseEvaluatorVersionStatsItemParams,
VersionStatsResponseParams,
)
@@ -483,6 +486,7 @@
"LlmEvaluatorRequestParams",
"LogResponse",
"LogResponseParams",
+ "LogsAssociationType",
"ModelEndpoints",
"ModelProviders",
"MonitoringEvaluatorEnvironmentRequest",
@@ -612,6 +616,8 @@
"VersionIdResponseVersionParams",
"VersionReferenceResponse",
"VersionReferenceResponseParams",
+ "VersionSpecification",
+ "VersionSpecificationParams",
"VersionStatsResponse",
"VersionStatsResponseEvaluatorVersionStatsItem",
"VersionStatsResponseEvaluatorVersionStatsItemParams",
diff --git a/src/humanloop/core/client_wrapper.py b/src/humanloop/core/client_wrapper.py
index d1349af7..c08b9b20 100644
--- a/src/humanloop/core/client_wrapper.py
+++ b/src/humanloop/core/client_wrapper.py
@@ -7,7 +7,9 @@
class BaseClientWrapper:
- def __init__(self, *, api_key: str, base_url: str, timeout: typing.Optional[float] = None):
+ def __init__(
+ self, *, api_key: str, base_url: str, timeout: typing.Optional[float] = None
+ ):
self.api_key = api_key
self._base_url = base_url
self._timeout = timeout
@@ -30,7 +32,12 @@ def get_timeout(self) -> typing.Optional[float]:
class SyncClientWrapper(BaseClientWrapper):
def __init__(
- self, *, api_key: str, base_url: str, timeout: typing.Optional[float] = None, httpx_client: httpx.Client
+ self,
+ *,
+ api_key: str,
+ base_url: str,
+ timeout: typing.Optional[float] = None,
+ httpx_client: httpx.Client,
):
super().__init__(api_key=api_key, base_url=base_url, timeout=timeout)
self.httpx_client = HttpClient(
@@ -43,7 +50,12 @@ def __init__(
class AsyncClientWrapper(BaseClientWrapper):
def __init__(
- self, *, api_key: str, base_url: str, timeout: typing.Optional[float] = None, httpx_client: httpx.AsyncClient
+ self,
+ *,
+ api_key: str,
+ base_url: str,
+ timeout: typing.Optional[float] = None,
+ httpx_client: httpx.AsyncClient,
):
super().__init__(api_key=api_key, base_url=base_url, timeout=timeout)
self.httpx_client = AsyncHttpClient(
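As a hedged usage sketch (the top-level client normally constructs these wrappers itself; the base URL below is an illustrative assumption, not taken from the patch), the reformatted keyword-only signature is invoked like so:

```python
# Minimal sketch: constructing the sync wrapper directly. All arguments are
# keyword-only, matching the `*` in the reformatted __init__ signatures above.
import httpx

from humanloop.core.client_wrapper import SyncClientWrapper

wrapper = SyncClientWrapper(
    api_key="hl_sk_...",                      # placeholder API key
    base_url="https://api.humanloop.com/v5",  # assumed base URL, illustration only
    timeout=30.0,                             # optional; defaults to None
    httpx_client=httpx.Client(),              # required by the sync wrapper
)
```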
diff --git a/src/humanloop/evaluations/client.py b/src/humanloop/evaluations/client.py
index f0b5af8c..c1b841c7 100644
--- a/src/humanloop/evaluations/client.py
+++ b/src/humanloop/evaluations/client.py
@@ -11,18 +11,29 @@
from ..types.http_validation_error import HttpValidationError
from json.decoder import JSONDecodeError
from ..core.api_error import ApiError
-from .requests.create_evaluation_request_evaluators_item import CreateEvaluationRequestEvaluatorsItemParams
+from .requests.create_evaluation_request_evaluators_item import (
+ CreateEvaluationRequestEvaluatorsItemParams,
+)
+from ..requests.evaluations_request import EvaluationsRequestParams
from ..requests.file_request import FileRequestParams
from ..core.serialization import convert_and_respect_annotation_metadata
-from .requests.add_evaluators_request_evaluators_item import AddEvaluatorsRequestEvaluatorsItemParams
+from .requests.add_evaluators_request_evaluators_item import (
+ AddEvaluatorsRequestEvaluatorsItemParams,
+)
from ..core.jsonable_encoder import jsonable_encoder
from ..types.evaluation_runs_response import EvaluationRunsResponse
from .requests.create_run_request_dataset import CreateRunRequestDatasetParams
from .requests.create_run_request_version import CreateRunRequestVersionParams
from ..types.evaluation_run_response import EvaluationRunResponse
from ..types.evaluation_status import EvaluationStatus
+from ..requests.evaluations_dataset_request import EvaluationsDatasetRequestParams
+from ..requests.version_specification import VersionSpecificationParams
+from ..types.logs_association_type import LogsAssociationType
+from ..types.evaluation_run_response import EvaluationRunResponse
from ..types.evaluation_stats import EvaluationStats
-from ..types.paginated_data_evaluation_log_response import PaginatedDataEvaluationLogResponse
+from ..types.paginated_data_evaluation_log_response import (
+ PaginatedDataEvaluationLogResponse,
+)
from ..core.client_wrapper import AsyncClientWrapper
from ..core.pagination import AsyncPager
@@ -180,7 +191,9 @@ def create(
"name": name,
"evaluators": convert_and_respect_annotation_metadata(
object_=evaluators,
- annotation=typing.Sequence[CreateEvaluationRequestEvaluatorsItemParams],
+ annotation=typing.Sequence[
+ CreateEvaluationRequestEvaluatorsItemParams
+ ],
direction="write",
),
},
@@ -257,7 +270,9 @@ def add_evaluators(
json={
"evaluators": convert_and_respect_annotation_metadata(
object_=evaluators,
- annotation=typing.Sequence[AddEvaluatorsRequestEvaluatorsItemParams],
+ annotation=typing.Sequence[
+ AddEvaluatorsRequestEvaluatorsItemParams
+ ],
direction="write",
),
},
@@ -289,7 +304,11 @@ def add_evaluators(
raise ApiError(status_code=_response.status_code, body=_response_json)
def remove_evaluator(
- self, id: str, evaluator_version_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ self,
+ id: str,
+ evaluator_version_id: str,
+ *,
+ request_options: typing.Optional[RequestOptions] = None,
) -> EvaluationResponse:
"""
Remove an Evaluator from an Evaluation.
@@ -353,7 +372,9 @@ def remove_evaluator(
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- def get(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> EvaluationResponse:
+ def get(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> EvaluationResponse:
"""
Get an Evaluation.
@@ -416,7 +437,9 @@ def get(self, id: str, *, request_options: typing.Optional[RequestOptions] = Non
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
+ def delete(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> None:
"""
Delete an Evaluation.
@@ -594,10 +617,14 @@ def create_run(
method="POST",
json={
"dataset": convert_and_respect_annotation_metadata(
- object_=dataset, annotation=CreateRunRequestDatasetParams, direction="write"
+ object_=dataset,
+ annotation=CreateRunRequestDatasetParams,
+ direction="write",
),
"version": convert_and_respect_annotation_metadata(
- object_=version, annotation=CreateRunRequestVersionParams, direction="write"
+ object_=version,
+ annotation=CreateRunRequestVersionParams,
+ direction="write",
),
"orchestrated": orchestrated,
"use_existing_logs": use_existing_logs,
@@ -630,7 +657,11 @@ def create_run(
raise ApiError(status_code=_response.status_code, body=_response_json)
def add_existing_run(
- self, id: str, run_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ self,
+ id: str,
+ run_id: str,
+ *,
+ request_options: typing.Optional[RequestOptions] = None,
) -> typing.Optional[typing.Any]:
"""
Add an existing Run to the specified Evaluation.
@@ -695,7 +726,13 @@ def add_existing_run(
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- def remove_run(self, id: str, run_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
+ def remove_run(
+ self,
+ id: str,
+ run_id: str,
+ *,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> None:
"""
Remove a Run from an Evaluation.
@@ -911,7 +948,9 @@ def add_logs_to_run(
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- def get_stats(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> EvaluationStats:
+ def get_stats(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> EvaluationStats:
"""
Get Evaluation Stats.
@@ -1214,7 +1253,9 @@ async def main() -> None:
"name": name,
"evaluators": convert_and_respect_annotation_metadata(
object_=evaluators,
- annotation=typing.Sequence[CreateEvaluationRequestEvaluatorsItemParams],
+ annotation=typing.Sequence[
+ CreateEvaluationRequestEvaluatorsItemParams
+ ],
direction="write",
),
},
@@ -1299,7 +1340,9 @@ async def main() -> None:
json={
"evaluators": convert_and_respect_annotation_metadata(
object_=evaluators,
- annotation=typing.Sequence[AddEvaluatorsRequestEvaluatorsItemParams],
+ annotation=typing.Sequence[
+ AddEvaluatorsRequestEvaluatorsItemParams
+ ],
direction="write",
),
},
@@ -1331,7 +1374,11 @@ async def main() -> None:
raise ApiError(status_code=_response.status_code, body=_response_json)
async def remove_evaluator(
- self, id: str, evaluator_version_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ self,
+ id: str,
+ evaluator_version_id: str,
+ *,
+ request_options: typing.Optional[RequestOptions] = None,
) -> EvaluationResponse:
"""
Remove an Evaluator from an Evaluation.
@@ -1403,7 +1450,9 @@ async def main() -> None:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def get(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> EvaluationResponse:
+ async def get(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> EvaluationResponse:
"""
Get an Evaluation.
@@ -1474,7 +1523,9 @@ async def main() -> None:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
+ async def delete(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> None:
"""
Delete an Evaluation.
@@ -1676,10 +1727,14 @@ async def main() -> None:
method="POST",
json={
"dataset": convert_and_respect_annotation_metadata(
- object_=dataset, annotation=CreateRunRequestDatasetParams, direction="write"
+ object_=dataset,
+ annotation=CreateRunRequestDatasetParams,
+ direction="write",
),
"version": convert_and_respect_annotation_metadata(
- object_=version, annotation=CreateRunRequestVersionParams, direction="write"
+ object_=version,
+ annotation=CreateRunRequestVersionParams,
+ direction="write",
),
"orchestrated": orchestrated,
"use_existing_logs": use_existing_logs,
@@ -1712,7 +1767,11 @@ async def main() -> None:
raise ApiError(status_code=_response.status_code, body=_response_json)
async def add_existing_run(
- self, id: str, run_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ self,
+ id: str,
+ run_id: str,
+ *,
+ request_options: typing.Optional[RequestOptions] = None,
) -> typing.Optional[typing.Any]:
"""
Add an existing Run to the specified Evaluation.
@@ -1786,7 +1845,11 @@ async def main() -> None:
raise ApiError(status_code=_response.status_code, body=_response_json)
async def remove_run(
- self, id: str, run_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ self,
+ id: str,
+ run_id: str,
+ *,
+ request_options: typing.Optional[RequestOptions] = None,
) -> None:
"""
Remove a Run from an Evaluation.
@@ -2027,7 +2090,9 @@ async def main() -> None:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def get_stats(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> EvaluationStats:
+ async def get_stats(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> EvaluationStats:
"""
Get Evaluation Stats.
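The reformatting above is behaviour-preserving at the call site; a brief sketch through the top-level client (assuming the `Humanloop` entry point and placeholder IDs, neither of which appears in this hunk):

```python
# Sketch: ids stay positional-or-keyword and request_options stays
# keyword-only, so existing call sites continue to work after the reflow.
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")
stats = client.evaluations.get_stats(id="ev_...")
client.evaluations.remove_run(id="ev_...", run_id="...")
client.evaluations.delete(id="ev_...")
```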
diff --git a/src/humanloop/flows/client.py b/src/humanloop/flows/client.py
index 1884b45c..4b4671e7 100644
--- a/src/humanloop/flows/client.py
+++ b/src/humanloop/flows/client.py
@@ -193,10 +193,10 @@ def log(
output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.",
trace_status="incomplete",
start_time=datetime.datetime.fromisoformat(
- "2024-07-08 22:40:35+00:00",
+ "2024-07-08 21:40:35+00:00",
),
end_time=datetime.datetime.fromisoformat(
- "2024-07-08 22:40:39+00:00",
+ "2024-07-08 21:40:39+00:00",
),
)
"""
@@ -1357,10 +1357,10 @@ async def main() -> None:
output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.",
trace_status="incomplete",
start_time=datetime.datetime.fromisoformat(
- "2024-07-08 22:40:35+00:00",
+ "2024-07-08 21:40:35+00:00",
),
end_time=datetime.datetime.fromisoformat(
- "2024-07-08 22:40:39+00:00",
+ "2024-07-08 21:40:39+00:00",
),
)
diff --git a/src/humanloop/prompts/client.py b/src/humanloop/prompts/client.py
index 22e2747f..88cfa117 100644
--- a/src/humanloop/prompts/client.py
+++ b/src/humanloop/prompts/client.py
@@ -232,7 +232,7 @@ def log(
messages=[{"role": "user", "content": "What really happened at Roswell?"}],
inputs={"person": "Trump"},
created_at=datetime.datetime.fromisoformat(
- "2024-07-19 00:29:35.178000+00:00",
+ "2024-07-18 23:29:35.178000+00:00",
),
provider_latency=6.5931549072265625,
output_message={
@@ -2097,7 +2097,7 @@ async def main() -> None:
],
inputs={"person": "Trump"},
created_at=datetime.datetime.fromisoformat(
- "2024-07-19 00:29:35.178000+00:00",
+ "2024-07-18 23:29:35.178000+00:00",
),
provider_latency=6.5931549072265625,
output_message={
diff --git a/src/humanloop/requests/__init__.py b/src/humanloop/requests/__init__.py
index 669a1180..4e9680b0 100644
--- a/src/humanloop/requests/__init__.py
+++ b/src/humanloop/requests/__init__.py
@@ -127,6 +127,7 @@
from .version_id_response import VersionIdResponseParams
from .version_id_response_version import VersionIdResponseVersionParams
from .version_reference_response import VersionReferenceResponseParams
+from .version_specification import VersionSpecificationParams
from .version_stats_response import VersionStatsResponseParams
from .version_stats_response_evaluator_version_stats_item import VersionStatsResponseEvaluatorVersionStatsItemParams
@@ -248,6 +249,7 @@
"VersionIdResponseParams",
"VersionIdResponseVersionParams",
"VersionReferenceResponseParams",
+ "VersionSpecificationParams",
"VersionStatsResponseEvaluatorVersionStatsItemParams",
"VersionStatsResponseParams",
]
diff --git a/src/humanloop/requests/evaluation_log_response.py b/src/humanloop/requests/evaluation_log_response.py
index e423d2f7..c4fdae63 100644
--- a/src/humanloop/requests/evaluation_log_response.py
+++ b/src/humanloop/requests/evaluation_log_response.py
@@ -1,6 +1,5 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
import typing_extensions
from .datapoint_response import DatapointResponseParams
from .log_response import LogResponseParams
diff --git a/src/humanloop/requests/run_stats_response.py b/src/humanloop/requests/run_stats_response.py
index e9127722..cc97328a 100644
--- a/src/humanloop/requests/run_stats_response.py
+++ b/src/humanloop/requests/run_stats_response.py
@@ -3,7 +3,9 @@
import typing_extensions
import typing_extensions
import typing
-from .run_stats_response_evaluator_stats_item import RunStatsResponseEvaluatorStatsItemParams
+from .run_stats_response_evaluator_stats_item import (
+ RunStatsResponseEvaluatorStatsItemParams,
+)
from ..types.evaluation_status import EvaluationStatus
diff --git a/src/humanloop/requests/version_specification.py b/src/humanloop/requests/version_specification.py
new file mode 100644
index 00000000..34606269
--- /dev/null
+++ b/src/humanloop/requests/version_specification.py
@@ -0,0 +1,37 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing_extensions
+
+
+class VersionSpecificationParams(typing_extensions.TypedDict):
+ """
+ Specification of a File version on Humanloop.
+
+ This can be done in a couple of ways:
+
+ - Specifying `version_id` directly.
+ - Specifying a File (and optionally an Environment).
+ - A File can be specified by either `path` or `file_id`.
+        - An Environment can be specified by `environment`. If no Environment is specified, the default Environment is used.
+ """
+
+ version_id: typing_extensions.NotRequired[str]
+ """
+ Unique identifier for the File Version. If provided, none of the other fields should be specified.
+ """
+
+ path: typing_extensions.NotRequired[str]
+ """
+ Path identifying a File. Provide either this or `file_id` if you want to specify a File.
+ """
+
+ file_id: typing_extensions.NotRequired[str]
+ """
+ Unique identifier for the File. Provide either this or `path` if you want to specify a File.
+ """
+
+ environment: typing_extensions.NotRequired[str]
+ """
+ Name of the Environment a Version is deployed to. Only provide this when specifying a File. If not provided (and a File is specified), the default Environment is used.
+ """
diff --git a/src/humanloop/types/__init__.py b/src/humanloop/types/__init__.py
index df3f892b..4e592987 100644
--- a/src/humanloop/types/__init__.py
+++ b/src/humanloop/types/__init__.py
@@ -86,6 +86,7 @@
from .list_tools import ListTools
from .llm_evaluator_request import LlmEvaluatorRequest
from .log_response import LogResponse
+from .logs_association_type import LogsAssociationType
from .model_endpoints import ModelEndpoints
from .model_providers import ModelProviders
from .monitoring_evaluator_environment_request import MonitoringEvaluatorEnvironmentRequest
@@ -156,6 +157,7 @@
from .version_id_response import VersionIdResponse
from .version_id_response_version import VersionIdResponseVersion
from .version_reference_response import VersionReferenceResponse
+from .version_specification import VersionSpecification
from .version_stats_response import VersionStatsResponse
from .version_stats_response_evaluator_version_stats_item import VersionStatsResponseEvaluatorVersionStatsItem
from .version_status import VersionStatus
@@ -245,6 +247,7 @@
"ListTools",
"LlmEvaluatorRequest",
"LogResponse",
+ "LogsAssociationType",
"ModelEndpoints",
"ModelProviders",
"MonitoringEvaluatorEnvironmentRequest",
@@ -311,6 +314,7 @@
"VersionIdResponse",
"VersionIdResponseVersion",
"VersionReferenceResponse",
+ "VersionSpecification",
"VersionStatsResponse",
"VersionStatsResponseEvaluatorVersionStatsItem",
"VersionStatus",
diff --git a/src/humanloop/types/evaluation_log_response.py b/src/humanloop/types/evaluation_log_response.py
index 32ff5b40..2cbbb5e4 100644
--- a/src/humanloop/types/evaluation_log_response.py
+++ b/src/humanloop/types/evaluation_log_response.py
@@ -43,7 +43,9 @@ class EvaluationLogResponse(UncheckedBaseModel):
"""
if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
+ extra="allow", frozen=True
+ ) # type: ignore # Pydantic v2
else:
class Config:
@@ -56,10 +58,14 @@ class Config:
update_forward_refs(EvaluatorResponse, EvaluationLogResponse=EvaluationLogResponse)
update_forward_refs(FlowLogResponse, EvaluationLogResponse=EvaluationLogResponse)
update_forward_refs(FlowResponse, EvaluationLogResponse=EvaluationLogResponse)
-update_forward_refs(MonitoringEvaluatorResponse, EvaluationLogResponse=EvaluationLogResponse)
+update_forward_refs(
+ MonitoringEvaluatorResponse, EvaluationLogResponse=EvaluationLogResponse
+)
update_forward_refs(PromptLogResponse, EvaluationLogResponse=EvaluationLogResponse)
update_forward_refs(PromptResponse, EvaluationLogResponse=EvaluationLogResponse)
update_forward_refs(ToolLogResponse, EvaluationLogResponse=EvaluationLogResponse)
update_forward_refs(ToolResponse, EvaluationLogResponse=EvaluationLogResponse)
-update_forward_refs(VersionDeploymentResponse, EvaluationLogResponse=EvaluationLogResponse)
+update_forward_refs(
+ VersionDeploymentResponse, EvaluationLogResponse=EvaluationLogResponse
+)
update_forward_refs(VersionIdResponse, EvaluationLogResponse=EvaluationLogResponse)
diff --git a/src/humanloop/types/logs_association_type.py b/src/humanloop/types/logs_association_type.py
new file mode 100644
index 00000000..c904b93c
--- /dev/null
+++ b/src/humanloop/types/logs_association_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+LogsAssociationType = typing.Union[typing.Literal["dynamic", "fixed"], typing.Any]
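The trailing `typing.Any` in this alias is a forward-compatibility escape hatch; a minimal sketch of how it type-checks:

```python
# Sketch: "dynamic" and "fixed" are the known values; the Union with Any lets
# values added server-side later still deserialize without a type error.
from humanloop.types.logs_association_type import LogsAssociationType

association: LogsAssociationType = "dynamic"  # or "fixed"
```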
diff --git a/src/humanloop/types/overall_stats.py b/src/humanloop/types/overall_stats.py
index c3753321..7b0c35aa 100644
--- a/src/humanloop/types/overall_stats.py
+++ b/src/humanloop/types/overall_stats.py
@@ -33,7 +33,9 @@ class OverallStats(UncheckedBaseModel):
"""
if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
+ extra="allow", frozen=True
+ ) # type: ignore # Pydantic v2
else:
class Config:
diff --git a/src/humanloop/types/run_stats_response.py b/src/humanloop/types/run_stats_response.py
index dbc1be73..86f91f89 100644
--- a/src/humanloop/types/run_stats_response.py
+++ b/src/humanloop/types/run_stats_response.py
@@ -44,7 +44,9 @@ class RunStatsResponse(UncheckedBaseModel):
"""
if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
+ extra="allow", frozen=True
+ ) # type: ignore # Pydantic v2
else:
class Config:
diff --git a/src/humanloop/types/version_specification.py b/src/humanloop/types/version_specification.py
new file mode 100644
index 00000000..bb3464ce
--- /dev/null
+++ b/src/humanloop/types/version_specification.py
@@ -0,0 +1,48 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class VersionSpecification(UncheckedBaseModel):
+ """
+ Specification of a File version on Humanloop.
+
+ This can be done in a couple of ways:
+
+ - Specifying `version_id` directly.
+ - Specifying a File (and optionally an Environment).
+ - A File can be specified by either `path` or `file_id`.
+    - An Environment can be specified by `environment`. If no Environment is specified, the default Environment is used.
+ """
+
+ version_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Unique identifier for the File Version. If provided, none of the other fields should be specified.
+ """
+
+ path: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Path identifying a File. Provide either this or `file_id` if you want to specify a File.
+ """
+
+ file_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Unique identifier for the File. Provide either this or `path` if you want to specify a File.
+ """
+
+ environment: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Name of the Environment a Version is deployed to. Only provide this when specifying a File. If not provided (and a File is specified), the default Environment is used.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
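The `IS_PYDANTIC_V2` branch closing this model recurs across the patch (see `evaluation_log_response.py`, `overall_stats.py`, and `run_stats_response.py`); a self-contained sketch of the pattern, with a simplified version check standing in for the SDK's `pydantic_utilities` helper:

```python
# Sketch: pydantic v2 takes a ConfigDict, while v1 falls back to the legacy
# inner Config class. Only the branch matching the installed major version
# is ever executed at class-definition time.
import typing

import pydantic

IS_PYDANTIC_V2 = pydantic.VERSION.startswith("2.")  # simplified stand-in


class ExampleModel(pydantic.BaseModel):
    value: typing.Optional[str] = None

    if IS_PYDANTIC_V2:
        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
            extra="allow", frozen=True
        )  # type: ignore # Pydantic v2
    else:

        class Config:
            frozen = True
            smart_union = True
            extra = pydantic.Extra.allow
```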
From 5d7100381b3eaed834644489e2a554de2b8dda52 Mon Sep 17 00:00:00 2001
From: Harry Xie
Date: Tue, 29 Oct 2024 03:10:52 +0000
Subject: [PATCH 02/70] update eval_utils with runs api change
---
src/humanloop/eval_utils.py | 83 +++++++++----------------------------
1 file changed, 20 insertions(+), 63 deletions(-)
diff --git a/src/humanloop/eval_utils.py b/src/humanloop/eval_utils.py
index 1e003e97..e0c2b424 100644
--- a/src/humanloop/eval_utils.py
+++ b/src/humanloop/eval_utils.py
@@ -63,9 +63,7 @@
if not logger.hasHandlers():
logger.addHandler(console_handler)
-EvaluatorDict = Union[
- CodeEvaluatorDict, LLMEvaluatorDict, HumanEvaluatorDict, ExternalEvaluator
-]
+EvaluatorDict = Union[CodeEvaluatorDict, LLMEvaluatorDict, HumanEvaluatorDict, ExternalEvaluator]
Version = Union[FlowDict, PromptDict, ToolDict, EvaluatorDict]
FileType = Literal["flow", "prompt", "tool", "evaluator"]
@@ -187,13 +185,9 @@ def _run_eval(
function_ = file.pop("callable")
except KeyError as _:
if type_ == "flow":
- raise ValueError(
- "You must provide a `callable` for your Flow `file` to run a local eval."
- )
+ raise ValueError("You must provide a `callable` for your Flow `file` to run a local eval.")
else:
- logger.info(
- f"No `callable` provided for your {type_} file - will attempt to generate logs on Humanloop."
- )
+ logger.info(f"No `callable` provided for your {type_} file - will attempt to generate logs on Humanloop.")
file_dict = {**file, **version}
@@ -210,9 +204,7 @@ def _run_eval(
try:
_ = Prompt.parse_obj(version)
except ValidationError as error_:
- logger.error(
- msg=f"Invalid Prompt `version` in your `file` request. \n\nValidation error: \n)"
- )
+ logger.error(msg=f"Invalid Prompt `version` in your `file` request. \n\nValidation error: \n)")
raise error_
hl_file = client.prompts.upsert(**file_dict)
@@ -220,9 +212,7 @@ def _run_eval(
try:
_ = Tool.parse_obj(version)
except ValidationError as error_:
- logger.error(
- msg=f"Invalid Tool `version` in your `file` request. \n\nValidation error: \n)"
- )
+ logger.error(msg=f"Invalid Tool `version` in your `file` request. \n\nValidation error: \n)")
raise error_
hl_file = client.tools.upsert(**file_dict)
@@ -233,17 +223,13 @@ def _run_eval(
raise NotImplementedError(f"Unsupported File type: {type_}")
# Upsert the Dataset
- action = dataset.get(
- "action", "set"
- ) # set is the server default - None not allowed.
+ action = dataset.get("action", "set") # set is the server default - None not allowed.
if "datapoints" not in dataset:
dataset["datapoints"] = []
# Use `upsert` to get existing dataset ID if no datapoints provided, given we can't `get` on path.
action = "add"
hl_dataset = client.datasets.upsert(**dataset, action=action)
- hl_dataset = client.datasets.get(
- id=hl_dataset.id, version_id=hl_dataset.version_id, include_datapoints=True
- )
+ hl_dataset = client.datasets.get(id=hl_dataset.id, version_id=hl_dataset.version_id, include_datapoints=True)
# Upsert the local Evaluators; other Evaluators are just referenced by `path` or `id`
local_evaluators: List[Evaluator] = []
@@ -264,9 +250,7 @@ def _run_eval(
attributes={"code": inspect.getsource(eval_function)},
evaluator_type="external",
)
- _ = client.evaluators.upsert(
- id=evaluator.get("id"), path=evaluator.get("path"), spec=spec
- )
+ _ = client.evaluators.upsert(id=evaluator.get("id"), path=evaluator.get("path"), spec=spec)
# Validate upfront that the local Evaluators and Dataset fit
requires_target = False
@@ -326,9 +310,7 @@ def process_datapoint(datapoint: Datapoint):
datapoint_dict = datapoint.dict()
try:
if "messages" in datapoint_dict and datapoint_dict["messages"] is not None:
- output = function_(
- **datapoint_dict["inputs"], messages=datapoint_dict["messages"]
- )
+ output = function_(**datapoint_dict["inputs"], messages=datapoint_dict["messages"])
else:
output = function_(**datapoint_dict["inputs"])
@@ -337,9 +319,7 @@ def process_datapoint(datapoint: Datapoint):
output = json.dumps(output)
# throw error if it fails to serialize
except Exception as _:
- raise ValueError(
- f"Your {type_}'s `callable` must return a string or a JSON serializable object."
- )
+ raise ValueError(f"Your {type_}'s `callable` must return a string or a JSON serializable object.")
log = log_func(
inputs=datapoint.inputs,
output=output,
@@ -355,9 +335,7 @@ def process_datapoint(datapoint: Datapoint):
start_time=start_time,
end_time=datetime.now(),
)
- logger.warning(
- msg=f"\nYour {type_}'s `callable` failed for Datapoint: {datapoint.id}. \n Error: {str(e)}"
- )
+ logger.warning(msg=f"\nYour {type_}'s `callable` failed for Datapoint: {datapoint.id}. \n Error: {str(e)}")
# Apply local Evaluators
for local_evaluator in local_evaluators:
@@ -386,9 +364,7 @@ def process_datapoint(datapoint: Datapoint):
start_time=start_time,
end_time=datetime.now(),
)
- logger.warning(
- f"\nEvaluator {local_evaluator['path']} failed with error {str(e)}"
- )
+ logger.warning(f"\nEvaluator {local_evaluator['path']} failed with error {str(e)}")
# Execute the function and send the logs to Humanloop in parallel
total_datapoints = len(hl_dataset.datapoints)
@@ -403,18 +379,13 @@ def process_datapoint(datapoint: Datapoint):
)
completed_tasks = 0
with ThreadPoolExecutor(max_workers=workers) as executor:
- futures = [
- executor.submit(process_datapoint, datapoint)
- for datapoint in hl_dataset.datapoints
- ]
+ futures = [executor.submit(process_datapoint, datapoint) for datapoint in hl_dataset.datapoints]
for _ in as_completed(futures):
completed_tasks += 1
_progress_bar(total_datapoints, completed_tasks)
else:
# TODO: trigger run when updated API is available
- logger.info(
- f"{CYAN}\nRunning '{hl_file.name}' over the Dataset '{hl_dataset.name}'{RESET}"
- )
+ logger.info(f"{CYAN}\nRunning '{hl_file.name}' over the Dataset '{hl_dataset.name}'{RESET}")
# Wait for the Evaluation to complete then print the results
complete = False
@@ -437,10 +408,7 @@ def process_datapoint(datapoint: Datapoint):
# Skip `check_evaluation_improvement` if no thresholds were provided and there is only one run.
# (Or the logs would not be helpful)
- if (
- any(evaluator.get("threshold") is not None for evaluator in evaluators)
- or len(stats.run_stats) > 1
- ):
+ if any(evaluator.get("threshold") is not None for evaluator in evaluators) or len(stats.run_stats) > 1:
for evaluator in evaluators:
_, score, delta = check_evaluation_improvement(
evaluation=evaluation,
@@ -555,13 +523,9 @@ def get_evaluator_stats_by_path(
) -> Dict[str, Union[NumericStats, BooleanStats]]:
"""Get the Evaluator stats by path."""
# TODO: Update the API so this is not necessary
- evaluators_by_id = {
- evaluator.version.version_id: evaluator for evaluator in evaluation.evaluators
- }
+ evaluators_by_id = {evaluator.version.version_id: evaluator for evaluator in evaluation.evaluators}
evaluator_stats_by_path = {
- evaluators_by_id[
- evaluator_stat.evaluator_version_id
- ].version.path: evaluator_stat
+ evaluators_by_id[evaluator_stat.evaluator_version_id].version.path: evaluator_stat
for evaluator_stat in stat.evaluator_stats
}
return evaluator_stats_by_path
@@ -622,10 +586,7 @@ def check_evaluation_improvement(
stat=stats.run_stats[1], # Latest Run is at index 0; previous Run is at index 1
evaluation=evaluation,
)
- if (
- evaluator_path in latest_evaluator_stats_by_path
- and evaluator_path in previous_evaluator_stats_by_path
- ):
+ if evaluator_path in latest_evaluator_stats_by_path and evaluator_path in previous_evaluator_stats_by_path:
latest_evaluator_stat = latest_evaluator_stats_by_path[evaluator_path]
previous_evaluator_stat = previous_evaluator_stats_by_path[evaluator_path]
latest_score = get_score_from_evaluator_stat(stat=latest_evaluator_stat)
@@ -634,14 +595,10 @@ def check_evaluation_improvement(
raise ValueError(f"Could not find score for Evaluator {evaluator_path}.")
diff = round(latest_score - previous_score, 2)
if diff >= 0:
- logger.info(
- f"{CYAN}Change of [{diff}] for Evaluator {evaluator_path}{RESET}"
- )
+ logger.info(f"{CYAN}Change of [{diff}] for Evaluator {evaluator_path}{RESET}")
return True, latest_score, diff
else:
- logger.info(
- f"{CYAN}Change of [{diff}] for Evaluator {evaluator_path}{RESET}"
- )
+ logger.info(f"{CYAN}Change of [{diff}] for Evaluator {evaluator_path}{RESET}")
return False, latest_score, diff
else:
raise ValueError(f"Evaluator {evaluator_path} not found in the stats.")
From 9e50a5b0712d60465acfe54f75563a356a29e47d Mon Sep 17 00:00:00 2001
From: harry-humanloop <91058617+harry-humanloop@users.noreply.github.com>
Date: Sun, 3 Nov 2024 20:33:27 +0000
Subject: [PATCH 03/70] Eval runs (#26)
* Release 0.8.9
* update eval_utils with runs api change
* ruff
* clean up if condition so that url will be printed even for first run
---------
Co-authored-by: fern-api <115122769+fern-api[bot]@users.noreply.github.com>
---
.gitignore | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/.gitignore b/.gitignore
index 0da665fe..706998ab 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,3 +3,7 @@ dist/
__pycache__/
poetry.toml
.ruff_cache/
+.idea
+.vscode
+.DS_Store
+.env
\ No newline at end of file
From 0e3e0b4276c0d759974ea8fedfae0daecb31fdc4 Mon Sep 17 00:00:00 2001
From: fern-api <115122769+fern-api[bot]@users.noreply.github.com>
Date: Sun, 3 Nov 2024 20:38:23 +0000
Subject: [PATCH 04/70] Release 0.8.9a1
---
README.md | 4 +-
reference.md | 106 +++++++++++++++++-
src/humanloop/__init__.py | 6 -
src/humanloop/core/client_wrapper.py | 4 +-
src/humanloop/evaluations/client.py | 41 ++-----
src/humanloop/flows/client.py | 8 +-
src/humanloop/logs/client.py | 12 ++
src/humanloop/prompts/client.py | 4 +-
src/humanloop/requests/__init__.py | 2 -
.../requests/evaluation_log_response.py | 1 +
src/humanloop/requests/run_stats_response.py | 1 -
.../requests/version_specification.py | 37 ------
src/humanloop/types/__init__.py | 4 -
src/humanloop/types/logs_association_type.py | 5 -
src/humanloop/types/version_specification.py | 48 --------
15 files changed, 136 insertions(+), 147 deletions(-)
delete mode 100644 src/humanloop/requests/version_specification.py
delete mode 100644 src/humanloop/types/logs_association_type.py
delete mode 100644 src/humanloop/types/version_specification.py
diff --git a/README.md b/README.md
index 6dae7103..97779deb 100644
--- a/README.md
+++ b/README.md
@@ -41,7 +41,7 @@ client.prompts.log(
messages=[{"role": "user", "content": "What really happened at Roswell?"}],
inputs={"person": "Trump"},
created_at=datetime.datetime.fromisoformat(
- "2024-07-18 23:29:35.178000+00:00",
+ "2024-07-19 00:29:35.178000+00:00",
),
provider_latency=6.5931549072265625,
output_message={
@@ -88,7 +88,7 @@ async def main() -> None:
],
inputs={"person": "Trump"},
created_at=datetime.datetime.fromisoformat(
- "2024-07-18 23:29:35.178000+00:00",
+ "2024-07-19 00:29:35.178000+00:00",
),
provider_latency=6.5931549072265625,
output_message={
diff --git a/reference.md b/reference.md
index b08f0261..c4bba671 100644
--- a/reference.md
+++ b/reference.md
@@ -59,7 +59,7 @@ client.prompts.log(
messages=[{"role": "user", "content": "What really happened at Roswell?"}],
inputs={"person": "Trump"},
created_at=datetime.datetime.fromisoformat(
- "2024-07-18 23:29:35.178000+00:00",
+ "2024-07-19 00:29:35.178000+00:00",
),
provider_latency=6.5931549072265625,
output_message={
@@ -5216,6 +5216,11 @@ client.evaluators.log(
**marked_completed:** `typing.Optional[bool]` — Whether the Log has been manually marked as completed by a user.
+<<<<<<< HEAD
+
+=======
+
+>>>>>>> 32f482a (Release 0.8.9a1)
@@ -5223,8 +5228,14 @@ client.evaluators.log(
-
+<<<<<<< HEAD
+**spec:** `typing.Optional[CreateEvaluatorLogRequestSpecParams]`
+
+=======
**spec:** `typing.Optional[CreateEvaluatorLogRequestSpecParams]`
+>>>>>>> 32f482a (Release 0.8.9a1)
+
@@ -5423,8 +5434,14 @@ client.evaluators.upsert(
-
+<<<<<<< HEAD
+**spec:** `EvaluatorRequestSpecParams`
+
+=======
**spec:** `EvaluatorRequestSpecParams`
+>>>>>>> 32f482a (Release 0.8.9a1)
+
@@ -6296,10 +6313,10 @@ client.flows.log(
output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.",
trace_status="incomplete",
start_time=datetime.datetime.fromisoformat(
- "2024-07-08 21:40:35+00:00",
+ "2024-07-08 22:40:35+00:00",
),
end_time=datetime.datetime.fromisoformat(
- "2024-07-08 21:40:39+00:00",
+ "2024-07-08 22:40:39+00:00",
),
)
@@ -8168,6 +8185,11 @@ client.files.list()
Retrieve a list of Evaluations for the specified File.
+<<<<<<< HEAD
+
+=======
+
+>>>>>>> 32f482a (Release 0.8.9a1)
@@ -8305,6 +8327,11 @@ client.evaluations.create(
**evaluators:** `typing.Sequence[CreateEvaluationRequestEvaluatorsItemParams]` — The Evaluators used to evaluate.
+<<<<<<< HEAD
+
+=======
+
+>>>>>>> 32f482a (Release 0.8.9a1)
@@ -8354,6 +8381,11 @@ client.evaluations.create(
Add Evaluators to an Evaluation.
The Evaluators will be run on the Logs generated for the Evaluation.
+<<<<<<< HEAD
+
+=======
+
+>>>>>>> 32f482a (Release 0.8.9a1)
@@ -8403,6 +8435,11 @@ client.evaluations.add_evaluators(
**evaluators:** `typing.Sequence[AddEvaluatorsRequestEvaluatorsItemParams]` — The Evaluators to add to this Evaluation.
+<<<<<<< HEAD
+
+=======
+
+>>>>>>> 32f482a (Release 0.8.9a1)
@@ -8436,6 +8473,11 @@ client.evaluations.add_evaluators(
Remove an Evaluator from an Evaluation.
The Evaluator will no longer be run on the Logs in the Evaluation.
+<<<<<<< HEAD
+
+=======
+
+>>>>>>> 32f482a (Release 0.8.9a1)
@@ -8522,6 +8564,11 @@ such as its name.
To get the Runs associated with the Evaluation, use the `GET /evaluations/{id}/runs` endpoint.
To retrieve stats for the Evaluation, use the `GET /evaluations/{id}/stats` endpoint.
+<<<<<<< HEAD
+
+=======
+
+>>>>>>> 32f482a (Release 0.8.9a1)
@@ -8595,6 +8642,11 @@ client.evaluations.get(
Delete an Evaluation.
The Runs and Evaluators in the Evaluation will not be deleted.
+<<<<<<< HEAD
+
+=======
+
+>>>>>>> 32f482a (Release 0.8.9a1)
@@ -8798,6 +8850,11 @@ client.evaluations.create_run(
**dataset:** `typing.Optional[CreateRunRequestDatasetParams]` — Dataset to use in this Run.
+<<<<<<< HEAD
+
+=======
+
+>>>>>>> 32f482a (Release 0.8.9a1)
@@ -8806,6 +8863,11 @@ client.evaluations.create_run(
**version:** `typing.Optional[CreateRunRequestVersionParams]` — Version to use in this Run.
+<<<<<<< HEAD
+
+=======
+
+>>>>>>> 32f482a (Release 0.8.9a1)
@@ -8822,6 +8884,11 @@ client.evaluations.create_run(
**use_existing_logs:** `typing.Optional[bool]` — If `True`, the Run will be initialized with existing Logs associated with the Dataset and Version. If `False`, the Run will be initialized with no Logs. Can only be set to `True` when both `dataset` and `version` are provided.
+<<<<<<< HEAD
+
+=======
+
+>>>>>>> 32f482a (Release 0.8.9a1)
@@ -8856,6 +8923,11 @@ Add an existing Run to the specified Evaluation.
This is useful if you want to compare the Runs in this Evaluation with an existing Run
that exists within another Evaluation.
+<<<<<<< HEAD
+
+=======
+
+>>>>>>> 32f482a (Release 0.8.9a1)
@@ -9022,6 +9094,11 @@ Update an Evaluation Run.
Specify `control=true` to use this Run as the control Run for the Evaluation.
You can cancel a running/pending Run, or mark a Run that uses external or human Evaluators as completed.
+<<<<<<< HEAD
+
+=======
+
+>>>>>>> 32f482a (Release 0.8.9a1)
@@ -9079,6 +9156,11 @@ client.evaluations.update_evaluation_run(
**control:** `typing.Optional[bool]` — If `True`, this Run will be used as the control in the Evaluation. Stats for other Runs will be compared to this Run. This will replace any existing control Run.
+<<<<<<< HEAD
+
+=======
+
+>>>>>>> 32f482a (Release 0.8.9a1)
@@ -9087,6 +9169,11 @@ client.evaluations.update_evaluation_run(
**status:** `typing.Optional[EvaluationStatus]` — Used to set the Run to `cancelled` or `completed`. Can only be used if the Run is currently `pending` or `running`.
+<<<<<<< HEAD
+
+=======
+
+>>>>>>> 32f482a (Release 0.8.9a1)
@@ -9118,6 +9205,11 @@ client.evaluations.update_evaluation_run(
Add the specified Logs to a Run.
+<<<<<<< HEAD
+
+=======
+
+>>>>>>> 32f482a (Release 0.8.9a1)
@@ -9505,6 +9597,14 @@ for page in response.iter_pages():
-
+**sample_n:** `typing.Optional[int]` — If provided, only a random sample of approximately N Logs will be returned.
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
diff --git a/src/humanloop/__init__.py b/src/humanloop/__init__.py
index 780246da..4ce861b0 100644
--- a/src/humanloop/__init__.py
+++ b/src/humanloop/__init__.py
@@ -85,7 +85,6 @@
ListTools,
LlmEvaluatorRequest,
LogResponse,
- LogsAssociationType,
ModelEndpoints,
ModelProviders,
MonitoringEvaluatorEnvironmentRequest,
@@ -152,7 +151,6 @@
VersionIdResponse,
VersionIdResponseVersion,
VersionReferenceResponse,
- VersionSpecification,
VersionStatsResponse,
VersionStatsResponseEvaluatorVersionStatsItem,
VersionStatus,
@@ -311,7 +309,6 @@
VersionIdResponseParams,
VersionIdResponseVersionParams,
VersionReferenceResponseParams,
- VersionSpecificationParams,
VersionStatsResponseEvaluatorVersionStatsItemParams,
VersionStatsResponseParams,
)
@@ -486,7 +483,6 @@
"LlmEvaluatorRequestParams",
"LogResponse",
"LogResponseParams",
- "LogsAssociationType",
"ModelEndpoints",
"ModelProviders",
"MonitoringEvaluatorEnvironmentRequest",
@@ -616,8 +612,6 @@
"VersionIdResponseVersionParams",
"VersionReferenceResponse",
"VersionReferenceResponseParams",
- "VersionSpecification",
- "VersionSpecificationParams",
"VersionStatsResponse",
"VersionStatsResponseEvaluatorVersionStatsItem",
"VersionStatsResponseEvaluatorVersionStatsItemParams",
diff --git a/src/humanloop/core/client_wrapper.py b/src/humanloop/core/client_wrapper.py
index c08b9b20..2fb335f8 100644
--- a/src/humanloop/core/client_wrapper.py
+++ b/src/humanloop/core/client_wrapper.py
@@ -7,9 +7,7 @@
class BaseClientWrapper:
- def __init__(
- self, *, api_key: str, base_url: str, timeout: typing.Optional[float] = None
- ):
+ def __init__(self, *, api_key: str, base_url: str, timeout: typing.Optional[float] = None):
self.api_key = api_key
self._base_url = base_url
self._timeout = timeout
diff --git a/src/humanloop/evaluations/client.py b/src/humanloop/evaluations/client.py
index c1b841c7..5f696145 100644
--- a/src/humanloop/evaluations/client.py
+++ b/src/humanloop/evaluations/client.py
@@ -30,6 +30,7 @@
from ..requests.version_specification import VersionSpecificationParams
from ..types.logs_association_type import LogsAssociationType
from ..types.evaluation_run_response import EvaluationRunResponse
+from ..types.evaluation_status import EvaluationStatus
from ..types.evaluation_stats import EvaluationStats
from ..types.paginated_data_evaluation_log_response import (
PaginatedDataEvaluationLogResponse,
@@ -191,9 +192,7 @@ def create(
"name": name,
"evaluators": convert_and_respect_annotation_metadata(
object_=evaluators,
- annotation=typing.Sequence[
- CreateEvaluationRequestEvaluatorsItemParams
- ],
+ annotation=typing.Sequence[CreateEvaluationRequestEvaluatorsItemParams],
direction="write",
),
},
@@ -270,9 +269,7 @@ def add_evaluators(
json={
"evaluators": convert_and_respect_annotation_metadata(
object_=evaluators,
- annotation=typing.Sequence[
- AddEvaluatorsRequestEvaluatorsItemParams
- ],
+ annotation=typing.Sequence[AddEvaluatorsRequestEvaluatorsItemParams],
direction="write",
),
},
@@ -372,9 +369,7 @@ def remove_evaluator(
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- def get(
- self, id: str, *, request_options: typing.Optional[RequestOptions] = None
- ) -> EvaluationResponse:
+ def get(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> EvaluationResponse:
"""
Get an Evaluation.
@@ -437,9 +432,7 @@ def get(
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- def delete(
- self, id: str, *, request_options: typing.Optional[RequestOptions] = None
- ) -> None:
+ def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
"""
Delete an Evaluation.
@@ -948,9 +941,7 @@ def add_logs_to_run(
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- def get_stats(
- self, id: str, *, request_options: typing.Optional[RequestOptions] = None
- ) -> EvaluationStats:
+ def get_stats(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> EvaluationStats:
"""
Get Evaluation Stats.
@@ -1253,9 +1244,7 @@ async def main() -> None:
"name": name,
"evaluators": convert_and_respect_annotation_metadata(
object_=evaluators,
- annotation=typing.Sequence[
- CreateEvaluationRequestEvaluatorsItemParams
- ],
+ annotation=typing.Sequence[CreateEvaluationRequestEvaluatorsItemParams],
direction="write",
),
},
@@ -1340,9 +1329,7 @@ async def main() -> None:
json={
"evaluators": convert_and_respect_annotation_metadata(
object_=evaluators,
- annotation=typing.Sequence[
- AddEvaluatorsRequestEvaluatorsItemParams
- ],
+ annotation=typing.Sequence[AddEvaluatorsRequestEvaluatorsItemParams],
direction="write",
),
},
@@ -1450,9 +1437,7 @@ async def main() -> None:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def get(
- self, id: str, *, request_options: typing.Optional[RequestOptions] = None
- ) -> EvaluationResponse:
+ async def get(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> EvaluationResponse:
"""
Get an Evaluation.
@@ -1523,9 +1508,7 @@ async def main() -> None:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def delete(
- self, id: str, *, request_options: typing.Optional[RequestOptions] = None
- ) -> None:
+ async def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
"""
Delete an Evaluation.
@@ -2090,9 +2073,7 @@ async def main() -> None:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def get_stats(
- self, id: str, *, request_options: typing.Optional[RequestOptions] = None
- ) -> EvaluationStats:
+ async def get_stats(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> EvaluationStats:
"""
Get Evaluation Stats.
diff --git a/src/humanloop/flows/client.py b/src/humanloop/flows/client.py
index 4b4671e7..1884b45c 100644
--- a/src/humanloop/flows/client.py
+++ b/src/humanloop/flows/client.py
@@ -193,10 +193,10 @@ def log(
output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.",
trace_status="incomplete",
start_time=datetime.datetime.fromisoformat(
- "2024-07-08 21:40:35+00:00",
+ "2024-07-08 22:40:35+00:00",
),
end_time=datetime.datetime.fromisoformat(
- "2024-07-08 21:40:39+00:00",
+ "2024-07-08 22:40:39+00:00",
),
)
"""
@@ -1357,10 +1357,10 @@ async def main() -> None:
output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.",
trace_status="incomplete",
start_time=datetime.datetime.fromisoformat(
- "2024-07-08 21:40:35+00:00",
+ "2024-07-08 22:40:35+00:00",
),
end_time=datetime.datetime.fromisoformat(
- "2024-07-08 21:40:39+00:00",
+ "2024-07-08 22:40:39+00:00",
),
)
diff --git a/src/humanloop/logs/client.py b/src/humanloop/logs/client.py
index b6cfadb9..3e38e860 100644
--- a/src/humanloop/logs/client.py
+++ b/src/humanloop/logs/client.py
@@ -37,6 +37,7 @@ def list(
end_date: typing.Optional[dt.datetime] = None,
include_parent: typing.Optional[bool] = None,
in_trace_filter: typing.Optional[typing.Union[bool, typing.Sequence[bool]]] = None,
+ sample_n: typing.Optional[int] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> SyncPager[LogResponse]:
"""
@@ -77,6 +78,9 @@ def list(
in_trace_filter : typing.Optional[typing.Union[bool, typing.Sequence[bool]]]
If true, return Logs that are associated to a Trace; if false, return Logs that are not associated to a Trace.
+ sample_n : typing.Optional[int]
+ If provided, only a random sample of approximately N Logs will be returned.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -118,6 +122,7 @@ def list(
"end_date": serialize_datetime(end_date) if end_date is not None else None,
"include_parent": include_parent,
"in_trace_filter": in_trace_filter,
+ "sample_n": sample_n,
},
request_options=request_options,
)
@@ -143,6 +148,7 @@ def list(
end_date=end_date,
include_parent=include_parent,
in_trace_filter=in_trace_filter,
+ sample_n=sample_n,
request_options=request_options,
)
_items = _parsed_response.records
@@ -296,6 +302,7 @@ async def list(
end_date: typing.Optional[dt.datetime] = None,
include_parent: typing.Optional[bool] = None,
in_trace_filter: typing.Optional[typing.Union[bool, typing.Sequence[bool]]] = None,
+ sample_n: typing.Optional[int] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncPager[LogResponse]:
"""
@@ -336,6 +343,9 @@ async def list(
in_trace_filter : typing.Optional[typing.Union[bool, typing.Sequence[bool]]]
If true, return Logs that are associated to a Trace; if false, return Logs that are not associated to a Trace.
+ sample_n : typing.Optional[int]
+ If provided, only a random sample of approximately N Logs will be returned.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -385,6 +395,7 @@ async def main() -> None:
"end_date": serialize_datetime(end_date) if end_date is not None else None,
"include_parent": include_parent,
"in_trace_filter": in_trace_filter,
+ "sample_n": sample_n,
},
request_options=request_options,
)
@@ -410,6 +421,7 @@ async def main() -> None:
end_date=end_date,
include_parent=include_parent,
in_trace_filter=in_trace_filter,
+ sample_n=sample_n,
request_options=request_options,
)
_items = _parsed_response.records
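A hedged usage sketch for the new `sample_n` query parameter, assuming the top-level `Humanloop` client and that the returned pager iterates Logs directly (the file ID is a placeholder):

```python
# Sketch: request a random sample of approximately 10 Logs for a File.
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")
response = client.logs.list(file_id="...", sample_n=10)
for log in response:  # assumed: the pager yields LogResponse items
    print(log.id)
```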
diff --git a/src/humanloop/prompts/client.py b/src/humanloop/prompts/client.py
index 88cfa117..22e2747f 100644
--- a/src/humanloop/prompts/client.py
+++ b/src/humanloop/prompts/client.py
@@ -232,7 +232,7 @@ def log(
messages=[{"role": "user", "content": "What really happened at Roswell?"}],
inputs={"person": "Trump"},
created_at=datetime.datetime.fromisoformat(
- "2024-07-18 23:29:35.178000+00:00",
+ "2024-07-19 00:29:35.178000+00:00",
),
provider_latency=6.5931549072265625,
output_message={
@@ -2097,7 +2097,7 @@ async def main() -> None:
],
inputs={"person": "Trump"},
created_at=datetime.datetime.fromisoformat(
- "2024-07-18 23:29:35.178000+00:00",
+ "2024-07-19 00:29:35.178000+00:00",
),
provider_latency=6.5931549072265625,
output_message={
diff --git a/src/humanloop/requests/__init__.py b/src/humanloop/requests/__init__.py
index 4e9680b0..669a1180 100644
--- a/src/humanloop/requests/__init__.py
+++ b/src/humanloop/requests/__init__.py
@@ -127,7 +127,6 @@
from .version_id_response import VersionIdResponseParams
from .version_id_response_version import VersionIdResponseVersionParams
from .version_reference_response import VersionReferenceResponseParams
-from .version_specification import VersionSpecificationParams
from .version_stats_response import VersionStatsResponseParams
from .version_stats_response_evaluator_version_stats_item import VersionStatsResponseEvaluatorVersionStatsItemParams
@@ -249,7 +248,6 @@
"VersionIdResponseParams",
"VersionIdResponseVersionParams",
"VersionReferenceResponseParams",
- "VersionSpecificationParams",
"VersionStatsResponseEvaluatorVersionStatsItemParams",
"VersionStatsResponseParams",
]
diff --git a/src/humanloop/requests/evaluation_log_response.py b/src/humanloop/requests/evaluation_log_response.py
index c4fdae63..e423d2f7 100644
--- a/src/humanloop/requests/evaluation_log_response.py
+++ b/src/humanloop/requests/evaluation_log_response.py
@@ -1,5 +1,6 @@
# This file was auto-generated by Fern from our API Definition.
+import typing_extensions
import typing_extensions
from .datapoint_response import DatapointResponseParams
from .log_response import LogResponseParams
diff --git a/src/humanloop/requests/run_stats_response.py b/src/humanloop/requests/run_stats_response.py
index cc97328a..5491ee6d 100644
--- a/src/humanloop/requests/run_stats_response.py
+++ b/src/humanloop/requests/run_stats_response.py
@@ -1,6 +1,5 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
import typing_extensions
import typing
from .run_stats_response_evaluator_stats_item import (
diff --git a/src/humanloop/requests/version_specification.py b/src/humanloop/requests/version_specification.py
deleted file mode 100644
index 34606269..00000000
--- a/src/humanloop/requests/version_specification.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing_extensions
-import typing_extensions
-
-
-class VersionSpecificationParams(typing_extensions.TypedDict):
- """
- Specification of a File version on Humanloop.
-
- This can be done in a couple of ways:
-
- - Specifying `version_id` directly.
- - Specifying a File (and optionally an Environment).
- - A File can be specified by either `path` or `file_id`.
-    - An Environment can be specified by `environment`. If no Environment is specified, the default Environment is used.
- """
-
- version_id: typing_extensions.NotRequired[str]
- """
- Unique identifier for the File Version. If provided, none of the other fields should be specified.
- """
-
- path: typing_extensions.NotRequired[str]
- """
- Path identifying a File. Provide either this or `file_id` if you want to specify a File.
- """
-
- file_id: typing_extensions.NotRequired[str]
- """
- Unique identifier for the File. Provide either this or `path` if you want to specify a File.
- """
-
- environment: typing_extensions.NotRequired[str]
- """
- Name of the Environment a Version is deployed to. Only provide this when specifying a File. If not provided (and a File is specified), the default Environment is used.
- """
diff --git a/src/humanloop/types/__init__.py b/src/humanloop/types/__init__.py
index 4e592987..df3f892b 100644
--- a/src/humanloop/types/__init__.py
+++ b/src/humanloop/types/__init__.py
@@ -86,7 +86,6 @@
from .list_tools import ListTools
from .llm_evaluator_request import LlmEvaluatorRequest
from .log_response import LogResponse
-from .logs_association_type import LogsAssociationType
from .model_endpoints import ModelEndpoints
from .model_providers import ModelProviders
from .monitoring_evaluator_environment_request import MonitoringEvaluatorEnvironmentRequest
@@ -157,7 +156,6 @@
from .version_id_response import VersionIdResponse
from .version_id_response_version import VersionIdResponseVersion
from .version_reference_response import VersionReferenceResponse
-from .version_specification import VersionSpecification
from .version_stats_response import VersionStatsResponse
from .version_stats_response_evaluator_version_stats_item import VersionStatsResponseEvaluatorVersionStatsItem
from .version_status import VersionStatus
@@ -247,7 +245,6 @@
"ListTools",
"LlmEvaluatorRequest",
"LogResponse",
- "LogsAssociationType",
"ModelEndpoints",
"ModelProviders",
"MonitoringEvaluatorEnvironmentRequest",
@@ -314,7 +311,6 @@
"VersionIdResponse",
"VersionIdResponseVersion",
"VersionReferenceResponse",
- "VersionSpecification",
"VersionStatsResponse",
"VersionStatsResponseEvaluatorVersionStatsItem",
"VersionStatus",
diff --git a/src/humanloop/types/logs_association_type.py b/src/humanloop/types/logs_association_type.py
deleted file mode 100644
index c904b93c..00000000
--- a/src/humanloop/types/logs_association_type.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-LogsAssociationType = typing.Union[typing.Literal["dynamic", "fixed"], typing.Any]
diff --git a/src/humanloop/types/version_specification.py b/src/humanloop/types/version_specification.py
deleted file mode 100644
index bb3464ce..00000000
--- a/src/humanloop/types/version_specification.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from ..core.unchecked_base_model import UncheckedBaseModel
-import typing
-import pydantic
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-
-
-class VersionSpecification(UncheckedBaseModel):
- """
- Specification of a File version on Humanloop.
-
- This can be done in a couple of ways:
-
- - Specifying `version_id` directly.
- - Specifying a File (and optionally an Environment).
- - A File can be specified by either `path` or `file_id`.
- - An Environment can be specified by `environment`. If no Environment is specified, the default Environment is used.
- """
-
- version_id: typing.Optional[str] = pydantic.Field(default=None)
- """
- Unique identifier for the File Version. If provided, none of the other fields should be specified.
- """
-
- path: typing.Optional[str] = pydantic.Field(default=None)
- """
- Path identifying a File. Provide either this or `file_id` if you want to specify a File.
- """
-
- file_id: typing.Optional[str] = pydantic.Field(default=None)
- """
- Unique identifier for the File. Provide either this or `path` if you want to specify a File.
- """
-
- environment: typing.Optional[str] = pydantic.Field(default=None)
- """
- Name of the Environment a Version is deployed to. Only provide this when specifying a File. If not provided (and a File is specified), the default Environment is used.
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
From 4debaa0dea24cb9ae3b661aa8d9a5a2cd51bc267 Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Fri, 25 Oct 2024 17:09:21 +0300
Subject: [PATCH 05/70] SDK Decorators V1
---
.fernignore | 5 +
.gitignore | 2 +-
poetry.lock | 756 +++++++++++++++++++++-
pyproject.toml | 11 +-
src/humanloop/__init__.py | 318 +++++----
src/humanloop/client.py | 55 +-
src/humanloop/decorators/__init__.py | 0
src/humanloop/decorators/flow.py | 95 +++
src/humanloop/decorators/helpers.py | 9 +
src/humanloop/decorators/prompt.py | 131 ++++
src/humanloop/decorators/tool.py | 205 ++++++
src/humanloop/flows/client.py | 8 +-
src/humanloop/otel/__init__.py | 80 +++
src/humanloop/otel/constants.py | 4 +
src/humanloop/otel/exporter.py | 133 ++++
src/humanloop/otel/helpers.py | 198 ++++++
src/humanloop/otel/processor.py | 173 +++++
src/humanloop/prompts/client.py | 4 +-
src/humanloop/requests/flow_response.py | 2 +-
src/humanloop/types/flow_response.py | 2 +-
tests/conftest.py | 114 ++++
tests/decorators/__init__.py | 0
tests/decorators/test_flow_decorator.py | 219 +++++++
tests/decorators/test_prompt_decorator.py | 97 +++
tests/decorators/test_tool_decorator.py | 43 ++
tests/otel/__init__.py | 0
tests/otel/test_helpers.py | 153 +++++
27 files changed, 2631 insertions(+), 186 deletions(-)
create mode 100644 src/humanloop/decorators/__init__.py
create mode 100644 src/humanloop/decorators/flow.py
create mode 100644 src/humanloop/decorators/helpers.py
create mode 100644 src/humanloop/decorators/prompt.py
create mode 100644 src/humanloop/decorators/tool.py
create mode 100644 src/humanloop/otel/__init__.py
create mode 100644 src/humanloop/otel/constants.py
create mode 100644 src/humanloop/otel/exporter.py
create mode 100644 src/humanloop/otel/helpers.py
create mode 100644 src/humanloop/otel/processor.py
create mode 100644 tests/conftest.py
create mode 100644 tests/decorators/__init__.py
create mode 100644 tests/decorators/test_flow_decorator.py
create mode 100644 tests/decorators/test_prompt_decorator.py
create mode 100644 tests/decorators/test_tool_decorator.py
create mode 100644 tests/otel/__init__.py
create mode 100644 tests/otel/test_helpers.py
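The new modules split the decorator feature in two: `src/humanloop/decorators/` provides the `flow`, `prompt`, and `tool` wrappers, while `src/humanloop/otel/` contributes the OpenTelemetry exporter and span processor that turn captured spans into Humanloop Logs. A hypothetical usage sketch follows; the decorator entry points and their `path` parameter are assumptions inferred from the module and test names above, not guaranteed by this patch:

```python
# Hypothetical sketch only: entry-point names and signatures are inferred
# from the new decorators/ and otel/ modules, not confirmed by this patch.
from humanloop import Humanloop

client = Humanloop(api_key="hl_placeholder")  # placeholder key


@client.tool(path="demo/add")  # assumed decorator exposed on the client
def add(a: int, b: int) -> int:
    """Tool call traced as a child span."""
    return a + b


@client.flow(path="demo/pipeline")  # assumed flow decorator
def pipeline(question: str) -> str:
    # Spans created here are collected by the OTel span processor and
    # shipped to Humanloop by the exporter as Flow/Tool Logs.
    return f"{question} -> {add(2, 2)}"


pipeline("what is 2 + 2?")
```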
diff --git a/.fernignore b/.fernignore
index 3a38428d..a24236fe 100644
--- a/.fernignore
+++ b/.fernignore
@@ -5,3 +5,8 @@ src/humanloop/prompt_utils.py
src/humanloop/client.py
mypy.ini
README.md
+
+# Directories used by SDK decorators
+
+src/humanloop/decorators/*
+src/humanloop/otel/*
diff --git a/.gitignore b/.gitignore
index 706998ab..2f7c4926 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,4 +6,4 @@ poetry.toml
.idea
.vscode
.DS_Store
-.env
\ No newline at end of file
+.env
diff --git a/poetry.lock b/poetry.lock
index 9eff35b0..1cd88b54 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -11,9 +11,6 @@ files = [
{file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"},
]
-[package.dependencies]
-typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""}
-
[[package]]
name = "anyio"
version = "4.5.2"
@@ -47,6 +44,120 @@ files = [
{file = "certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9"},
]
+[[package]]
+name = "charset-normalizer"
+version = "3.4.0"
+description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
+optional = false
+python-versions = ">=3.7.0"
+files = [
+ {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4f9fc98dad6c2eaa32fc3af1417d95b5e3d08aff968df0cd320066def971f9a6"},
+ {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0de7b687289d3c1b3e8660d0741874abe7888100efe14bd0f9fd7141bcbda92b"},
+ {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5ed2e36c3e9b4f21dd9422f6893dec0abf2cca553af509b10cd630f878d3eb99"},
+ {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d3ff7fc90b98c637bda91c89d51264a3dcf210cade3a2c6f838c7268d7a4ca"},
+ {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1110e22af8ca26b90bd6364fe4c763329b0ebf1ee213ba32b68c73de5752323d"},
+ {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:86f4e8cca779080f66ff4f191a685ced73d2f72d50216f7112185dc02b90b9b7"},
+ {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f683ddc7eedd742e2889d2bfb96d69573fde1d92fcb811979cdb7165bb9c7d3"},
+ {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:27623ba66c183eca01bf9ff833875b459cad267aeeb044477fedac35e19ba907"},
+ {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f606a1881d2663630ea5b8ce2efe2111740df4b687bd78b34a8131baa007f79b"},
+ {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0b309d1747110feb25d7ed6b01afdec269c647d382c857ef4663bbe6ad95a912"},
+ {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:136815f06a3ae311fae551c3df1f998a1ebd01ddd424aa5603a4336997629e95"},
+ {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:14215b71a762336254351b00ec720a8e85cada43b987da5a042e4ce3e82bd68e"},
+ {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:79983512b108e4a164b9c8d34de3992f76d48cadc9554c9e60b43f308988aabe"},
+ {file = "charset_normalizer-3.4.0-cp310-cp310-win32.whl", hash = "sha256:c94057af19bc953643a33581844649a7fdab902624d2eb739738a30e2b3e60fc"},
+ {file = "charset_normalizer-3.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:55f56e2ebd4e3bc50442fbc0888c9d8c94e4e06a933804e2af3e89e2f9c1c749"},
+ {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0d99dd8ff461990f12d6e42c7347fd9ab2532fb70e9621ba520f9e8637161d7c"},
+ {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c57516e58fd17d03ebe67e181a4e4e2ccab1168f8c2976c6a334d4f819fe5944"},
+ {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6dba5d19c4dfab08e58d5b36304b3f92f3bd5d42c1a3fa37b5ba5cdf6dfcbcee"},
+ {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf4475b82be41b07cc5e5ff94810e6a01f276e37c2d55571e3fe175e467a1a1c"},
+ {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce031db0408e487fd2775d745ce30a7cd2923667cf3b69d48d219f1d8f5ddeb6"},
+ {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ff4e7cdfdb1ab5698e675ca622e72d58a6fa2a8aa58195de0c0061288e6e3ea"},
+ {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3710a9751938947e6327ea9f3ea6332a09bf0ba0c09cae9cb1f250bd1f1549bc"},
+ {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82357d85de703176b5587dbe6ade8ff67f9f69a41c0733cf2425378b49954de5"},
+ {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:47334db71978b23ebcf3c0f9f5ee98b8d65992b65c9c4f2d34c2eaf5bcaf0594"},
+ {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8ce7fd6767a1cc5a92a639b391891bf1c268b03ec7e021c7d6d902285259685c"},
+ {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f1a2f519ae173b5b6a2c9d5fa3116ce16e48b3462c8b96dfdded11055e3d6365"},
+ {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:63bc5c4ae26e4bc6be6469943b8253c0fd4e4186c43ad46e713ea61a0ba49129"},
+ {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bcb4f8ea87d03bc51ad04add8ceaf9b0f085ac045ab4d74e73bbc2dc033f0236"},
+ {file = "charset_normalizer-3.4.0-cp311-cp311-win32.whl", hash = "sha256:9ae4ef0b3f6b41bad6366fb0ea4fc1d7ed051528e113a60fa2a65a9abb5b1d99"},
+ {file = "charset_normalizer-3.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cee4373f4d3ad28f1ab6290684d8e2ebdb9e7a1b74fdc39e4c211995f77bec27"},
+ {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0713f3adb9d03d49d365b70b84775d0a0d18e4ab08d12bc46baa6132ba78aaf6"},
+ {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:de7376c29d95d6719048c194a9cf1a1b0393fbe8488a22008610b0361d834ecf"},
+ {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4a51b48f42d9358460b78725283f04bddaf44a9358197b889657deba38f329db"},
+ {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b295729485b06c1a0683af02a9e42d2caa9db04a373dc38a6a58cdd1e8abddf1"},
+ {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee803480535c44e7f5ad00788526da7d85525cfefaf8acf8ab9a310000be4b03"},
+ {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d59d125ffbd6d552765510e3f31ed75ebac2c7470c7274195b9161a32350284"},
+ {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cda06946eac330cbe6598f77bb54e690b4ca93f593dee1568ad22b04f347c15"},
+ {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07afec21bbbbf8a5cc3651aa96b980afe2526e7f048fdfb7f1014d84acc8b6d8"},
+ {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6b40e8d38afe634559e398cc32b1472f376a4099c75fe6299ae607e404c033b2"},
+ {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b8dcd239c743aa2f9c22ce674a145e0a25cb1566c495928440a181ca1ccf6719"},
+ {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:84450ba661fb96e9fd67629b93d2941c871ca86fc38d835d19d4225ff946a631"},
+ {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:44aeb140295a2f0659e113b31cfe92c9061622cadbc9e2a2f7b8ef6b1e29ef4b"},
+ {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1db4e7fefefd0f548d73e2e2e041f9df5c59e178b4c72fbac4cc6f535cfb1565"},
+ {file = "charset_normalizer-3.4.0-cp312-cp312-win32.whl", hash = "sha256:5726cf76c982532c1863fb64d8c6dd0e4c90b6ece9feb06c9f202417a31f7dd7"},
+ {file = "charset_normalizer-3.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:b197e7094f232959f8f20541ead1d9862ac5ebea1d58e9849c1bf979255dfac9"},
+ {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:dd4eda173a9fcccb5f2e2bd2a9f423d180194b1bf17cf59e3269899235b2a114"},
+ {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9e3c4c9e1ed40ea53acf11e2a386383c3304212c965773704e4603d589343ed"},
+ {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:92a7e36b000bf022ef3dbb9c46bfe2d52c047d5e3f3343f43204263c5addc250"},
+ {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54b6a92d009cbe2fb11054ba694bc9e284dad30a26757b1e372a1fdddaf21920"},
+ {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ffd9493de4c922f2a38c2bf62b831dcec90ac673ed1ca182fe11b4d8e9f2a64"},
+ {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:35c404d74c2926d0287fbd63ed5d27eb911eb9e4a3bb2c6d294f3cfd4a9e0c23"},
+ {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4796efc4faf6b53a18e3d46343535caed491776a22af773f366534056c4e1fbc"},
+ {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7fdd52961feb4c96507aa649550ec2a0d527c086d284749b2f582f2d40a2e0d"},
+ {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:92db3c28b5b2a273346bebb24857fda45601aef6ae1c011c0a997106581e8a88"},
+ {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ab973df98fc99ab39080bfb0eb3a925181454d7c3ac8a1e695fddfae696d9e90"},
+ {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4b67fdab07fdd3c10bb21edab3cbfe8cf5696f453afce75d815d9d7223fbe88b"},
+ {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:aa41e526a5d4a9dfcfbab0716c7e8a1b215abd3f3df5a45cf18a12721d31cb5d"},
+ {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ffc519621dce0c767e96b9c53f09c5d215578e10b02c285809f76509a3931482"},
+ {file = "charset_normalizer-3.4.0-cp313-cp313-win32.whl", hash = "sha256:f19c1585933c82098c2a520f8ec1227f20e339e33aca8fa6f956f6691b784e67"},
+ {file = "charset_normalizer-3.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:707b82d19e65c9bd28b81dde95249b07bf9f5b90ebe1ef17d9b57473f8a64b7b"},
+ {file = "charset_normalizer-3.4.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:dbe03226baf438ac4fda9e2d0715022fd579cb641c4cf639fa40d53b2fe6f3e2"},
+ {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd9a8bd8900e65504a305bf8ae6fa9fbc66de94178c420791d0293702fce2df7"},
+ {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8831399554b92b72af5932cdbbd4ddc55c55f631bb13ff8fe4e6536a06c5c51"},
+ {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a14969b8691f7998e74663b77b4c36c0337cb1df552da83d5c9004a93afdb574"},
+ {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dcaf7c1524c0542ee2fc82cc8ec337f7a9f7edee2532421ab200d2b920fc97cf"},
+ {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:425c5f215d0eecee9a56cdb703203dda90423247421bf0d67125add85d0c4455"},
+ {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:d5b054862739d276e09928de37c79ddeec42a6e1bfc55863be96a36ba22926f6"},
+ {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:f3e73a4255342d4eb26ef6df01e3962e73aa29baa3124a8e824c5d3364a65748"},
+ {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:2f6c34da58ea9c1a9515621f4d9ac379871a8f21168ba1b5e09d74250de5ad62"},
+ {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:f09cb5a7bbe1ecae6e87901a2eb23e0256bb524a79ccc53eb0b7629fbe7677c4"},
+ {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:0099d79bdfcf5c1f0c2c72f91516702ebf8b0b8ddd8905f97a8aecf49712c621"},
+ {file = "charset_normalizer-3.4.0-cp37-cp37m-win32.whl", hash = "sha256:9c98230f5042f4945f957d006edccc2af1e03ed5e37ce7c373f00a5a4daa6149"},
+ {file = "charset_normalizer-3.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:62f60aebecfc7f4b82e3f639a7d1433a20ec32824db2199a11ad4f5e146ef5ee"},
+ {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:af73657b7a68211996527dbfeffbb0864e043d270580c5aef06dc4b659a4b578"},
+ {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cab5d0b79d987c67f3b9e9c53f54a61360422a5a0bc075f43cab5621d530c3b6"},
+ {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9289fd5dddcf57bab41d044f1756550f9e7cf0c8e373b8cdf0ce8773dc4bd417"},
+ {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b493a043635eb376e50eedf7818f2f322eabbaa974e948bd8bdd29eb7ef2a51"},
+ {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fa2566ca27d67c86569e8c85297aaf413ffab85a8960500f12ea34ff98e4c41"},
+ {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8e538f46104c815be19c975572d74afb53f29650ea2025bbfaef359d2de2f7f"},
+ {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fd30dc99682dc2c603c2b315bded2799019cea829f8bf57dc6b61efde6611c8"},
+ {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2006769bd1640bdf4d5641c69a3d63b71b81445473cac5ded39740a226fa88ab"},
+ {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:dc15e99b2d8a656f8e666854404f1ba54765871104e50c8e9813af8a7db07f12"},
+ {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:ab2e5bef076f5a235c3774b4f4028a680432cded7cad37bba0fd90d64b187d19"},
+ {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:4ec9dd88a5b71abfc74e9df5ebe7921c35cbb3b641181a531ca65cdb5e8e4dea"},
+ {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:43193c5cda5d612f247172016c4bb71251c784d7a4d9314677186a838ad34858"},
+ {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:aa693779a8b50cd97570e5a0f343538a8dbd3e496fa5dcb87e29406ad0299654"},
+ {file = "charset_normalizer-3.4.0-cp38-cp38-win32.whl", hash = "sha256:7706f5850360ac01d80c89bcef1640683cc12ed87f42579dab6c5d3ed6888613"},
+ {file = "charset_normalizer-3.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:c3e446d253bd88f6377260d07c895816ebf33ffffd56c1c792b13bff9c3e1ade"},
+ {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:980b4f289d1d90ca5efcf07958d3eb38ed9c0b7676bf2831a54d4f66f9c27dfa"},
+ {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f28f891ccd15c514a0981f3b9db9aa23d62fe1a99997512b0491d2ed323d229a"},
+ {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8aacce6e2e1edcb6ac625fb0f8c3a9570ccc7bfba1f63419b3769ccf6a00ed0"},
+ {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd7af3717683bea4c87acd8c0d3d5b44d56120b26fd3f8a692bdd2d5260c620a"},
+ {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5ff2ed8194587faf56555927b3aa10e6fb69d931e33953943bc4f837dfee2242"},
+ {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e91f541a85298cf35433bf66f3fab2a4a2cff05c127eeca4af174f6d497f0d4b"},
+ {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:309a7de0a0ff3040acaebb35ec45d18db4b28232f21998851cfa709eeff49d62"},
+ {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:285e96d9d53422efc0d7a17c60e59f37fbf3dfa942073f666db4ac71e8d726d0"},
+ {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5d447056e2ca60382d460a604b6302d8db69476fd2015c81e7c35417cfabe4cd"},
+ {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:20587d20f557fe189b7947d8e7ec5afa110ccf72a3128d61a2a387c3313f46be"},
+ {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:130272c698667a982a5d0e626851ceff662565379baf0ff2cc58067b81d4f11d"},
+ {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:ab22fbd9765e6954bc0bcff24c25ff71dcbfdb185fcdaca49e81bac68fe724d3"},
+ {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7782afc9b6b42200f7362858f9e73b1f8316afb276d316336c0ec3bd73312742"},
+ {file = "charset_normalizer-3.4.0-cp39-cp39-win32.whl", hash = "sha256:2de62e8801ddfff069cd5c504ce3bc9672b23266597d4e4f50eda28846c322f2"},
+ {file = "charset_normalizer-3.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:95c3c157765b031331dd4db3c775e58deaee050a3042fcad72cbc4189d7c8dca"},
+ {file = "charset_normalizer-3.4.0-py3-none-any.whl", hash = "sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079"},
+ {file = "charset_normalizer-3.4.0.tar.gz", hash = "sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e"},
+]
+
[[package]]
name = "colorama"
version = "0.4.6"
@@ -58,6 +169,34 @@ files = [
{file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
]
+[[package]]
+name = "deprecated"
+version = "1.2.14"
+description = "Python @deprecated decorator to deprecate old python classes, functions or methods."
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+files = [
+ {file = "Deprecated-1.2.14-py2.py3-none-any.whl", hash = "sha256:6fac8b097794a90302bdbb17b9b815e732d3c4720583ff1b198499d78470466c"},
+ {file = "Deprecated-1.2.14.tar.gz", hash = "sha256:e5323eb936458dccc2582dc6f9c322c852a775a27065ff2b0c4970b9d53d01b3"},
+]
+
+[package.dependencies]
+wrapt = ">=1.10,<2"
+
+[package.extras]
+dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "sphinx (<2)", "tox"]
+
+[[package]]
+name = "distro"
+version = "1.9.0"
+description = "Distro - an OS platform information API"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2"},
+ {file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"},
+]
+
[[package]]
name = "exceptiongroup"
version = "1.2.2"
@@ -154,6 +293,25 @@ files = [
[package.extras]
all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"]
+[[package]]
+name = "importlib-metadata"
+version = "8.4.0"
+description = "Read metadata from Python packages"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "importlib_metadata-8.4.0-py3-none-any.whl", hash = "sha256:66f342cc6ac9818fc6ff340576acd24d65ba0b3efabb2b4ac08b598965a4a2f1"},
+ {file = "importlib_metadata-8.4.0.tar.gz", hash = "sha256:9a547d3bc3608b025f93d403fdd1aae741c24fbb8314df4b155675742ce303c5"},
+]
+
+[package.dependencies]
+zipp = ">=0.5"
+
+[package.extras]
+doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
+perf = ["ipython"]
+test = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-perf (>=0.9.2)", "pytest-ruff (>=0.2.1)"]
+
[[package]]
name = "iniconfig"
version = "2.0.0"
@@ -165,6 +323,88 @@ files = [
{file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"},
]
+[[package]]
+name = "jiter"
+version = "0.6.1"
+description = "Fast iterable JSON parser."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "jiter-0.6.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:d08510593cb57296851080018006dfc394070178d238b767b1879dc1013b106c"},
+ {file = "jiter-0.6.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:adef59d5e2394ebbad13b7ed5e0306cceb1df92e2de688824232a91588e77aa7"},
+ {file = "jiter-0.6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b3e02f7a27f2bcc15b7d455c9df05df8ffffcc596a2a541eeda9a3110326e7a3"},
+ {file = "jiter-0.6.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed69a7971d67b08f152c17c638f0e8c2aa207e9dd3a5fcd3cba294d39b5a8d2d"},
+ {file = "jiter-0.6.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b2019d966e98f7c6df24b3b8363998575f47d26471bfb14aade37630fae836a1"},
+ {file = "jiter-0.6.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:36c0b51a285b68311e207a76c385650322734c8717d16c2eb8af75c9d69506e7"},
+ {file = "jiter-0.6.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:220e0963b4fb507c525c8f58cde3da6b1be0bfddb7ffd6798fb8f2531226cdb1"},
+ {file = "jiter-0.6.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:aa25c7a9bf7875a141182b9c95aed487add635da01942ef7ca726e42a0c09058"},
+ {file = "jiter-0.6.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e90552109ca8ccd07f47ca99c8a1509ced93920d271bb81780a973279974c5ab"},
+ {file = "jiter-0.6.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:67723a011964971864e0b484b0ecfee6a14de1533cff7ffd71189e92103b38a8"},
+ {file = "jiter-0.6.1-cp310-none-win32.whl", hash = "sha256:33af2b7d2bf310fdfec2da0177eab2fedab8679d1538d5b86a633ebfbbac4edd"},
+ {file = "jiter-0.6.1-cp310-none-win_amd64.whl", hash = "sha256:7cea41c4c673353799906d940eee8f2d8fd1d9561d734aa921ae0f75cb9732f4"},
+ {file = "jiter-0.6.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:b03c24e7da7e75b170c7b2b172d9c5e463aa4b5c95696a368d52c295b3f6847f"},
+ {file = "jiter-0.6.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:47fee1be677b25d0ef79d687e238dc6ac91a8e553e1a68d0839f38c69e0ee491"},
+ {file = "jiter-0.6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25f0d2f6e01a8a0fb0eab6d0e469058dab2be46ff3139ed2d1543475b5a1d8e7"},
+ {file = "jiter-0.6.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0b809e39e342c346df454b29bfcc7bca3d957f5d7b60e33dae42b0e5ec13e027"},
+ {file = "jiter-0.6.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e9ac7c2f092f231f5620bef23ce2e530bd218fc046098747cc390b21b8738a7a"},
+ {file = "jiter-0.6.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e51a2d80d5fe0ffb10ed2c82b6004458be4a3f2b9c7d09ed85baa2fbf033f54b"},
+ {file = "jiter-0.6.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3343d4706a2b7140e8bd49b6c8b0a82abf9194b3f0f5925a78fc69359f8fc33c"},
+ {file = "jiter-0.6.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:82521000d18c71e41c96960cb36e915a357bc83d63a8bed63154b89d95d05ad1"},
+ {file = "jiter-0.6.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:3c843e7c1633470708a3987e8ce617ee2979ee18542d6eb25ae92861af3f1d62"},
+ {file = "jiter-0.6.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a2e861658c3fe849efc39b06ebb98d042e4a4c51a8d7d1c3ddc3b1ea091d0784"},
+ {file = "jiter-0.6.1-cp311-none-win32.whl", hash = "sha256:7d72fc86474862c9c6d1f87b921b70c362f2b7e8b2e3c798bb7d58e419a6bc0f"},
+ {file = "jiter-0.6.1-cp311-none-win_amd64.whl", hash = "sha256:3e36a320634f33a07794bb15b8da995dccb94f944d298c8cfe2bd99b1b8a574a"},
+ {file = "jiter-0.6.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:1fad93654d5a7dcce0809aff66e883c98e2618b86656aeb2129db2cd6f26f867"},
+ {file = "jiter-0.6.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4e6e340e8cd92edab7f6a3a904dbbc8137e7f4b347c49a27da9814015cc0420c"},
+ {file = "jiter-0.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:691352e5653af84ed71763c3c427cff05e4d658c508172e01e9c956dfe004aba"},
+ {file = "jiter-0.6.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:defee3949313c1f5b55e18be45089970cdb936eb2a0063f5020c4185db1b63c9"},
+ {file = "jiter-0.6.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:26d2bdd5da097e624081c6b5d416d3ee73e5b13f1703bcdadbb1881f0caa1933"},
+ {file = "jiter-0.6.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18aa9d1626b61c0734b973ed7088f8a3d690d0b7f5384a5270cd04f4d9f26c86"},
+ {file = "jiter-0.6.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a3567c8228afa5ddcce950631c6b17397ed178003dc9ee7e567c4c4dcae9fa0"},
+ {file = "jiter-0.6.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e5c0507131c922defe3f04c527d6838932fcdfd69facebafd7d3574fa3395314"},
+ {file = "jiter-0.6.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:540fcb224d7dc1bcf82f90f2ffb652df96f2851c031adca3c8741cb91877143b"},
+ {file = "jiter-0.6.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e7b75436d4fa2032b2530ad989e4cb0ca74c655975e3ff49f91a1a3d7f4e1df2"},
+ {file = "jiter-0.6.1-cp312-none-win32.whl", hash = "sha256:883d2ced7c21bf06874fdeecab15014c1c6d82216765ca6deef08e335fa719e0"},
+ {file = "jiter-0.6.1-cp312-none-win_amd64.whl", hash = "sha256:91e63273563401aadc6c52cca64a7921c50b29372441adc104127b910e98a5b6"},
+ {file = "jiter-0.6.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:852508a54fe3228432e56019da8b69208ea622a3069458252f725d634e955b31"},
+ {file = "jiter-0.6.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f491cc69ff44e5a1e8bc6bf2b94c1f98d179e1aaf4a554493c171a5b2316b701"},
+ {file = "jiter-0.6.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc56c8f0b2a28ad4d8047f3ae62d25d0e9ae01b99940ec0283263a04724de1f3"},
+ {file = "jiter-0.6.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:51b58f7a0d9e084a43b28b23da2b09fc5e8df6aa2b6a27de43f991293cab85fd"},
+ {file = "jiter-0.6.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5f79ce15099154c90ef900d69c6b4c686b64dfe23b0114e0971f2fecd306ec6c"},
+ {file = "jiter-0.6.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:03a025b52009f47e53ea619175d17e4ded7c035c6fbd44935cb3ada11e1fd592"},
+ {file = "jiter-0.6.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c74a8d93718137c021d9295248a87c2f9fdc0dcafead12d2930bc459ad40f885"},
+ {file = "jiter-0.6.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:40b03b75f903975f68199fc4ec73d546150919cb7e534f3b51e727c4d6ccca5a"},
+ {file = "jiter-0.6.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:825651a3f04cf92a661d22cad61fc913400e33aa89b3e3ad9a6aa9dc8a1f5a71"},
+ {file = "jiter-0.6.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:928bf25eb69ddb292ab8177fe69d3fbf76c7feab5fce1c09265a7dccf25d3991"},
+ {file = "jiter-0.6.1-cp313-none-win32.whl", hash = "sha256:352cd24121e80d3d053fab1cc9806258cad27c53cad99b7a3cac57cf934b12e4"},
+ {file = "jiter-0.6.1-cp313-none-win_amd64.whl", hash = "sha256:be7503dd6f4bf02c2a9bacb5cc9335bc59132e7eee9d3e931b13d76fd80d7fda"},
+ {file = "jiter-0.6.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:31d8e00e1fb4c277df8ab6f31a671f509ebc791a80e5c61fdc6bc8696aaa297c"},
+ {file = "jiter-0.6.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:77c296d65003cd7ee5d7b0965f6acbe6cffaf9d1fa420ea751f60ef24e85fed5"},
+ {file = "jiter-0.6.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aeeb0c0325ef96c12a48ea7e23e2e86fe4838e6e0a995f464cf4c79fa791ceeb"},
+ {file = "jiter-0.6.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a31c6fcbe7d6c25d6f1cc6bb1cba576251d32795d09c09961174fe461a1fb5bd"},
+ {file = "jiter-0.6.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:59e2b37f3b9401fc9e619f4d4badcab2e8643a721838bcf695c2318a0475ae42"},
+ {file = "jiter-0.6.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bae5ae4853cb9644144e9d0755854ce5108d470d31541d83f70ca7ecdc2d1637"},
+ {file = "jiter-0.6.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9df588e9c830b72d8db1dd7d0175af6706b0904f682ea9b1ca8b46028e54d6e9"},
+ {file = "jiter-0.6.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:15f8395e835cf561c85c1adee72d899abf2733d9df72e9798e6d667c9b5c1f30"},
+ {file = "jiter-0.6.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5a99d4e0b5fc3b05ea732d67eb2092fe894e95a90e6e413f2ea91387e228a307"},
+ {file = "jiter-0.6.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:a311df1fa6be0ccd64c12abcd85458383d96e542531bafbfc0a16ff6feda588f"},
+ {file = "jiter-0.6.1-cp38-none-win32.whl", hash = "sha256:81116a6c272a11347b199f0e16b6bd63f4c9d9b52bc108991397dd80d3c78aba"},
+ {file = "jiter-0.6.1-cp38-none-win_amd64.whl", hash = "sha256:13f9084e3e871a7c0b6e710db54444088b1dd9fbefa54d449b630d5e73bb95d0"},
+ {file = "jiter-0.6.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:f1c53615fcfec3b11527c08d19cff6bc870da567ce4e57676c059a3102d3a082"},
+ {file = "jiter-0.6.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f791b6a4da23238c17a81f44f5b55d08a420c5692c1fda84e301a4b036744eb1"},
+ {file = "jiter-0.6.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c97e90fec2da1d5f68ef121444c2c4fa72eabf3240829ad95cf6bbeca42a301"},
+ {file = "jiter-0.6.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3cbc1a66b4e41511209e97a2866898733c0110b7245791ac604117b7fb3fedb7"},
+ {file = "jiter-0.6.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e4e85f9e12cd8418ab10e1fcf0e335ae5bb3da26c4d13a0fd9e6a17a674783b6"},
+ {file = "jiter-0.6.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08be33db6dcc374c9cc19d3633af5e47961a7b10d4c61710bd39e48d52a35824"},
+ {file = "jiter-0.6.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:677be9550004f5e010d673d3b2a2b815a8ea07a71484a57d3f85dde7f14cf132"},
+ {file = "jiter-0.6.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e8bd065be46c2eecc328e419d6557bbc37844c88bb07b7a8d2d6c91c7c4dedc9"},
+ {file = "jiter-0.6.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bd95375ce3609ec079a97c5d165afdd25693302c071ca60c7ae1cf826eb32022"},
+ {file = "jiter-0.6.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:db459ed22d0208940d87f614e1f0ea5a946d29a3cfef71f7e1aab59b6c6b2afb"},
+ {file = "jiter-0.6.1-cp39-none-win32.whl", hash = "sha256:d71c962f0971347bd552940ab96aa42ceefcd51b88c4ced8a27398182efa8d80"},
+ {file = "jiter-0.6.1-cp39-none-win_amd64.whl", hash = "sha256:d465db62d2d10b489b7e7a33027c4ae3a64374425d757e963f86df5b5f2e7fc5"},
+ {file = "jiter-0.6.1.tar.gz", hash = "sha256:e19cd21221fc139fb032e4112986656cb2739e9fe6d84c13956ab30ccc7d4449"},
+]
+
[[package]]
name = "mypy"
version = "1.0.1"
@@ -222,6 +462,172 @@ files = [
{file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"},
]
+[[package]]
+name = "openai"
+version = "1.52.2"
+description = "The official Python library for the openai API"
+optional = false
+python-versions = ">=3.7.1"
+files = [
+ {file = "openai-1.52.2-py3-none-any.whl", hash = "sha256:57e9e37bc407f39bb6ec3a27d7e8fb9728b2779936daa1fcf95df17d3edfaccc"},
+ {file = "openai-1.52.2.tar.gz", hash = "sha256:87b7d0f69d85f5641678d414b7ee3082363647a5c66a462ed7f3ccb59582da0d"},
+]
+
+[package.dependencies]
+anyio = ">=3.5.0,<5"
+distro = ">=1.7.0,<2"
+httpx = ">=0.23.0,<1"
+jiter = ">=0.4.0,<1"
+pydantic = ">=1.9.0,<3"
+sniffio = "*"
+tqdm = ">4"
+typing-extensions = ">=4.11,<5"
+
+[package.extras]
+datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"]
+
+[[package]]
+name = "opentelemetry-api"
+version = "1.27.0"
+description = "OpenTelemetry Python API"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "opentelemetry_api-1.27.0-py3-none-any.whl", hash = "sha256:953d5871815e7c30c81b56d910c707588000fff7a3ca1c73e6531911d53065e7"},
+ {file = "opentelemetry_api-1.27.0.tar.gz", hash = "sha256:ed673583eaa5f81b5ce5e86ef7cdaf622f88ef65f0b9aab40b843dcae5bef342"},
+]
+
+[package.dependencies]
+deprecated = ">=1.2.6"
+importlib-metadata = ">=6.0,<=8.4.0"
+
+[[package]]
+name = "opentelemetry-instrumentation"
+version = "0.48b0"
+description = "Instrumentation Tools & Auto Instrumentation for OpenTelemetry Python"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "opentelemetry_instrumentation-0.48b0-py3-none-any.whl", hash = "sha256:a69750dc4ba6a5c3eb67986a337185a25b739966d80479befe37b546fc870b44"},
+ {file = "opentelemetry_instrumentation-0.48b0.tar.gz", hash = "sha256:94929685d906380743a71c3970f76b5f07476eea1834abd5dd9d17abfe23cc35"},
+]
+
+[package.dependencies]
+opentelemetry-api = ">=1.4,<2.0"
+setuptools = ">=16.0"
+wrapt = ">=1.0.0,<2.0.0"
+
+[[package]]
+name = "opentelemetry-instrumentation-anthropic"
+version = "0.33.3"
+description = "OpenTelemetry Anthropic instrumentation"
+optional = false
+python-versions = "<4,>=3.9"
+files = [
+ {file = "opentelemetry_instrumentation_anthropic-0.33.3-py3-none-any.whl", hash = "sha256:dc4110c6400708d600f79fd78e8e8fe04b90a82b44949817cc91c961cd4db6e7"},
+ {file = "opentelemetry_instrumentation_anthropic-0.33.3.tar.gz", hash = "sha256:d245f1c732caebe4706a4900084758296d1d46d37e042bbd8542d0aa0e691899"},
+]
+
+[package.dependencies]
+opentelemetry-api = ">=1.27.0,<2.0.0"
+opentelemetry-instrumentation = ">=0.48b0,<0.49"
+opentelemetry-semantic-conventions = ">=0.48b0,<0.49"
+opentelemetry-semantic-conventions-ai = "0.4.1"
+
+[[package]]
+name = "opentelemetry-instrumentation-cohere"
+version = "0.33.3"
+description = "OpenTelemetry Cohere instrumentation"
+optional = false
+python-versions = "<4,>=3.9"
+files = [
+ {file = "opentelemetry_instrumentation_cohere-0.33.3-py3-none-any.whl", hash = "sha256:b0a614a321f332e31eb74980a603303123b58a3627a11e7db5f13a8b3c660311"},
+ {file = "opentelemetry_instrumentation_cohere-0.33.3.tar.gz", hash = "sha256:9d940cb30b7e4be94f063f5afadeb2572f4cfe69a731d7c45faaa9f034991a5e"},
+]
+
+[package.dependencies]
+opentelemetry-api = ">=1.27.0,<2.0.0"
+opentelemetry-instrumentation = ">=0.48b0,<0.49"
+opentelemetry-semantic-conventions = ">=0.48b0,<0.49"
+opentelemetry-semantic-conventions-ai = "0.4.1"
+
+[[package]]
+name = "opentelemetry-instrumentation-mistralai"
+version = "0.33.3"
+description = "OpenTelemetry Mistral AI instrumentation"
+optional = false
+python-versions = "<4,>=3.9"
+files = [
+ {file = "opentelemetry_instrumentation_mistralai-0.33.3-py3-none-any.whl", hash = "sha256:5e2eb745bf6e35ff6dbb24abe6b88a62978b07214f1a8b17e0629321bd385e0f"},
+ {file = "opentelemetry_instrumentation_mistralai-0.33.3.tar.gz", hash = "sha256:3b37aac02fe16acc3aa781140135a6c46cf0acaade73aad2820d38037927e788"},
+]
+
+[package.dependencies]
+opentelemetry-api = ">=1.27.0,<2.0.0"
+opentelemetry-instrumentation = ">=0.48b0,<0.49"
+opentelemetry-semantic-conventions = ">=0.48b0,<0.49"
+opentelemetry-semantic-conventions-ai = "0.4.1"
+
+[[package]]
+name = "opentelemetry-instrumentation-openai"
+version = "0.33.3"
+description = "OpenTelemetry OpenAI instrumentation"
+optional = false
+python-versions = "<4,>=3.9"
+files = [
+ {file = "opentelemetry_instrumentation_openai-0.33.3-py3-none-any.whl", hash = "sha256:f5ef4452b269bb409cc260fd611834c33296495e39700fd6e6f83a1cef07b9fd"},
+ {file = "opentelemetry_instrumentation_openai-0.33.3.tar.gz", hash = "sha256:06ad92d5d852f93ee7c0d9b545a412df5265044dae4d6be7056a10fa8afb2fdc"},
+]
+
+[package.dependencies]
+opentelemetry-api = ">=1.27.0,<2.0.0"
+opentelemetry-instrumentation = ">=0.48b0,<0.49"
+opentelemetry-semantic-conventions = ">=0.48b0,<0.49"
+opentelemetry-semantic-conventions-ai = "0.4.1"
+tiktoken = ">=0.6.0,<1"
+
+[[package]]
+name = "opentelemetry-sdk"
+version = "1.27.0"
+description = "OpenTelemetry Python SDK"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "opentelemetry_sdk-1.27.0-py3-none-any.whl", hash = "sha256:365f5e32f920faf0fd9e14fdfd92c086e317eaa5f860edba9cdc17a380d9197d"},
+ {file = "opentelemetry_sdk-1.27.0.tar.gz", hash = "sha256:d525017dea0ccce9ba4e0245100ec46ecdc043f2d7b8315d56b19aff0904fa6f"},
+]
+
+[package.dependencies]
+opentelemetry-api = "1.27.0"
+opentelemetry-semantic-conventions = "0.48b0"
+typing-extensions = ">=3.7.4"
+
+[[package]]
+name = "opentelemetry-semantic-conventions"
+version = "0.48b0"
+description = "OpenTelemetry Semantic Conventions"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "opentelemetry_semantic_conventions-0.48b0-py3-none-any.whl", hash = "sha256:a0de9f45c413a8669788a38569c7e0a11ce6ce97861a628cca785deecdc32a1f"},
+ {file = "opentelemetry_semantic_conventions-0.48b0.tar.gz", hash = "sha256:12d74983783b6878162208be57c9effcb89dc88691c64992d70bb89dc00daa1a"},
+]
+
+[package.dependencies]
+deprecated = ">=1.2.6"
+opentelemetry-api = "1.27.0"
+
+[[package]]
+name = "opentelemetry-semantic-conventions-ai"
+version = "0.4.1"
+description = "OpenTelemetry Semantic Conventions Extension for Large Language Models"
+optional = false
+python-versions = "<4,>=3.9"
+files = [
+ {file = "opentelemetry_semantic_conventions_ai-0.4.1-py3-none-any.whl", hash = "sha256:b6c6e3976a5ea31058faeaf0450a6a56d4576a9734c94c1a4cb82332ee635fe3"},
+ {file = "opentelemetry_semantic_conventions_ai-0.4.1.tar.gz", hash = "sha256:aaf59b2f24d745692170b96d86d7c5560f42443dcf88ced49ae9d4542db1902f"},
+]
+
[[package]]
name = "packaging"
version = "24.2"
@@ -426,6 +832,144 @@ files = [
[package.dependencies]
six = ">=1.5"
+[[package]]
+name = "python-dotenv"
+version = "1.0.1"
+description = "Read key-value pairs from a .env file and set them as environment variables"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "python-dotenv-1.0.1.tar.gz", hash = "sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca"},
+ {file = "python_dotenv-1.0.1-py3-none-any.whl", hash = "sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a"},
+]
+
+[package.extras]
+cli = ["click (>=5.0)"]
+
+[[package]]
+name = "regex"
+version = "2024.9.11"
+description = "Alternative regular expression module, to replace re."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "regex-2024.9.11-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1494fa8725c285a81d01dc8c06b55287a1ee5e0e382d8413adc0a9197aac6408"},
+ {file = "regex-2024.9.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0e12c481ad92d129c78f13a2a3662317e46ee7ef96c94fd332e1c29131875b7d"},
+ {file = "regex-2024.9.11-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:16e13a7929791ac1216afde26f712802e3df7bf0360b32e4914dca3ab8baeea5"},
+ {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:46989629904bad940bbec2106528140a218b4a36bb3042d8406980be1941429c"},
+ {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a906ed5e47a0ce5f04b2c981af1c9acf9e8696066900bf03b9d7879a6f679fc8"},
+ {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e9a091b0550b3b0207784a7d6d0f1a00d1d1c8a11699c1a4d93db3fbefc3ad35"},
+ {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ddcd9a179c0a6fa8add279a4444015acddcd7f232a49071ae57fa6e278f1f71"},
+ {file = "regex-2024.9.11-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6b41e1adc61fa347662b09398e31ad446afadff932a24807d3ceb955ed865cc8"},
+ {file = "regex-2024.9.11-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ced479f601cd2f8ca1fd7b23925a7e0ad512a56d6e9476f79b8f381d9d37090a"},
+ {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:635a1d96665f84b292e401c3d62775851aedc31d4f8784117b3c68c4fcd4118d"},
+ {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:c0256beda696edcf7d97ef16b2a33a8e5a875affd6fa6567b54f7c577b30a137"},
+ {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:3ce4f1185db3fbde8ed8aa223fc9620f276c58de8b0d4f8cc86fd1360829edb6"},
+ {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:09d77559e80dcc9d24570da3745ab859a9cf91953062e4ab126ba9d5993688ca"},
+ {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7a22ccefd4db3f12b526eccb129390942fe874a3a9fdbdd24cf55773a1faab1a"},
+ {file = "regex-2024.9.11-cp310-cp310-win32.whl", hash = "sha256:f745ec09bc1b0bd15cfc73df6fa4f726dcc26bb16c23a03f9e3367d357eeedd0"},
+ {file = "regex-2024.9.11-cp310-cp310-win_amd64.whl", hash = "sha256:01c2acb51f8a7d6494c8c5eafe3d8e06d76563d8a8a4643b37e9b2dd8a2ff623"},
+ {file = "regex-2024.9.11-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:2cce2449e5927a0bf084d346da6cd5eb016b2beca10d0013ab50e3c226ffc0df"},
+ {file = "regex-2024.9.11-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3b37fa423beefa44919e009745ccbf353d8c981516e807995b2bd11c2c77d268"},
+ {file = "regex-2024.9.11-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:64ce2799bd75039b480cc0360907c4fb2f50022f030bf9e7a8705b636e408fad"},
+ {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4cc92bb6db56ab0c1cbd17294e14f5e9224f0cc6521167ef388332604e92679"},
+ {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d05ac6fa06959c4172eccd99a222e1fbf17b5670c4d596cb1e5cde99600674c4"},
+ {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:040562757795eeea356394a7fb13076ad4f99d3c62ab0f8bdfb21f99a1f85664"},
+ {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6113c008a7780792efc80f9dfe10ba0cd043cbf8dc9a76ef757850f51b4edc50"},
+ {file = "regex-2024.9.11-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8e5fb5f77c8745a60105403a774fe2c1759b71d3e7b4ca237a5e67ad066c7199"},
+ {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:54d9ff35d4515debf14bc27f1e3b38bfc453eff3220f5bce159642fa762fe5d4"},
+ {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:df5cbb1fbc74a8305b6065d4ade43b993be03dbe0f8b30032cced0d7740994bd"},
+ {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:7fb89ee5d106e4a7a51bce305ac4efb981536301895f7bdcf93ec92ae0d91c7f"},
+ {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:a738b937d512b30bf75995c0159c0ddf9eec0775c9d72ac0202076c72f24aa96"},
+ {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e28f9faeb14b6f23ac55bfbbfd3643f5c7c18ede093977f1df249f73fd22c7b1"},
+ {file = "regex-2024.9.11-cp311-cp311-win32.whl", hash = "sha256:18e707ce6c92d7282dfce370cd205098384b8ee21544e7cb29b8aab955b66fa9"},
+ {file = "regex-2024.9.11-cp311-cp311-win_amd64.whl", hash = "sha256:313ea15e5ff2a8cbbad96ccef6be638393041b0a7863183c2d31e0c6116688cf"},
+ {file = "regex-2024.9.11-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b0d0a6c64fcc4ef9c69bd5b3b3626cc3776520a1637d8abaa62b9edc147a58f7"},
+ {file = "regex-2024.9.11-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:49b0e06786ea663f933f3710a51e9385ce0cba0ea56b67107fd841a55d56a231"},
+ {file = "regex-2024.9.11-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5b513b6997a0b2f10e4fd3a1313568e373926e8c252bd76c960f96fd039cd28d"},
+ {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee439691d8c23e76f9802c42a95cfeebf9d47cf4ffd06f18489122dbb0a7ad64"},
+ {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a8f877c89719d759e52783f7fe6e1c67121076b87b40542966c02de5503ace42"},
+ {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:23b30c62d0f16827f2ae9f2bb87619bc4fba2044911e2e6c2eb1af0161cdb766"},
+ {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85ab7824093d8f10d44330fe1e6493f756f252d145323dd17ab6b48733ff6c0a"},
+ {file = "regex-2024.9.11-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8dee5b4810a89447151999428fe096977346cf2f29f4d5e29609d2e19e0199c9"},
+ {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:98eeee2f2e63edae2181c886d7911ce502e1292794f4c5ee71e60e23e8d26b5d"},
+ {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:57fdd2e0b2694ce6fc2e5ccf189789c3e2962916fb38779d3e3521ff8fe7a822"},
+ {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:d552c78411f60b1fdaafd117a1fca2f02e562e309223b9d44b7de8be451ec5e0"},
+ {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:a0b2b80321c2ed3fcf0385ec9e51a12253c50f146fddb2abbb10f033fe3d049a"},
+ {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:18406efb2f5a0e57e3a5881cd9354c1512d3bb4f5c45d96d110a66114d84d23a"},
+ {file = "regex-2024.9.11-cp312-cp312-win32.whl", hash = "sha256:e464b467f1588e2c42d26814231edecbcfe77f5ac414d92cbf4e7b55b2c2a776"},
+ {file = "regex-2024.9.11-cp312-cp312-win_amd64.whl", hash = "sha256:9e8719792ca63c6b8340380352c24dcb8cd7ec49dae36e963742a275dfae6009"},
+ {file = "regex-2024.9.11-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:c157bb447303070f256e084668b702073db99bbb61d44f85d811025fcf38f784"},
+ {file = "regex-2024.9.11-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4db21ece84dfeefc5d8a3863f101995de646c6cb0536952c321a2650aa202c36"},
+ {file = "regex-2024.9.11-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:220e92a30b426daf23bb67a7962900ed4613589bab80382be09b48896d211e92"},
+ {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eb1ae19e64c14c7ec1995f40bd932448713d3c73509e82d8cd7744dc00e29e86"},
+ {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f47cd43a5bfa48f86925fe26fbdd0a488ff15b62468abb5d2a1e092a4fb10e85"},
+ {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9d4a76b96f398697fe01117093613166e6aa8195d63f1b4ec3f21ab637632963"},
+ {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ea51dcc0835eea2ea31d66456210a4e01a076d820e9039b04ae8d17ac11dee6"},
+ {file = "regex-2024.9.11-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b7aaa315101c6567a9a45d2839322c51c8d6e81f67683d529512f5bcfb99c802"},
+ {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c57d08ad67aba97af57a7263c2d9006d5c404d721c5f7542f077f109ec2a4a29"},
+ {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:f8404bf61298bb6f8224bb9176c1424548ee1181130818fcd2cbffddc768bed8"},
+ {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:dd4490a33eb909ef5078ab20f5f000087afa2a4daa27b4c072ccb3cb3050ad84"},
+ {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:eee9130eaad130649fd73e5cd92f60e55708952260ede70da64de420cdcad554"},
+ {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6a2644a93da36c784e546de579ec1806bfd2763ef47babc1b03d765fe560c9f8"},
+ {file = "regex-2024.9.11-cp313-cp313-win32.whl", hash = "sha256:e997fd30430c57138adc06bba4c7c2968fb13d101e57dd5bb9355bf8ce3fa7e8"},
+ {file = "regex-2024.9.11-cp313-cp313-win_amd64.whl", hash = "sha256:042c55879cfeb21a8adacc84ea347721d3d83a159da6acdf1116859e2427c43f"},
+ {file = "regex-2024.9.11-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:35f4a6f96aa6cb3f2f7247027b07b15a374f0d5b912c0001418d1d55024d5cb4"},
+ {file = "regex-2024.9.11-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:55b96e7ce3a69a8449a66984c268062fbaa0d8ae437b285428e12797baefce7e"},
+ {file = "regex-2024.9.11-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cb130fccd1a37ed894824b8c046321540263013da72745d755f2d35114b81a60"},
+ {file = "regex-2024.9.11-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:323c1f04be6b2968944d730e5c2091c8c89767903ecaa135203eec4565ed2b2b"},
+ {file = "regex-2024.9.11-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:be1c8ed48c4c4065ecb19d882a0ce1afe0745dfad8ce48c49586b90a55f02366"},
+ {file = "regex-2024.9.11-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b5b029322e6e7b94fff16cd120ab35a253236a5f99a79fb04fda7ae71ca20ae8"},
+ {file = "regex-2024.9.11-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6fff13ef6b5f29221d6904aa816c34701462956aa72a77f1f151a8ec4f56aeb"},
+ {file = "regex-2024.9.11-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:587d4af3979376652010e400accc30404e6c16b7df574048ab1f581af82065e4"},
+ {file = "regex-2024.9.11-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:079400a8269544b955ffa9e31f186f01d96829110a3bf79dc338e9910f794fca"},
+ {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:f9268774428ec173654985ce55fc6caf4c6d11ade0f6f914d48ef4719eb05ebb"},
+ {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:23f9985c8784e544d53fc2930fc1ac1a7319f5d5332d228437acc9f418f2f168"},
+ {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:ae2941333154baff9838e88aa71c1d84f4438189ecc6021a12c7573728b5838e"},
+ {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:e93f1c331ca8e86fe877a48ad64e77882c0c4da0097f2212873a69bbfea95d0c"},
+ {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:846bc79ee753acf93aef4184c040d709940c9d001029ceb7b7a52747b80ed2dd"},
+ {file = "regex-2024.9.11-cp38-cp38-win32.whl", hash = "sha256:c94bb0a9f1db10a1d16c00880bdebd5f9faf267273b8f5bd1878126e0fbde771"},
+ {file = "regex-2024.9.11-cp38-cp38-win_amd64.whl", hash = "sha256:2b08fce89fbd45664d3df6ad93e554b6c16933ffa9d55cb7e01182baaf971508"},
+ {file = "regex-2024.9.11-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:07f45f287469039ffc2c53caf6803cd506eb5f5f637f1d4acb37a738f71dd066"},
+ {file = "regex-2024.9.11-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4838e24ee015101d9f901988001038f7f0d90dc0c3b115541a1365fb439add62"},
+ {file = "regex-2024.9.11-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6edd623bae6a737f10ce853ea076f56f507fd7726bee96a41ee3d68d347e4d16"},
+ {file = "regex-2024.9.11-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c69ada171c2d0e97a4b5aa78fbb835e0ffbb6b13fc5da968c09811346564f0d3"},
+ {file = "regex-2024.9.11-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:02087ea0a03b4af1ed6ebab2c54d7118127fee8d71b26398e8e4b05b78963199"},
+ {file = "regex-2024.9.11-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:69dee6a020693d12a3cf892aba4808fe168d2a4cef368eb9bf74f5398bfd4ee8"},
+ {file = "regex-2024.9.11-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:297f54910247508e6e5cae669f2bc308985c60540a4edd1c77203ef19bfa63ca"},
+ {file = "regex-2024.9.11-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ecea58b43a67b1b79805f1a0255730edaf5191ecef84dbc4cc85eb30bc8b63b9"},
+ {file = "regex-2024.9.11-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:eab4bb380f15e189d1313195b062a6aa908f5bd687a0ceccd47c8211e9cf0d4a"},
+ {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0cbff728659ce4bbf4c30b2a1be040faafaa9eca6ecde40aaff86f7889f4ab39"},
+ {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:54c4a097b8bc5bb0dfc83ae498061d53ad7b5762e00f4adaa23bee22b012e6ba"},
+ {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:73d6d2f64f4d894c96626a75578b0bf7d9e56dcda8c3d037a2118fdfe9b1c664"},
+ {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:e53b5fbab5d675aec9f0c501274c467c0f9a5d23696cfc94247e1fb56501ed89"},
+ {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:0ffbcf9221e04502fc35e54d1ce9567541979c3fdfb93d2c554f0ca583a19b35"},
+ {file = "regex-2024.9.11-cp39-cp39-win32.whl", hash = "sha256:e4c22e1ac1f1ec1e09f72e6c44d8f2244173db7eb9629cc3a346a8d7ccc31142"},
+ {file = "regex-2024.9.11-cp39-cp39-win_amd64.whl", hash = "sha256:faa3c142464efec496967359ca99696c896c591c56c53506bac1ad465f66e919"},
+ {file = "regex-2024.9.11.tar.gz", hash = "sha256:6c188c307e8433bcb63dc1915022deb553b4203a70722fc542c363bf120a01fd"},
+]
+
+[[package]]
+name = "requests"
+version = "2.32.3"
+description = "Python HTTP for Humans."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"},
+ {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"},
+]
+
+[package.dependencies]
+certifi = ">=2017.4.17"
+charset-normalizer = ">=2,<4"
+idna = ">=2.5,<4"
+urllib3 = ">=1.21.1,<3"
+
+[package.extras]
+socks = ["PySocks (>=1.5.6,!=1.5.7)"]
+use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]
+
[[package]]
name = "ruff"
version = "0.5.7"
@@ -453,6 +997,26 @@ files = [
{file = "ruff-0.5.7.tar.gz", hash = "sha256:8dfc0a458797f5d9fb622dd0efc52d796f23f0a1493a9527f4e49a550ae9a7e5"},
]
+[[package]]
+name = "setuptools"
+version = "75.2.0"
+description = "Easily download, build, install, upgrade, and uninstall Python packages"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "setuptools-75.2.0-py3-none-any.whl", hash = "sha256:a7fcb66f68b4d9e8e66b42f9876150a3371558f98fa32222ffaa5bced76406f8"},
+ {file = "setuptools-75.2.0.tar.gz", hash = "sha256:753bb6ebf1f465a1912e19ed1d41f403a79173a9acf66a42e7e6aec45c3c16ec"},
+]
+
+[package.extras]
+check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)", "ruff (>=0.5.2)"]
+core = ["importlib-metadata (>=6)", "importlib-resources (>=5.10.2)", "jaraco.collections", "jaraco.functools", "jaraco.text (>=3.7)", "more-itertools", "more-itertools (>=8.8)", "packaging", "packaging (>=24)", "platformdirs (>=2.6.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"]
+cover = ["pytest-cov"]
+doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"]
+enabler = ["pytest-enabler (>=2.2)"]
+test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"]
+type = ["importlib-metadata (>=7.0.2)", "jaraco.develop (>=7.21)", "mypy (==1.11.*)", "pytest-mypy"]
+
[[package]]
name = "six"
version = "1.16.0"
@@ -475,6 +1039,53 @@ files = [
{file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"},
]
+[[package]]
+name = "tiktoken"
+version = "0.8.0"
+description = "tiktoken is a fast BPE tokeniser for use with OpenAI's models"
+optional = false
+python-versions = ">=3.9"
+files = [
+ {file = "tiktoken-0.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b07e33283463089c81ef1467180e3e00ab00d46c2c4bbcef0acab5f771d6695e"},
+ {file = "tiktoken-0.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9269348cb650726f44dd3bbb3f9110ac19a8dcc8f54949ad3ef652ca22a38e21"},
+ {file = "tiktoken-0.8.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25e13f37bc4ef2d012731e93e0fef21dc3b7aea5bb9009618de9a4026844e560"},
+ {file = "tiktoken-0.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f13d13c981511331eac0d01a59b5df7c0d4060a8be1e378672822213da51e0a2"},
+ {file = "tiktoken-0.8.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:6b2ddbc79a22621ce8b1166afa9f9a888a664a579350dc7c09346a3b5de837d9"},
+ {file = "tiktoken-0.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:d8c2d0e5ba6453a290b86cd65fc51fedf247e1ba170191715b049dac1f628005"},
+ {file = "tiktoken-0.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d622d8011e6d6f239297efa42a2657043aaed06c4f68833550cac9e9bc723ef1"},
+ {file = "tiktoken-0.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2efaf6199717b4485031b4d6edb94075e4d79177a172f38dd934d911b588d54a"},
+ {file = "tiktoken-0.8.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5637e425ce1fc49cf716d88df3092048359a4b3bbb7da762840426e937ada06d"},
+ {file = "tiktoken-0.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fb0e352d1dbe15aba082883058b3cce9e48d33101bdaac1eccf66424feb5b47"},
+ {file = "tiktoken-0.8.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:56edfefe896c8f10aba372ab5706b9e3558e78db39dd497c940b47bf228bc419"},
+ {file = "tiktoken-0.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:326624128590def898775b722ccc327e90b073714227175ea8febbc920ac0a99"},
+ {file = "tiktoken-0.8.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:881839cfeae051b3628d9823b2e56b5cc93a9e2efb435f4cf15f17dc45f21586"},
+ {file = "tiktoken-0.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:fe9399bdc3f29d428f16a2f86c3c8ec20be3eac5f53693ce4980371c3245729b"},
+ {file = "tiktoken-0.8.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9a58deb7075d5b69237a3ff4bb51a726670419db6ea62bdcd8bd80c78497d7ab"},
+ {file = "tiktoken-0.8.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2908c0d043a7d03ebd80347266b0e58440bdef5564f84f4d29fb235b5df3b04"},
+ {file = "tiktoken-0.8.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:294440d21a2a51e12d4238e68a5972095534fe9878be57d905c476017bff99fc"},
+ {file = "tiktoken-0.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:d8f3192733ac4d77977432947d563d7e1b310b96497acd3c196c9bddb36ed9db"},
+ {file = "tiktoken-0.8.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:02be1666096aff7da6cbd7cdaa8e7917bfed3467cd64b38b1f112e96d3b06a24"},
+ {file = "tiktoken-0.8.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:c94ff53c5c74b535b2cbf431d907fc13c678bbd009ee633a2aca269a04389f9a"},
+ {file = "tiktoken-0.8.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b231f5e8982c245ee3065cd84a4712d64692348bc609d84467c57b4b72dcbc5"},
+ {file = "tiktoken-0.8.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4177faa809bd55f699e88c96d9bb4635d22e3f59d635ba6fd9ffedf7150b9953"},
+ {file = "tiktoken-0.8.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5376b6f8dc4753cd81ead935c5f518fa0fbe7e133d9e25f648d8c4dabdd4bad7"},
+ {file = "tiktoken-0.8.0-cp313-cp313-win_amd64.whl", hash = "sha256:18228d624807d66c87acd8f25fc135665617cab220671eb65b50f5d70fa51f69"},
+ {file = "tiktoken-0.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7e17807445f0cf1f25771c9d86496bd8b5c376f7419912519699f3cc4dc5c12e"},
+ {file = "tiktoken-0.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:886f80bd339578bbdba6ed6d0567a0d5c6cfe198d9e587ba6c447654c65b8edc"},
+ {file = "tiktoken-0.8.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6adc8323016d7758d6de7313527f755b0fc6c72985b7d9291be5d96d73ecd1e1"},
+ {file = "tiktoken-0.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b591fb2b30d6a72121a80be24ec7a0e9eb51c5500ddc7e4c2496516dd5e3816b"},
+ {file = "tiktoken-0.8.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:845287b9798e476b4d762c3ebda5102be87ca26e5d2c9854002825d60cdb815d"},
+ {file = "tiktoken-0.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:1473cfe584252dc3fa62adceb5b1c763c1874e04511b197da4e6de51d6ce5a02"},
+ {file = "tiktoken-0.8.0.tar.gz", hash = "sha256:9ccbb2740f24542534369c5635cfd9b2b3c2490754a78ac8831d99f89f94eeb2"},
+]
+
+[package.dependencies]
+regex = ">=2022.1.18"
+requests = ">=2.26.0"
+
+[package.extras]
+blobfile = ["blobfile (>=2)"]
+
[[package]]
name = "tomli"
version = "2.0.2"
@@ -486,6 +1097,26 @@ files = [
{file = "tomli-2.0.2.tar.gz", hash = "sha256:d46d457a85337051c36524bc5349dd91b1877838e2979ac5ced3e710ed8a60ed"},
]
+[[package]]
+name = "tqdm"
+version = "4.66.5"
+description = "Fast, Extensible Progress Meter"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "tqdm-4.66.5-py3-none-any.whl", hash = "sha256:90279a3770753eafc9194a0364852159802111925aa30eb3f9d85b0e805ac7cd"},
+ {file = "tqdm-4.66.5.tar.gz", hash = "sha256:e1020aef2e5096702d8a025ac7d16b1577279c9d63f8375b63083e9a5f0fcbad"},
+]
+
+[package.dependencies]
+colorama = {version = "*", markers = "platform_system == \"Windows\""}
+
+[package.extras]
+dev = ["pytest (>=6)", "pytest-cov", "pytest-timeout", "pytest-xdist"]
+notebook = ["ipywidgets (>=6)"]
+slack = ["slack-sdk"]
+telegram = ["requests"]
+
[[package]]
name = "types-python-dateutil"
version = "2.9.0.20241003"
@@ -508,7 +1139,122 @@ files = [
{file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"},
]
+[[package]]
+name = "urllib3"
+version = "2.2.3"
+description = "HTTP library with thread-safe connection pooling, file post, and more."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"},
+ {file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"},
+]
+
+[package.extras]
+brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"]
+h2 = ["h2 (>=4,<5)"]
+socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"]
+zstd = ["zstandard (>=0.18.0)"]
+
+[[package]]
+name = "wrapt"
+version = "1.16.0"
+description = "Module for decorators, wrappers and monkey patching."
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "wrapt-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ffa565331890b90056c01db69c0fe634a776f8019c143a5ae265f9c6bc4bd6d4"},
+ {file = "wrapt-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e4fdb9275308292e880dcbeb12546df7f3e0f96c6b41197e0cf37d2826359020"},
+ {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb2dee3874a500de01c93d5c71415fcaef1d858370d405824783e7a8ef5db440"},
+ {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2a88e6010048489cda82b1326889ec075a8c856c2e6a256072b28eaee3ccf487"},
+ {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac83a914ebaf589b69f7d0a1277602ff494e21f4c2f743313414378f8f50a4cf"},
+ {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:73aa7d98215d39b8455f103de64391cb79dfcad601701a3aa0dddacf74911d72"},
+ {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:807cc8543a477ab7422f1120a217054f958a66ef7314f76dd9e77d3f02cdccd0"},
+ {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bf5703fdeb350e36885f2875d853ce13172ae281c56e509f4e6eca049bdfb136"},
+ {file = "wrapt-1.16.0-cp310-cp310-win32.whl", hash = "sha256:f6b2d0c6703c988d334f297aa5df18c45e97b0af3679bb75059e0e0bd8b1069d"},
+ {file = "wrapt-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:decbfa2f618fa8ed81c95ee18a387ff973143c656ef800c9f24fb7e9c16054e2"},
+ {file = "wrapt-1.16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1a5db485fe2de4403f13fafdc231b0dbae5eca4359232d2efc79025527375b09"},
+ {file = "wrapt-1.16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:75ea7d0ee2a15733684badb16de6794894ed9c55aa5e9903260922f0482e687d"},
+ {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a452f9ca3e3267cd4d0fcf2edd0d035b1934ac2bd7e0e57ac91ad6b95c0c6389"},
+ {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:43aa59eadec7890d9958748db829df269f0368521ba6dc68cc172d5d03ed8060"},
+ {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72554a23c78a8e7aa02abbd699d129eead8b147a23c56e08d08dfc29cfdddca1"},
+ {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d2efee35b4b0a347e0d99d28e884dfd82797852d62fcd7ebdeee26f3ceb72cf3"},
+ {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:6dcfcffe73710be01d90cae08c3e548d90932d37b39ef83969ae135d36ef3956"},
+ {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:eb6e651000a19c96f452c85132811d25e9264d836951022d6e81df2fff38337d"},
+ {file = "wrapt-1.16.0-cp311-cp311-win32.whl", hash = "sha256:66027d667efe95cc4fa945af59f92c5a02c6f5bb6012bff9e60542c74c75c362"},
+ {file = "wrapt-1.16.0-cp311-cp311-win_amd64.whl", hash = "sha256:aefbc4cb0a54f91af643660a0a150ce2c090d3652cf4052a5397fb2de549cd89"},
+ {file = "wrapt-1.16.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5eb404d89131ec9b4f748fa5cfb5346802e5ee8836f57d516576e61f304f3b7b"},
+ {file = "wrapt-1.16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9090c9e676d5236a6948330e83cb89969f433b1943a558968f659ead07cb3b36"},
+ {file = "wrapt-1.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94265b00870aa407bd0cbcfd536f17ecde43b94fb8d228560a1e9d3041462d73"},
+ {file = "wrapt-1.16.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f2058f813d4f2b5e3a9eb2eb3faf8f1d99b81c3e51aeda4b168406443e8ba809"},
+ {file = "wrapt-1.16.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98b5e1f498a8ca1858a1cdbffb023bfd954da4e3fa2c0cb5853d40014557248b"},
+ {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:14d7dc606219cdd7405133c713f2c218d4252f2a469003f8c46bb92d5d095d81"},
+ {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:49aac49dc4782cb04f58986e81ea0b4768e4ff197b57324dcbd7699c5dfb40b9"},
+ {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:418abb18146475c310d7a6dc71143d6f7adec5b004ac9ce08dc7a34e2babdc5c"},
+ {file = "wrapt-1.16.0-cp312-cp312-win32.whl", hash = "sha256:685f568fa5e627e93f3b52fda002c7ed2fa1800b50ce51f6ed1d572d8ab3e7fc"},
+ {file = "wrapt-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:dcdba5c86e368442528f7060039eda390cc4091bfd1dca41e8046af7c910dda8"},
+ {file = "wrapt-1.16.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:d462f28826f4657968ae51d2181a074dfe03c200d6131690b7d65d55b0f360f8"},
+ {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a33a747400b94b6d6b8a165e4480264a64a78c8a4c734b62136062e9a248dd39"},
+ {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3646eefa23daeba62643a58aac816945cadc0afaf21800a1421eeba5f6cfb9c"},
+ {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ebf019be5c09d400cf7b024aa52b1f3aeebeff51550d007e92c3c1c4afc2a40"},
+ {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:0d2691979e93d06a95a26257adb7bfd0c93818e89b1406f5a28f36e0d8c1e1fc"},
+ {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:1acd723ee2a8826f3d53910255643e33673e1d11db84ce5880675954183ec47e"},
+ {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:bc57efac2da352a51cc4658878a68d2b1b67dbe9d33c36cb826ca449d80a8465"},
+ {file = "wrapt-1.16.0-cp36-cp36m-win32.whl", hash = "sha256:da4813f751142436b075ed7aa012a8778aa43a99f7b36afe9b742d3ed8bdc95e"},
+ {file = "wrapt-1.16.0-cp36-cp36m-win_amd64.whl", hash = "sha256:6f6eac2360f2d543cc875a0e5efd413b6cbd483cb3ad7ebf888884a6e0d2e966"},
+ {file = "wrapt-1.16.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a0ea261ce52b5952bf669684a251a66df239ec6d441ccb59ec7afa882265d593"},
+ {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7bd2d7ff69a2cac767fbf7a2b206add2e9a210e57947dd7ce03e25d03d2de292"},
+ {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9159485323798c8dc530a224bd3ffcf76659319ccc7bbd52e01e73bd0241a0c5"},
+ {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a86373cf37cd7764f2201b76496aba58a52e76dedfaa698ef9e9688bfd9e41cf"},
+ {file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:73870c364c11f03ed072dda68ff7aea6d2a3a5c3fe250d917a429c7432e15228"},
+ {file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:b935ae30c6e7400022b50f8d359c03ed233d45b725cfdd299462f41ee5ffba6f"},
+ {file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:db98ad84a55eb09b3c32a96c576476777e87c520a34e2519d3e59c44710c002c"},
+ {file = "wrapt-1.16.0-cp37-cp37m-win32.whl", hash = "sha256:9153ed35fc5e4fa3b2fe97bddaa7cbec0ed22412b85bcdaf54aeba92ea37428c"},
+ {file = "wrapt-1.16.0-cp37-cp37m-win_amd64.whl", hash = "sha256:66dfbaa7cfa3eb707bbfcd46dab2bc6207b005cbc9caa2199bcbc81d95071a00"},
+ {file = "wrapt-1.16.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1dd50a2696ff89f57bd8847647a1c363b687d3d796dc30d4dd4a9d1689a706f0"},
+ {file = "wrapt-1.16.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:44a2754372e32ab315734c6c73b24351d06e77ffff6ae27d2ecf14cf3d229202"},
+ {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e9723528b9f787dc59168369e42ae1c3b0d3fadb2f1a71de14531d321ee05b0"},
+ {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dbed418ba5c3dce92619656802cc5355cb679e58d0d89b50f116e4a9d5a9603e"},
+ {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:941988b89b4fd6b41c3f0bfb20e92bd23746579736b7343283297c4c8cbae68f"},
+ {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6a42cd0cfa8ffc1915aef79cb4284f6383d8a3e9dcca70c445dcfdd639d51267"},
+ {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1ca9b6085e4f866bd584fb135a041bfc32cab916e69f714a7d1d397f8c4891ca"},
+ {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d5e49454f19ef621089e204f862388d29e6e8d8b162efce05208913dde5b9ad6"},
+ {file = "wrapt-1.16.0-cp38-cp38-win32.whl", hash = "sha256:c31f72b1b6624c9d863fc095da460802f43a7c6868c5dda140f51da24fd47d7b"},
+ {file = "wrapt-1.16.0-cp38-cp38-win_amd64.whl", hash = "sha256:490b0ee15c1a55be9c1bd8609b8cecd60e325f0575fc98f50058eae366e01f41"},
+ {file = "wrapt-1.16.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9b201ae332c3637a42f02d1045e1d0cccfdc41f1f2f801dafbaa7e9b4797bfc2"},
+ {file = "wrapt-1.16.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2076fad65c6736184e77d7d4729b63a6d1ae0b70da4868adeec40989858eb3fb"},
+ {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5cd603b575ebceca7da5a3a251e69561bec509e0b46e4993e1cac402b7247b8"},
+ {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b47cfad9e9bbbed2339081f4e346c93ecd7ab504299403320bf85f7f85c7d46c"},
+ {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8212564d49c50eb4565e502814f694e240c55551a5f1bc841d4fcaabb0a9b8a"},
+ {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:5f15814a33e42b04e3de432e573aa557f9f0f56458745c2074952f564c50e664"},
+ {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db2e408d983b0e61e238cf579c09ef7020560441906ca990fe8412153e3b291f"},
+ {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:edfad1d29c73f9b863ebe7082ae9321374ccb10879eeabc84ba3b69f2579d537"},
+ {file = "wrapt-1.16.0-cp39-cp39-win32.whl", hash = "sha256:ed867c42c268f876097248e05b6117a65bcd1e63b779e916fe2e33cd6fd0d3c3"},
+ {file = "wrapt-1.16.0-cp39-cp39-win_amd64.whl", hash = "sha256:eb1b046be06b0fce7249f1d025cd359b4b80fc1c3e24ad9eca33e0dcdb2e4a35"},
+ {file = "wrapt-1.16.0-py3-none-any.whl", hash = "sha256:6906c4100a8fcbf2fa735f6059214bb13b97f75b1a61777fcf6432121ef12ef1"},
+ {file = "wrapt-1.16.0.tar.gz", hash = "sha256:5f370f952971e7d17c7d1ead40e49f32345a7f7a5373571ef44d800d06b1899d"},
+]
+
+[[package]]
+name = "zipp"
+version = "3.20.2"
+description = "Backport of pathlib-compatible object wrapper for zip files"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "zipp-3.20.2-py3-none-any.whl", hash = "sha256:a817ac80d6cf4b23bf7f2828b7cabf326f15a001bea8b1f9b49631780ba28350"},
+ {file = "zipp-3.20.2.tar.gz", hash = "sha256:bc9eb26f4506fda01b81bcde0ca78103b6e62f991b381fec825435c836edbc29"},
+]
+
+[package.extras]
+check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"]
+cover = ["pytest-cov"]
+doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
+enabler = ["pytest-enabler (>=2.2)"]
+test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"]
+type = ["pytest-mypy"]
+
[metadata]
lock-version = "2.0"
-python-versions = "^3.8"
-content-hash = "2432f04327a2d8503e175bf13ddf16c3c5b9992b344c9b1e1faf3e444e388903"
+python-versions = "^3.9"
+content-hash = "01a2b6f6439c0390145f670ef2c65f4698d4df7af626906888692938ab7166d8"
diff --git a/pyproject.toml b/pyproject.toml
index b5eda9b1..621c54bc 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -31,13 +31,20 @@ packages = [
Repository = 'https://github.com/humanloop/humanloop-python'
[tool.poetry.dependencies]
-python = "^3.8"
+python = "^3.9"
httpx = ">=0.21.2"
httpx-sse = "0.4.0"
pydantic = ">= 1.9.2"
pydantic-core = "^2.18.2"
typing_extensions = ">= 4.0.0"
+opentelemetry-sdk = "^1.27.0"
+opentelemetry-api = "^1.27.0"
+opentelemetry-instrumentation-openai = "^0.33.3"
+opentelemetry-instrumentation-cohere = "^0.33.3"
+opentelemetry-instrumentation-anthropic = "^0.33.3"
+opentelemetry-instrumentation-mistralai = "^0.33.3"
+
[tool.poetry.dev-dependencies]
mypy = "1.0.1"
pytest = "^7.4.0"
@@ -45,6 +52,8 @@ pytest-asyncio = "^0.23.5"
python-dateutil = "^2.9.0"
types-python-dateutil = "^2.9.0.20240316"
ruff = "^0.5.6"
+python-dotenv = "^1.0.1"
+openai = "^1.52.2"
[tool.pytest.ini_options]
testpaths = [ "tests" ]
diff --git a/src/humanloop/__init__.py b/src/humanloop/__init__.py
index 4ce861b0..a0283dd2 100644
--- a/src/humanloop/__init__.py
+++ b/src/humanloop/__init__.py
@@ -1,174 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
-from .types import (
- AgentConfigResponse,
- BaseModelsUserResponse,
- BooleanEvaluatorStatsResponse,
- ChatMessage,
- ChatMessageContent,
- ChatMessageContentItem,
- ChatRole,
- ChatToolType,
- CodeEvaluatorRequest,
- CommitRequest,
- ConfigToolResponse,
- CreateDatapointRequest,
- CreateDatapointRequestTargetValue,
- CreateEvaluatorLogResponse,
- CreateFlowLogResponse,
- CreatePromptLogResponse,
- CreateToolLogResponse,
- DashboardConfiguration,
- DatapointResponse,
- DatapointResponseTargetValue,
- DatasetResponse,
- DatasetsRequest,
- DirectoryResponse,
- DirectoryWithParentsAndChildrenResponse,
- DirectoryWithParentsAndChildrenResponseFilesItem,
- EnvironmentResponse,
- EnvironmentTag,
- EvaluateeRequest,
- EvaluateeResponse,
- EvaluationEvaluatorResponse,
- EvaluationLogResponse,
- EvaluationResponse,
- EvaluationRunResponse,
- EvaluationRunsResponse,
- EvaluationStats,
- EvaluationStatus,
- EvaluationsDatasetRequest,
- EvaluationsRequest,
- EvaluatorActivationDeactivationRequest,
- EvaluatorActivationDeactivationRequestActivateItem,
- EvaluatorActivationDeactivationRequestDeactivateItem,
- EvaluatorAggregate,
- EvaluatorArgumentsType,
- EvaluatorConfigResponse,
- EvaluatorFileId,
- EvaluatorFilePath,
- EvaluatorJudgmentNumberLimit,
- EvaluatorJudgmentOptionResponse,
- EvaluatorLogResponse,
- EvaluatorLogResponseJudgment,
- EvaluatorResponse,
- EvaluatorResponseSpec,
- EvaluatorReturnTypeEnum,
- EvaluatorVersionId,
- EvaluatorsRequest,
- ExternalEvaluatorRequest,
- FeedbackType,
- FileEnvironmentResponse,
- FileEnvironmentResponseFile,
- FileId,
- FilePath,
- FileRequest,
- FileType,
- FilesToolType,
- FlowKernelRequest,
- FlowLogResponse,
- FlowResponse,
- FunctionTool,
- FunctionToolChoice,
- HttpValidationError,
- HumanEvaluatorRequest,
- HumanEvaluatorRequestReturnType,
- ImageChatContent,
- ImageUrl,
- ImageUrlDetail,
- InputResponse,
- LinkedToolResponse,
- ListDatasets,
- ListEvaluators,
- ListFlows,
- ListPrompts,
- ListTools,
- LlmEvaluatorRequest,
- LogResponse,
- ModelEndpoints,
- ModelProviders,
- MonitoringEvaluatorEnvironmentRequest,
- MonitoringEvaluatorResponse,
- MonitoringEvaluatorState,
- MonitoringEvaluatorVersionRequest,
- NumericEvaluatorStatsResponse,
- ObservabilityStatus,
- OverallStats,
- PaginatedDataEvaluationLogResponse,
- PaginatedDataEvaluatorResponse,
- PaginatedDataFlowResponse,
- PaginatedDataLogResponse,
- PaginatedDataPromptResponse,
- PaginatedDataToolResponse,
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse,
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem,
- PaginatedDatapointResponse,
- PaginatedDatasetResponse,
- PaginatedEvaluationResponse,
- PaginatedPromptLogResponse,
- PaginatedSessionResponse,
- PlatformAccessEnum,
- ProjectSortBy,
- PromptCallLogResponse,
- PromptCallResponse,
- PromptCallResponseToolChoice,
- PromptCallStreamResponse,
- PromptKernelRequest,
- PromptKernelRequestStop,
- PromptKernelRequestTemplate,
- PromptLogResponse,
- PromptLogResponseToolChoice,
- PromptResponse,
- PromptResponseStop,
- PromptResponseTemplate,
- ProviderApiKeys,
- ResponseFormat,
- ResponseFormatType,
- RunStatsResponse,
- RunStatsResponseEvaluatorStatsItem,
- RunVersionResponse,
- SelectEvaluatorStatsResponse,
- SortOrder,
- TextChatContent,
- TextEvaluatorStatsResponse,
- TimeUnit,
- ToolCall,
- ToolChoice,
- ToolFunction,
- ToolKernelRequest,
- ToolLogResponse,
- ToolResponse,
- TraceStatus,
- UpdateDatesetAction,
- UpdateEvaluationStatusRequest,
- UserResponse,
- Valence,
- ValidationError,
- ValidationErrorLocItem,
- VersionDeploymentResponse,
- VersionDeploymentResponseFile,
- VersionId,
- VersionIdResponse,
- VersionIdResponseVersion,
- VersionReferenceResponse,
- VersionStatsResponse,
- VersionStatsResponseEvaluatorVersionStatsItem,
- VersionStatus,
-)
-from .errors import UnprocessableEntityError
from . import datasets, directories, evaluations, evaluators, files, flows, logs, prompts, tools
from .client import AsyncHumanloop, Humanloop
from .environment import HumanloopEnvironment
-from .evaluations import (
- AddEvaluatorsRequestEvaluatorsItem,
- AddEvaluatorsRequestEvaluatorsItemParams,
- CreateEvaluationRequestEvaluatorsItem,
- CreateEvaluationRequestEvaluatorsItemParams,
- CreateRunRequestDataset,
- CreateRunRequestDatasetParams,
- CreateRunRequestVersion,
- CreateRunRequestVersionParams,
-)
+from .errors import UnprocessableEntityError
from .evaluators import (
CreateEvaluatorLogRequestJudgment,
CreateEvaluatorLogRequestJudgmentParams,
@@ -218,8 +53,8 @@
EvaluationEvaluatorResponseParams,
EvaluationLogResponseParams,
EvaluationResponseParams,
- EvaluationRunResponseParams,
- EvaluationRunsResponseParams,
+ EvaluationsDatasetRequestParams,
+ EvaluationsRequestParams,
EvaluationStatsParams,
EvaluatorActivationDeactivationRequestActivateItemParams,
EvaluatorActivationDeactivationRequestDeactivateItemParams,
@@ -268,12 +103,12 @@
PaginatedDataEvaluatorResponseParams,
PaginatedDataFlowResponseParams,
PaginatedDataLogResponseParams,
+ PaginatedDatapointResponseParams,
PaginatedDataPromptResponseParams,
+ PaginatedDatasetResponseParams,
PaginatedDataToolResponseParams,
PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseParams,
PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams,
- PaginatedDatapointResponseParams,
- PaginatedDatasetResponseParams,
PaginatedEvaluationResponseParams,
PromptCallLogResponseParams,
PromptCallResponseParams,
@@ -312,6 +147,149 @@
VersionStatsResponseEvaluatorVersionStatsItemParams,
VersionStatsResponseParams,
)
+from .types import (
+ AgentConfigResponse,
+ BaseModelsUserResponse,
+ BooleanEvaluatorStatsResponse,
+ ChatMessage,
+ ChatMessageContent,
+ ChatMessageContentItem,
+ ChatRole,
+ ChatToolType,
+ CodeEvaluatorRequest,
+ CommitRequest,
+ ConfigToolResponse,
+ CreateDatapointRequest,
+ CreateDatapointRequestTargetValue,
+ CreateEvaluatorLogResponse,
+ CreateFlowLogResponse,
+ CreatePromptLogResponse,
+ CreateToolLogResponse,
+ DashboardConfiguration,
+ DatapointResponse,
+ DatapointResponseTargetValue,
+ DatasetResponse,
+ DirectoryResponse,
+ DirectoryWithParentsAndChildrenResponse,
+ DirectoryWithParentsAndChildrenResponseFilesItem,
+ EnvironmentResponse,
+ EnvironmentTag,
+ EvaluatedVersionResponse,
+ EvaluateeRequest,
+ EvaluateeResponse,
+ EvaluationEvaluatorResponse,
+ EvaluationReportLogResponse,
+ EvaluationResponse,
+ EvaluationsDatasetRequest,
+ EvaluationsRequest,
+ EvaluationStats,
+ EvaluationStatus,
+ EvaluatorActivationDeactivationRequest,
+ EvaluatorActivationDeactivationRequestActivateItem,
+ EvaluatorActivationDeactivationRequestDeactivateItem,
+ EvaluatorAggregate,
+ EvaluatorArgumentsType,
+ EvaluatorConfigResponse,
+ EvaluatorJudgmentNumberLimit,
+ EvaluatorJudgmentOptionResponse,
+ EvaluatorLogResponse,
+ EvaluatorLogResponseJudgment,
+ EvaluatorResponse,
+ EvaluatorResponseSpec,
+ EvaluatorReturnTypeEnum,
+ ExternalEvaluatorRequest,
+ FeedbackType,
+ FileEnvironmentResponse,
+ FileEnvironmentResponseFile,
+ FileRequest,
+ FilesToolType,
+ FileType,
+ FlowKernelRequest,
+ FlowLogResponse,
+ FlowResponse,
+ FunctionTool,
+ FunctionToolChoice,
+ HttpValidationError,
+ HumanEvaluatorRequest,
+ HumanEvaluatorRequestReturnType,
+ ImageChatContent,
+ ImageUrl,
+ ImageUrlDetail,
+ InputResponse,
+ LinkedToolResponse,
+ ListDatasets,
+ ListEvaluators,
+ ListFlows,
+ ListPrompts,
+ ListTools,
+ LlmEvaluatorRequest,
+ LogResponse,
+ ModelEndpoints,
+ ModelProviders,
+ MonitoringEvaluatorEnvironmentRequest,
+ MonitoringEvaluatorResponse,
+ MonitoringEvaluatorState,
+ MonitoringEvaluatorVersionRequest,
+ NumericEvaluatorStatsResponse,
+ ObservabilityStatus,
+ OverallStats,
+ PaginatedDataEvaluationReportLogResponse,
+ PaginatedDataEvaluatorResponse,
+ PaginatedDataFlowResponse,
+ PaginatedDataLogResponse,
+ PaginatedDatapointResponse,
+ PaginatedDataPromptResponse,
+ PaginatedDatasetResponse,
+ PaginatedDataToolResponse,
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse,
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem,
+ PaginatedEvaluationResponse,
+ PaginatedPromptLogResponse,
+ PaginatedSessionResponse,
+ PlatformAccessEnum,
+ ProjectSortBy,
+ PromptCallLogResponse,
+ PromptCallResponse,
+ PromptCallResponseToolChoice,
+ PromptCallStreamResponse,
+ PromptKernelRequest,
+ PromptKernelRequestStop,
+ PromptKernelRequestTemplate,
+ PromptLogResponse,
+ PromptLogResponseToolChoice,
+ PromptResponse,
+ PromptResponseStop,
+ PromptResponseTemplate,
+ ProviderApiKeys,
+ ResponseFormat,
+ ResponseFormatType,
+ SelectEvaluatorStatsResponse,
+ SortOrder,
+ TextChatContent,
+ TextEvaluatorStatsResponse,
+ TimeUnit,
+ ToolCall,
+ ToolChoice,
+ ToolFunction,
+ ToolKernelRequest,
+ ToolLogResponse,
+ ToolResponse,
+ TraceStatus,
+ UpdateDatesetAction,
+ UpdateEvaluationStatusRequest,
+ UserResponse,
+ Valence,
+ ValidationError,
+ ValidationErrorLocItem,
+ VersionDeploymentResponse,
+ VersionDeploymentResponseFile,
+ VersionIdResponse,
+ VersionIdResponseVersion,
+ VersionReferenceResponse,
+ VersionStatsResponse,
+ VersionStatsResponseEvaluatorVersionStatsItem,
+ VersionStatus,
+)
from .version import __version__
__all__ = [
diff --git a/src/humanloop/client.py b/src/humanloop/client.py
index 17e8e41d..64f5e177 100644
--- a/src/humanloop/client.py
+++ b/src/humanloop/client.py
@@ -2,7 +2,18 @@
from typing import Optional, List, Sequence
import os
import httpx
-
+from opentelemetry.sdk.resources import Resource
+from opentelemetry.sdk.trace import TracerProvider
+
+from .decorators.flow import flow
+from .decorators.prompt import prompt
+from .decorators.tool import tool
+from humanloop.core.client_wrapper import SyncClientWrapper
+from humanloop.flows.client import FlowsClient
+from humanloop.tools.client import ToolsClient
+from .otel.exporter import HumanloopSpanExporter
+from .otel.processor import HumanloopSpanProcessor
+from .otel import instrument_provider, set_tracer
from .base_client import BaseHumanloop, AsyncBaseHumanloop
from .environment import HumanloopEnvironment
from .eval_utils import _run_eval, Dataset, File, Evaluator, EvaluatorCheck
@@ -46,9 +57,30 @@ def run(
class ExtendedPromptsClient(PromptsClient):
+ def __init__(self, client_wrapper: SyncClientWrapper):
+ super().__init__(client_wrapper=client_wrapper)
+
+ decorate = staticmethod(prompt)
+ decorate.__doc__ = prompt.__doc__
populate_template = staticmethod(populate_template)
+class ExtendedToolsClient(ToolsClient):
+ def __init__(self, client_wrapper: SyncClientWrapper):
+ super().__init__(client_wrapper=client_wrapper)
+
+ decorate = staticmethod(tool)
+ decorate.__doc__ = tool.__doc__
+
+
+class ExtendedFlowsClient(FlowsClient):
+ def __init__(self, client_wrapper: SyncClientWrapper):
+ super().__init__(client_wrapper=client_wrapper)
+
+ decorate = staticmethod(flow)
+ decorate.__doc__ = flow.__doc__
+
+
class Humanloop(BaseHumanloop):
"""
See docstring of BaseHumanloop.
@@ -78,10 +110,31 @@ def __init__(
follow_redirects=follow_redirects,
httpx_client=httpx_client,
)
+
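+        # Set up OpenTelemetry tracing: spans created by the decorators and by
+        # the provider instrumentors are processed and exported to Humanloop.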
+ self._tracer_provider = TracerProvider(
+ resource=Resource(
+ attributes={
+ "instrumentor": "humanloop.sdk",
+ }
+ ),
+ )
+ instrument_provider(provider=self._tracer_provider)
+ self._tracer_provider.add_span_processor(
+ HumanloopSpanProcessor(
+ exporter=HumanloopSpanExporter(
+ client=self,
+ )
+ ),
+ )
+ tracer = self._tracer_provider.get_tracer("humanloop.sdk")
+ set_tracer(tracer)
+
eval_client = ExtendedEvalsClient(client_wrapper=self._client_wrapper)
eval_client.client = self
self.evaluations = eval_client
self.prompts = ExtendedPromptsClient(client_wrapper=self._client_wrapper)
+ self.flows = ExtendedFlowsClient(client_wrapper=self._client_wrapper)
+ self.tools = ExtendedToolsClient(client_wrapper=self._client_wrapper)
class AsyncHumanloop(AsyncBaseHumanloop):
diff --git a/src/humanloop/decorators/__init__.py b/src/humanloop/decorators/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/humanloop/decorators/flow.py b/src/humanloop/decorators/flow.py
new file mode 100644
index 00000000..be49e395
--- /dev/null
+++ b/src/humanloop/decorators/flow.py
@@ -0,0 +1,95 @@
+import uuid
+from functools import wraps
+from typing import Any
+
+
+from humanloop.decorators.helpers import args_to_inputs
+from humanloop.otel import get_trace_context, get_tracer, pop_trace_context, push_trace_context
+from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY, HL_TRACE_METADATA_KEY, OT_EMPTY_ATTRIBUTE
+from humanloop.otel.helpers import write_to_opentelemetry_span
+
+
+def flow(
+ path: str | None = None,
+ attributes: dict[str, Any] = {},
+):
+ """Decorator to log a Flow to the Humanloop API.
+
+ The decorator logs the inputs and outputs of the decorated function to
+ create a Log against the Flow in Humanloop.
+
+    The decorator is the entrypoint to the instrumented AI feature. Decorated
+    functions called in the context of a function decorated with @flow will add
+    their Logs to a Trace in Humanloop.
+
+ Arguments:
+ path: Optional. The path to the Flow. If not provided, the function name
+ will be used as the path and the File will be created in the root
+            of your Humanloop organization's workspace.
+ attributes: Optional. The attributes of the Flow. The attributes are used
+ to version the Flow.
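+
+    Example (an illustrative sketch, not from the SDK docs; assumes a
+    `Humanloop` client has been constructed so the tracer is set, and
+    `call_model` is a hypothetical helper):
+
+        @flow(path="support/answer-question")
+        def answer_question(question: str) -> str:
+            return call_model(question)
+
+        # Each call creates a Flow Log; decorated functions called inside
+        # it add their Logs to the same Trace.
+        answer_question("How do I reset my password?")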
+ """
+
+ def decorator(func: callable):
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ with get_tracer().start_as_current_span(str(uuid.uuid4())) as span:
+ trace_metadata = get_trace_context()
+
+ if trace_metadata:
+ write_to_opentelemetry_span(
+ span=span,
+ key=HL_TRACE_METADATA_KEY,
+ value={
+ **trace_metadata,
+ "trace_id": span.get_span_context().span_id,
+ "is_flow_log": True,
+ },
+ )
+ else:
+ write_to_opentelemetry_span(
+ span=span,
+ key=HL_TRACE_METADATA_KEY,
+ value={
+ "trace_id": span.get_span_context().span_id,
+ "is_flow_log": True,
+ },
+ )
+ # Set this as the Flow to which Logs are appended
+ # Important: Flows might be nested under each other
+ push_trace_context(
+ {
+ "trace_id": span.get_span_context().span_id,
+ "trace_parent_id": span.get_span_context().span_id,
+ "is_flow_log": True,
+ },
+ )
+
+ result = func(*args, **kwargs)
+
+ pop_trace_context()
+
+ write_to_opentelemetry_span(
+ span=span,
+ key=HL_FILE_OT_KEY,
+ value={
+ "path": path if path else func.__name__,
+                        # OTel span attributes are dropped if they are empty or null.
+                        # Write the 'EMPTY' token value instead, otherwise the
+                        # 'flow' key would be dropped.
+ "flow": {"attributes": attributes} if attributes else OT_EMPTY_ATTRIBUTE,
+ },
+ )
+ write_to_opentelemetry_span(
+ span=span,
+ key=HL_LOG_OT_KEY,
+ value={
+ "inputs": args_to_inputs(func, args, kwargs),
+ "output": result,
+ },
+ )
+
+ return result
+
+ return wrapper
+
+ return decorator
diff --git a/src/humanloop/decorators/helpers.py b/src/humanloop/decorators/helpers.py
new file mode 100644
index 00000000..4c926a3f
--- /dev/null
+++ b/src/humanloop/decorators/helpers.py
@@ -0,0 +1,9 @@
+import inspect
+from typing import Any
+
+
+def args_to_inputs(func: callable, args: tuple, kwargs: dict) -> dict[str, Any]:
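+    """Map the positional and keyword arguments of a call onto the function's
+    parameter names, applying declared defaults for omitted arguments.
+    """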
+ signature = inspect.signature(func)
+ bound_args = signature.bind(*args, **kwargs)
+ bound_args.apply_defaults()
+ return dict(bound_args.arguments)
diff --git a/src/humanloop/decorators/prompt.py b/src/humanloop/decorators/prompt.py
new file mode 100644
index 00000000..72e119c5
--- /dev/null
+++ b/src/humanloop/decorators/prompt.py
@@ -0,0 +1,131 @@
+import uuid
+from functools import wraps
+from typing import Literal
+
+from humanloop.otel import get_trace_context, get_tracer, pop_trace_context, push_trace_context
+from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY, HL_TRACE_METADATA_KEY
+from humanloop.otel.helpers import write_to_opentelemetry_span
+
+
+def prompt(
+ path: str | None = None,
+ # TODO: Template can be a list of objects
+ model: str | None = None,
+ endpoint: Literal["chat", "edit", "complete"] | None = None,
+ template: str | None = None,
+ provider: Literal["openai", "openai_azure", "mock", "anthropic", "bedrock", "cohere", "replicate", "google", "groq"]
+ | None = None,
+ max_tokens: int | None = None,
+ stop: str | list[str] | None = None,
+ temperature: float | None = None,
+ top_p: float | None = None,
+ presence_penalty: float | None = None,
+ frequency_penalty: float | None = None,
+):
+ """Decorator to mark a function as a Humanloop Prompt.
+
+ The decorator intercepts calls to LLM provider APIs and uses them
+ in tandem with the template provided by the user to create a Prompt
+ in Humanloop.
+
+ Arguments:
+ path: Optional. The path where the Prompt is created. If not
+ provided, the function name is used as the path and
+ the File is created in the root of your Humanloop's
+ organization workspace.
+ template: The template for the Prompt. This is the text of
+ the system message used to set the LLM prompt. The template
+ accepts template slots using the format `{slot_name}`.
+
+ The text of the system message is matched against the template
+ to extract the slot values. The extracted values will be
+        available in the Log's inputs.
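+
+    Example (an illustrative sketch, not from the SDK docs; assumes the
+    `openai` package is installed so its calls are intercepted, and
+    `openai_client` is a hypothetical pre-configured client):
+
+        @prompt(
+            path="support/summarize",
+            model="gpt-4o",
+            template="Summarize the following text: {text}",
+            temperature=0.5,
+        )
+        def summarize(text: str) -> str:
+            response = openai_client.chat.completions.create(
+                model="gpt-4o",
+                messages=[
+                    {"role": "system", "content": f"Summarize the following text: {text}"},
+                ],
+            )
+            return response.choices[0].message.content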
+ """
+
+ def decorator(func: callable):
+ decorator.__hl_file_id = uuid.uuid4()
+
+ if temperature is not None:
+ if not 0 <= temperature < 1:
+ raise ValueError(f"{func.__name__}: Temperature parameter must be between 0 and 1")
+
+ if top_p is not None:
+ if not 0 <= top_p <= 1:
+ raise ValueError(f"{func.__name__}: Top-p parameter must be between 0 and 1")
+
+ if presence_penalty is not None:
+ if not -2 <= presence_penalty <= 2:
+ raise ValueError(f"{func.__name__}: Presence penalty parameter must be between -2 and 2")
+
+ if frequency_penalty is not None:
+ if not -2 <= frequency_penalty <= 2:
+ raise ValueError(f"{func.__name__}: Frequency penalty parameter must be between -2 and 2")
+
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ with get_tracer().start_as_current_span(str(uuid.uuid4())) as span:
+ trace_metadata = get_trace_context()
+
+ if trace_metadata:
+ # We are in a Flow context
+ write_to_opentelemetry_span(
+ span=span,
+ key=HL_TRACE_METADATA_KEY,
+ value={**trace_metadata, "is_flow_log": False},
+ )
+ # Set current Prompt to act as parent for Logs nested underneath
+ push_trace_context(
+ {
+ **trace_metadata,
+ "trace_parent_id": span.get_span_context().span_id,
+ "is_flow_log": False,
+ },
+ )
+
+ write_to_opentelemetry_span(
+ span=span,
+ key=HL_FILE_OT_KEY,
+ value={
+ "path": path if path else func.__name__,
+ "prompt": {
+ "template": template,
+ "temperature": temperature,
+ "top_p": top_p,
+ "presence_penalty": presence_penalty,
+ "frequency_penalty": frequency_penalty,
+ "model": model,
+ "endpoint": endpoint,
+ "provider": provider,
+ "max_tokens": max_tokens,
+ "stop": stop,
+ },
+ },
+ )
+
+ try:
+ output = func(*args, **kwargs)
+ except Exception as e:
+ # TODO Some fails coming from here, they result in a fast end or duplicate
+ # spans outputted to the Humanloop API
+ print(e)
+ span.record_exception(e)
+ output = None
+
+ if trace_metadata:
+                # Restore the previous trace context
+ pop_trace_context()
+
+ hl_log = {}
+            if output is not None:
+ hl_log["output"] = output
+ write_to_opentelemetry_span(
+ span=span,
+ key=HL_LOG_OT_KEY,
+ value=hl_log,
+ )
+
+ return output
+
+ return wrapper
+
+ return decorator
diff --git a/src/humanloop/decorators/tool.py b/src/humanloop/decorators/tool.py
new file mode 100644
index 00000000..e53ed85f
--- /dev/null
+++ b/src/humanloop/decorators/tool.py
@@ -0,0 +1,205 @@
+import builtins
+import inspect
+import textwrap
+import typing
+import uuid
+from functools import wraps
+
+
+from humanloop.otel import get_trace_context, get_tracer, pop_trace_context, push_trace_context
+from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY, HL_TRACE_METADATA_KEY
+from humanloop.otel.helpers import write_to_opentelemetry_span
+
+from .helpers import args_to_inputs
+
+
+def _type_to_schema(type_hint):
+ match type_hint:
+ case builtins.int:
+ return "number"
+ case builtins.float:
+ return "number"
+ case builtins.bool:
+ return "boolean"
+ case builtins.str:
+ return "string"
+ case builtins.dict:
+ return "object"
+ case _:
+ raise ValueError(f"Unsupported type hint: {type_hint}")
+
+
+def _handle_dict_annotation(parameter: inspect.Parameter) -> dict[str, object]:
+ try:
+ type_key, type_value = typing.get_args(parameter.annotation)
+ except ValueError:
+ raise ValueError("Dict annotation must have two type hints")
+ if type_key not in (builtins.str, builtins.int, typing.Literal, builtins.float):
+ raise ValueError("Dict keys must be strings or integers", parameter.name, type_key)
+ if type_value not in (
+ builtins.str,
+ builtins.int,
+ typing.Literal,
+ builtins.float,
+ dict,
+ ):
+ raise ValueError("Dict values must be strings or integers", parameter.name, type_value)
+ return {
+ "type": "object",
+ "properties": {
+ "key": {"type": _type_to_schema(type_key)},
+ "value": {"type": _type_to_schema(type_value)},
+ },
+ }
+
+
+def _handle_list_annotation(parameter: inspect.Parameter) -> dict[str, object]:
+ try:
+ list_type = typing.get_args(parameter.annotation)[0]
+ except ValueError:
+ raise ValueError("List annotation must have one type hint")
+ return {
+ "type": "array",
+ "items": {"type": _type_to_schema(list_type)},
+ }
+
+
+def _handle_union_annotation(parameter: inspect.Parameter) -> dict[str, object]:
+ union_types = [sub_type for sub_type in typing.get_args(parameter.annotation) if sub_type != type(None)]
+ if len(union_types) != 1:
+ raise ValueError("Union types are not supported. Try passing a string and parsing inside function")
+ return {"type": _type_to_schema(union_types[0])}
+
+
+def _handle_simple_type(parameter: inspect.Parameter) -> dict[str, object]:
+ if parameter.annotation is None:
+ raise ValueError("Parameters must have type hints")
+ return {"type": _type_to_schema(parameter.annotation)}
+
+
+def _parse_tool_parameters_schema(func) -> dict[str, dict]:
+ # TODO: Add tests for this, 100% it is breakable
+ signature = inspect.signature(func)
+ required = []
+ parameters_schema = {"type": "object", "properties": {}, "required": []}
+ for parameter in signature.parameters.values():
+ if parameter.kind in (
+ inspect.Parameter.VAR_POSITIONAL,
+ inspect.Parameter.VAR_KEYWORD,
+ ):
+ raise ValueError("Varargs and kwargs are not supported")
+ match typing.get_origin(parameter.annotation):
+            case builtins.dict:
+                param_schema = _handle_dict_annotation(parameter)
+                required.append(parameter.name)
+            case builtins.list:
+                param_schema = _handle_list_annotation(parameter)
+                required.append(parameter.name)
+ case typing.Union:
+ param_schema = _handle_union_annotation(parameter)
+ case None:
+ param_schema = _handle_simple_type(parameter)
+ required.append(parameter.name)
+ case _:
+ raise ValueError("Unsupported type hint ", parameter)
+ parameters_schema["properties"][parameter.name] = param_schema
+ parameters_schema["required"] = required
+ return parameters_schema
+
+
+def _tool_json_schema(func: callable):
+ tool_name = func.__name__
+ description = func.__doc__
+ if description is None:
+ description = ""
+ return {
+ "name": tool_name,
+ "description": description,
+ "parameters": _parse_tool_parameters_schema(func),
+ }
+
+
+def _extract_tool_kernel(func: callable) -> dict:
+ return {
+ "source_code": textwrap.dedent(
+ # Remove the tool decorator from source code
+ inspect.getsource(func).split("\n", maxsplit=1)[1]
+ ),
+ "function": _tool_json_schema(func),
+ "tool_type": "json_schema",
+ "strict": True,
+ }
+
+
+def tool(path: str | None = None, attributes: dict[str, typing.Any] | None = None):
+ """Decorator to mark a function as a Humanloop Tool.
+
+    The decorator inspects the wrapped function's signature and source code to
+    infer the File kernel and JSON schema for the Tool. Any change to the decorated
+ function will create a new version of the Tool, provided that the path
+ remains the same.
+
+ Every call to the decorated function will create a Log against the Tool.
+
+ Arguments:
+ path: Optional. The path to the Tool. If not provided, the function name
+ will be used as the path and the File will be created in the root
+            of your Humanloop organization's workspace.
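+
+    Example (an illustrative sketch, not from the SDK docs; the path and
+    function are hypothetical):
+
+        @tool(path="utils/add")
+        def add(a: int, b: int) -> int:
+            # The function's docstring (omitted here) becomes the Tool description
+            return a + b
+
+        add(1, 2)  # creates a Log with inputs {"a": 1, "b": 2} and output 3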
+ """
+
+ def decorator(func: callable):
+ func.json_schema = _tool_json_schema(func)
+ decorator.__hl_file_id = uuid.uuid4()
+
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ with get_tracer().start_as_current_span(str(uuid.uuid4())) as span:
+ trace_metadata = get_trace_context()
+
+ if trace_metadata:
+ write_to_opentelemetry_span(
+ span=span,
+ key=HL_TRACE_METADATA_KEY,
+ value={**trace_metadata, "is_flow_log": False},
+ )
+ push_trace_context(
+ {
+ **trace_metadata,
+ "trace_parent_id": span.get_span_context().span_id,
+ "is_flow_log": False,
+ }
+ )
+
+ output = func(*args, **kwargs)
+ if trace_metadata:
+ pop_trace_context()
+
+ tool_log = {
+ "inputs": args_to_inputs(func, args, kwargs),
+ }
+                if output is not None:
+ tool_log["output"] = output
+
+ write_to_opentelemetry_span(
+ span=span,
+ key=HL_FILE_OT_KEY,
+ value={
+ "path": path if path else func.__name__,
+ "tool": {
+ **_extract_tool_kernel(func),
+ "attributes": attributes,
+ },
+ },
+ )
+ write_to_opentelemetry_span(
+ span=span,
+ key=HL_LOG_OT_KEY,
+ value=tool_log,
+ )
+ return output
+
+ return wrapper
+
+ return decorator
diff --git a/src/humanloop/flows/client.py b/src/humanloop/flows/client.py
index 1884b45c..4b4671e7 100644
--- a/src/humanloop/flows/client.py
+++ b/src/humanloop/flows/client.py
@@ -193,10 +193,10 @@ def log(
output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.",
trace_status="incomplete",
start_time=datetime.datetime.fromisoformat(
- "2024-07-08 22:40:35+00:00",
+ "2024-07-08 21:40:35+00:00",
),
end_time=datetime.datetime.fromisoformat(
- "2024-07-08 22:40:39+00:00",
+ "2024-07-08 21:40:39+00:00",
),
)
"""
@@ -1357,10 +1357,10 @@ async def main() -> None:
output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.",
trace_status="incomplete",
start_time=datetime.datetime.fromisoformat(
- "2024-07-08 22:40:35+00:00",
+ "2024-07-08 21:40:35+00:00",
),
end_time=datetime.datetime.fromisoformat(
- "2024-07-08 22:40:39+00:00",
+ "2024-07-08 21:40:39+00:00",
),
)
diff --git a/src/humanloop/otel/__init__.py b/src/humanloop/otel/__init__.py
new file mode 100644
index 00000000..3efcc894
--- /dev/null
+++ b/src/humanloop/otel/__init__.py
@@ -0,0 +1,80 @@
+from opentelemetry import baggage
+from opentelemetry.sdk.trace import TracerProvider
+from opentelemetry.trace import Tracer
+
+from humanloop.otel.constants import HL_TRACE_METADATA_KEY
+from humanloop.otel.helpers import module_is_installed
+
+
+_TRACER = None
+
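+# Stack of OTel baggage contexts used to track the current Trace parent.
+# The stack starts with an empty root context; push_trace_context and
+# pop_trace_context manage entries around each decorated call.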
+_BAGGAGE_CONTEXT = [{}]
+
+
+def set_tracer(tracer: Tracer):
+ global _TRACER
+ _TRACER = tracer
+
+
+def get_tracer() -> Tracer:
+    assert _TRACER is not None, "Internal error: OTel Tracer should have been set in the client"
+ return _TRACER
+
+
+def instrument_provider(provider: TracerProvider):
+ """Add Instrumentors to the TracerProvider.
+
+    Instrumentors add extra spans which are merged into Humanloop Logs.
+ """
+ if module_is_installed("openai"):
+ from opentelemetry.instrumentation.openai import OpenAIInstrumentor
+
+ OpenAIInstrumentor().instrument(tracer_provider=provider)
+
+ if module_is_installed("cohere"):
+ from opentelemetry.instrumentation.cohere import CohereInstrumentor
+
+ CohereInstrumentor().instrument(tracer_provider=provider)
+
+ if module_is_installed("anthropic"):
+ from opentelemetry.instrumentation.anthropic import AnthropicInstrumentor
+
+ AnthropicInstrumentor().instrument(tracer_provider=provider)
+
+ if module_is_installed("mistralai"):
+ from opentelemetry.instrumentation.mistralai import MistralAiInstrumentor
+
+ MistralAiInstrumentor().instrument(tracer_provider=provider)
+
+
+def push_trace_context(trace_metadata: dict):
+ """Set metadata for Trace parent.
+
+    Used before the wrapped function is executed. All decorated functions
+    called from the decorated function will use this metadata to determine
+    the Log they should be associated with in the Flow Trace.
+ """
+ global _BAGGAGE_CONTEXT
+ new_context = baggage.set_baggage(
+ HL_TRACE_METADATA_KEY,
+ trace_metadata,
+ _BAGGAGE_CONTEXT[-1],
+ )
+ _BAGGAGE_CONTEXT.append(new_context)
+
+
+def pop_trace_context():
+ """Clear Trace parent metadata.
+
+ Used after the wrapped function has been executed.
+ """
+ global _BAGGAGE_CONTEXT
+ _BAGGAGE_CONTEXT.pop()
+
+
+def get_trace_context() -> dict | None:
+    """Get Trace parent metadata for Flows, or None outside a Flow Trace."""
+
+ global _BAGGAGE_CONTEXT
+
+ return baggage.get_baggage(HL_TRACE_METADATA_KEY, _BAGGAGE_CONTEXT[-1])
diff --git a/src/humanloop/otel/constants.py b/src/humanloop/otel/constants.py
new file mode 100644
index 00000000..879d2eff
--- /dev/null
+++ b/src/humanloop/otel/constants.py
@@ -0,0 +1,4 @@
+HL_FILE_OT_KEY = "humanloop.file"
+HL_LOG_OT_KEY = "humanloop.log"
+HL_TRACE_METADATA_KEY = "humanloop.flow.metadata"
+OT_EMPTY_ATTRIBUTE = "EMPTY"
diff --git a/src/humanloop/otel/exporter.py b/src/humanloop/otel/exporter.py
new file mode 100644
index 00000000..a904a1eb
--- /dev/null
+++ b/src/humanloop/otel/exporter.py
@@ -0,0 +1,133 @@
+import typing
+from opentelemetry.sdk.trace import Span
+from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult
+
+from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY, HL_TRACE_METADATA_KEY, OT_EMPTY_ATTRIBUTE
+from humanloop.otel.helpers import read_from_opentelemetry_span
+
+if typing.TYPE_CHECKING:
+ from humanloop.base_client import BaseHumanloop
+
+
+class HumanloopSpanExporter(SpanExporter):
+ """SpanExporter that uploads OpenTelemetry spans to Humanloop Humanloop spans."""
+
+ def __init__(self, client: "BaseHumanloop") -> None:
+ super().__init__()
+ self._client = client
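+        # Maps OTel span_id -> Humanloop Log ID for exported spans, used to
+        # resolve trace_parent_id when child spans are exported later. Spans
+        # belonging to a Flow Trace are buffered in _upload_queue until the
+        # Trace head (the Flow span) is seen.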
+ self._uploaded_log_ids = {}
+ self._upload_queue = []
+
+ def _export_prompt(self, span: Span) -> None:
+ file_object = read_from_opentelemetry_span(span, key=HL_FILE_OT_KEY)
+ log_object = read_from_opentelemetry_span(span, key=HL_LOG_OT_KEY)
+ try:
+ trace_metadata = read_from_opentelemetry_span(span, key=HL_TRACE_METADATA_KEY)
+ except KeyError:
+ trace_metadata = None
+ if trace_metadata:
+ trace_parent_id = self._uploaded_log_ids[trace_metadata["trace_parent_id"]]
+ else:
+ trace_parent_id = None
+ prompt = file_object["prompt"]
+ path = file_object["path"]
+ response = self._client.prompts.log(
+ path=path,
+ prompt=prompt,
+ **log_object,
+ trace_parent_id=trace_parent_id,
+ )
+ self._uploaded_log_ids[span.context.span_id] = response.id
+
+ def _export_tool(self, span: Span) -> None:
+ file_object = read_from_opentelemetry_span(span, key=HL_FILE_OT_KEY)
+ log_object = read_from_opentelemetry_span(span, key=HL_LOG_OT_KEY)
+ try:
+ trace_metadata = read_from_opentelemetry_span(span, key=HL_TRACE_METADATA_KEY)
+ except KeyError:
+ trace_metadata = None
+ if trace_metadata:
+ trace_parent_id = self._uploaded_log_ids[trace_metadata["trace_parent_id"]]
+ else:
+ trace_parent_id = None
+ tool = file_object["tool"]
+ path = file_object["path"]
+ response = self._client.tools.log(
+ path=path,
+ tool=tool,
+ **log_object,
+ trace_parent_id=trace_parent_id,
+ )
+ self._uploaded_log_ids[span.context.span_id] = response.id
+
+ def _export_flow(self, span: Span) -> None:
+ file_object = read_from_opentelemetry_span(span, key=HL_FILE_OT_KEY)
+ log_object = read_from_opentelemetry_span(span, key=HL_LOG_OT_KEY)
+ try:
+ trace_metadata = read_from_opentelemetry_span(span, key=HL_TRACE_METADATA_KEY)
+ except KeyError:
+ trace_metadata = None
+ if trace_metadata and "trace_parent_id" in trace_metadata:
+ trace_parent_id = self._uploaded_log_ids[trace_metadata["trace_parent_id"]]
+ else:
+ trace_parent_id = None
+ flow = file_object["flow"]
+ if flow == OT_EMPTY_ATTRIBUTE:
+ flow = {
+ "attributes": {},
+ }
+ path = file_object["path"]
+ response = self._client.flows.log(
+ path=path,
+ flow=flow,
+ **log_object,
+ trace_parent_id=trace_parent_id,
+ )
+ self._uploaded_log_ids[span.context.span_id] = response.id
+
+ def _export_dispatch(self, span: Span) -> None:
+ hl_file = read_from_opentelemetry_span(span, key=HL_FILE_OT_KEY)
+
+ if "prompt" in hl_file:
+ export_func = self._export_prompt
+ elif "tool" in hl_file:
+ export_func = self._export_tool
+ elif "flow" in hl_file:
+ export_func = self._export_flow
+ else:
+ raise NotImplementedError(f"Unknown span type: {hl_file}")
+ export_func(span=span)
+
+ def export(self, spans: trace.Sequence[Span]) -> SpanExportResult:
+ # TODO: Put this on a separate thread
+ for span in spans:
+ try:
+ flow_metadata = read_from_opentelemetry_span(span, key=HL_TRACE_METADATA_KEY)
+ except KeyError:
+ flow_metadata = None
+ if flow_metadata:
+ # Span is part of a Flow, queue up Spans for upload until the Trace Head is exported.
+                # The spans arrive at the Exporter in reverse order of creation, as they end.
+ # We insert them at the front of the queue so that they are processed in the correct order
+ self._upload_queue.insert(0, span)
+ if (
+ flow_metadata["is_flow_log"]
+ # The Flow might be nested in another Flow
+ # i.e. has trace_parent_id set.
+ # Wait until the top level Flow is exported
+ and "trace_parent_id" not in flow_metadata
+ ):
+ # TODO: Add threading to this: sibling Spans on the same
+ # depth level in the Trace can be uploaded in parallel
+ while len(self._upload_queue) > 0:
+ span = self._upload_queue.pop(0)
+ self._export_dispatch(span)
+ else:
+ # Span is not part of Flow, upload as singular
+                self._export_dispatch(span)
+        return SpanExportResult.SUCCESS
+
+ def force_flush(self, timeout_millis: int = 30000) -> bool:
+ # TODO: When implementing the multi-threaded version of export, this will need to be updated
+ return True
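+
+
+# A hedged wiring sketch (assumes an already-configured Humanloop client;
+# names are illustrative): the exporter registers on an OpenTelemetry
+# TracerProvider like any other SpanExporter.
+#
+#   from opentelemetry.sdk.trace import TracerProvider
+#   from opentelemetry.sdk.trace.export import SimpleSpanProcessor
+#
+#   provider = TracerProvider()
+#   provider.add_span_processor(
+#       SimpleSpanProcessor(HumanloopSpanExporter(client=humanloop_client))
+#   )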
diff --git a/src/humanloop/otel/helpers.py b/src/humanloop/otel/helpers.py
new file mode 100644
index 00000000..0e60cf11
--- /dev/null
+++ b/src/humanloop/otel/helpers.py
@@ -0,0 +1,198 @@
+import builtins
+from typing import Any
+
+from opentelemetry.sdk.trace import Span
+
+from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY
+
+
+def write_to_opentelemetry_span(span: Span, value: Any, key: str = "") -> None:
+ """Reverse of read_from_opentelemetry_span. Writes a Python object to the OpenTelemetry Span's attributes.
+
+ See `read_from_opentelemetry_span` for more information.
+
+ Arguments:
+ span: OpenTelemetry span to write values to
+ value: Python object to write to the span attributes. Can also be a primitive value.
+ key: Key prefix to write to the span attributes. The path to the values does not
+ need to exist in the span attributes.
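+
+    Examples:
+        `write_to_opentelemetry_span(span, {'x': 7, 'y': {'z': 'foo'}}, key='key')`
+        results in the following span attributes:
+        ```python
+        key.x = 7
+        key.y.z = 'foo'
+        ```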
+ """
+ to_write = dict()
+ _linear_object(to_write, value)
+ for k, v in to_write.items():
+        # OTel span attributes cannot hold None values; drop them
+        if v is not None:
+            span._attributes[f"{key}.{k}" if key != "" else k] = v
+
+
+def read_from_opentelemetry_span(span: Span, key: str = "") -> dict | list:
+ """Read a value from the OpenTelemetry span attributes.
+
+    OpenTelemetry linearises dictionaries and lists, storing only primitive values
+ in the span attributes. This function reconstructs the original structure from
+ a key prefix.
+
+ Arguments:
+ span: OpenTelemetry span to read values from
+ key: Key prefix to read from the span attributes
+
+ Returns:
+ Python object stored in the span attributes under the key prefix.
+
+ Examples:
+ `span.attributes` contains the following attributes:
+ ```python
+ foo.x.y = 7
+ foo.x.z.a = 'hello'
+ foo.x.z.b = 'world'
+ baz.0 = 42
+ baz.1 = 43
+ ```
+
+ `read_from_opentelemetry_span(span, key='foo')` returns:
+ ```python
+ {
+ 'x': {
+ 'y': 7,
+ 'z': {
+ 'a': 'hello',
+ 'b': 'world'
+ }
+ }
+ }
+ ```
+
+ `read_from_opentelemetry_span(span, key='foo.x')` returns:
+ ```python
+ {
+ 'y': 7,
+ 'z': {
+ 'a': 'hello',
+ 'b': 'world'
+ }
+ }
+ ```
+
+ `read_from_opentelemetry_span(span, key='baz')` returns:
+ ```python
+ [42, 43]
+ ```
+ """
+
+ result = dict()
+
+ to_process: list[tuple[str, Any]] = []
+ for span_key, span_value in span._attributes.items():
+ if key == "":
+ # No key prefix, add to root
+ to_process.append((f"{key}.{span_key}", span_value))
+ elif span_key.startswith(key):
+            # Keep the full key; the prefix is stripped by drilling into the result below
+ to_process.append((span_key, span_value))
+
+ if not to_process:
+ if key == "":
+ # Empty span attributes
+ return result
+ raise KeyError(f"Key {key} not found in span attributes")
+
+ for span_key, span_value in to_process:
+ parts = span_key.split(".")
+ len_parts = len(parts)
+ sub_result = result
+ for idx, part in enumerate(parts):
+ if idx == len_parts - 1:
+ sub_result[part] = span_value
+ else:
+ if part not in sub_result:
+ sub_result[part] = dict()
+ sub_result = sub_result[part]
+
+ result = _dict_to_list(result)
+ for part in key.split("."):
+ result = result[part]
+ return result
+
+
+def _linear_object(obj: dict, current: dict | list | Any, key: str = ""):
+ """Linearise a Python object into a dictionary.
+
+ Method recurses on the `current` argument, collecting all primitive values and their
+ path in the objects, then storing them in the `obj` dictionary in the end.
+
+ Arguments:
+ obj: Dictionary to store the linearised object
+        current: Python object to linearise. Used recursively when a complex
+ value is encountered.
+ key: Key prefix to store the values in the `obj` dictionary. Keys are added
+ incrementally as the function recurses.
+
+ Examples:
+ ```python
+ result = dict()
+        _linear_object(result, {'a': 1, 'b': {'c': 2, 'd': [4, 5]}})
+
+ # result is now:
+ {
+ 'a': 1,
+ 'b.c': 2,
+ 'b.d.0': 4,
+ 'b.d.1': 5
+ }
+ ```
+
+ """
+ match type(current):
+ case builtins.dict:
+ for k, v in current.items():
+ _linear_object(obj, v, f"{key}.{k}" if key != "" else k)
+ case builtins.list:
+ for idx, v in enumerate(current):
+ _linear_object(obj, v, f"{key}.{idx}" if key != "" else str(idx))
+ case _:
+ obj[key] = current
+
+
+def _dict_to_list(d: dict[str, Any]) -> dict | list:
+ """Interpret number keys parsed by the read_from_opentelemetry_span function as lists.
+
+ read_from_opentelemetry_span assumes all sub-keys in a path such as foo.0.bar are keys in
+ dictionaries. This method revisits the final result, and transforms the keys in lists where
+ appropriate.
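+
+    Examples:
+        `_dict_to_list({'0': 'a', '1': 'b'})` returns `['a', 'b']`, while
+        `_dict_to_list({'0': 'a', 'b': 1})` is returned unchanged because
+        not all keys are numeric.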
+ """
+ is_list = all(key.isdigit() for key in d.keys())
+ if is_list:
+ return [_dict_to_list(val) if isinstance(val, dict) else val for val in d.values()]
+ for key, value in d.items():
+ if isinstance(value, dict):
+ d[key] = _dict_to_list(value)
+ return d
+
+
+def is_llm_provider_call(span: Span) -> bool:
+ """Determines if the span was created by an Instrumentor for LLM provider clients."""
+ return "llm.request.type" in span.attributes
+
+
+def is_humanloop_span(span: Span) -> bool:
+ """Determines if the span was created by the Humanloop SDK."""
+ try:
+ read_from_opentelemetry_span(span, key=HL_FILE_OT_KEY)
+ read_from_opentelemetry_span(span, key=HL_LOG_OT_KEY)
+ except KeyError:
+ return False
+ return True
+
+
+def module_is_installed(module_name: str) -> bool:
+ """Returns true if the current Python environment has the module installed.
+
+ Used to check if a library that is instrumentable exists in the current environment.
+ """
+ try:
+ __import__(module_name)
+ except ImportError:
+ return False
+ return True
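+
+
+# Illustrative note (not part of the module): instrument_provider relies on
+# this check, e.g. module_is_installed("mistralai"), so instrumentors for
+# providers that are not installed are skipped instead of raising ImportError.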
diff --git a/src/humanloop/otel/processor.py b/src/humanloop/otel/processor.py
new file mode 100644
index 00000000..ce45933f
--- /dev/null
+++ b/src/humanloop/otel/processor.py
@@ -0,0 +1,173 @@
+import json
+import logging
+from collections import defaultdict
+
+import parse
+from opentelemetry.sdk.trace import Span
+from opentelemetry.sdk.trace.export import SimpleSpanProcessor, SpanExporter
+
+from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY
+from humanloop.otel.helpers import (
+ is_humanloop_span,
+ is_llm_provider_call,
+ read_from_opentelemetry_span,
+ write_to_opentelemetry_span,
+)
+
+
+class HumanloopSpanProcessor(SimpleSpanProcessor):
+ """Merge information from Instrumentors used by Humanloop SDK into the
+ Spans that will be exported to Humanloop.
+ """
+
+ def __init__(self, exporter: SpanExporter) -> None:
+ super().__init__(exporter)
+ self._spans = dict()
+ self._children = defaultdict(list)
+
+ # TODO: Could override on_start and process Flow spans ahead of time
+ # and PATCH the created Logs in on_end. A special type of Span could be
+ # used for this
+
+ def on_end(self, span: Span) -> None:
+ if is_humanloop_span(span=span):
+ _process_humanloop_span(span, self._children[span.context.span_id])
+ del self._children[span.context.span_id]
+ self.span_exporter.export([span])
+ else:
+ if span.parent is not None and _is_instrumentor_span(span):
+ self._children[span.parent.span_id].append(span)
+
+
+def _is_instrumentor_span(span: Span) -> bool:
+ # TODO: Extend in the future as needed. Spans not coming from
+ # Instrumentors of interest should be dropped
+ return is_llm_provider_call(span=span)
+
+
+def _process_humanloop_span(span: Span, children_spans: list[Span]):
+ hl_file = read_from_opentelemetry_span(span, key=HL_FILE_OT_KEY)
+
+ if "prompt" in hl_file:
+ _process_prompt(prompt_span=span, children_spans=children_spans)
+ return
+ elif "tool" in hl_file:
+ _process_tool(tool_span=span, children_spans=children_spans)
+ return
+ elif "flow" in hl_file:
+ _process_flow(flow_span=span, children_spans=children_spans)
+ return
+ else:
+ logging.error("Invalid span type")
+
+
+def _process_prompt(prompt_span: Span, children_spans: list[Span]):
+ if len(children_spans) == 0:
+ return
+ child_span = children_spans[0]
+ assert is_llm_provider_call(child_span)
+ _enrich_prompt_span_file(prompt_span, child_span)
+ _enrich_prompt_span_log(prompt_span, child_span)
+
+
+def _process_tool(tool_span: Span, children_spans: list[Span]):
+ # TODO: Use children_spans in the future
+ tool_log = read_from_opentelemetry_span(tool_span, key=HL_LOG_OT_KEY)
+ tool_log["start_time"] = tool_span.start_time / 1e9
+ tool_log["end_time"] = tool_span.end_time / 1e9
+ tool_log["created_at"] = tool_span.end_time / 1e9
+
+ write_to_opentelemetry_span(
+ span=tool_span,
+ key=HL_LOG_OT_KEY,
+ value=tool_log,
+ )
+
+
+def _process_flow(flow_span: Span, children_spans: list[Span]):
+ # TODO: Use children_spans in the future
+ flow_log = read_from_opentelemetry_span(flow_span, key=HL_LOG_OT_KEY)
+ flow_log["start_time"] = flow_span.start_time / 1e9
+ flow_log["end_time"] = flow_span.end_time / 1e9
+ flow_log["created_at"] = flow_span.end_time / 1e9
+
+ write_to_opentelemetry_span(
+ span=flow_span,
+ key=HL_LOG_OT_KEY,
+ value=flow_log,
+ )
+
+
+def _enrich_prompt_span_file(prompt_span: Span, llm_provider_call_span: Span):
+ hl_file = read_from_opentelemetry_span(prompt_span, key=HL_FILE_OT_KEY)
+ gen_ai_object = read_from_opentelemetry_span(llm_provider_call_span, key="gen_ai")
+ llm_object = read_from_opentelemetry_span(llm_provider_call_span, key="llm")
+
+ prompt_kernel = hl_file.get("prompt", {})
+ if "model" not in prompt_kernel:
+ prompt_kernel["model"] = gen_ai_object.get("request", {}).get("model", None)
+ if "endpoint" not in prompt_kernel:
+ prompt_kernel["endpoint"] = llm_object.get("request", {}).get("type")
+ if "template" not in prompt_kernel:
+ prompt_kernel["template"] = hl_file.get("prompt", {}).get("template", None)
+ if "provider" not in prompt_kernel:
+ prompt_kernel["provider"] = gen_ai_object.get("system", None)
+ if "temperature" not in prompt_kernel:
+ prompt_kernel["temperature"] = gen_ai_object.get("request", {}).get("temperature", None)
+ if "top_p" not in prompt_kernel:
+ prompt_kernel["top_p"] = gen_ai_object.get("request", {}).get("top_p", None)
+ if "max_tokens" not in prompt_kernel:
+ prompt_kernel["max_tokens"] = gen_ai_object.get("request", {}).get("max_tokens", None)
+ if "presence_penalty" not in prompt_kernel:
+ prompt_kernel["presence_penalty"] = llm_object.get("presence_penalty", None)
+ if "frequency_penalty" not in prompt_kernel:
+ prompt_kernel["frequency_penalty"] = llm_object.get("frequency_penalty", None)
+
+ write_to_opentelemetry_span(
+ span=prompt_span,
+ key=HL_FILE_OT_KEY,
+ # hl_file was modified in place via prompt_kernel reference
+ value=hl_file,
+ )
+
+
+def _enrich_prompt_span_log(prompt_span: Span, llm_provider_call_span: Span) -> Span:
+ hl_file = read_from_opentelemetry_span(prompt_span, key=HL_FILE_OT_KEY)
+ hl_log = read_from_opentelemetry_span(prompt_span, key=HL_LOG_OT_KEY)
+ gen_ai_object = read_from_opentelemetry_span(llm_provider_call_span, key="gen_ai")
+
+ # TODO: Seed not added by Instrumentors in provider call
+
+ if "output_tokens" not in hl_log:
+ hl_log["output_tokens"] = gen_ai_object.get("usage", {}).get("completion_tokens")
+ if len(gen_ai_object.get("completion", [])) > 0:
+ hl_log["finish_reason"] = gen_ai_object["completion"][0].get("finish_reason")
+ hl_log["messages"] = gen_ai_object.get("prompt", [])
+
+ hl_log["start_time"] = prompt_span.start_time / 1e9
+ hl_log["end_time"] = prompt_span.start_time / 1e9
+ hl_log["created_at"] = prompt_span.start_time / 1e9
+
+ try:
+ inputs = {}
+ system_message = gen_ai_object["prompt"][0]["content"]
+ template = hl_file["prompt"]["template"]
+ parsed = parse.parse(template, system_message)
+ for key, value in parsed.named.items():
+ try:
+ parsed_value = json.loads(value.replace("'", '"'))
+ except json.JSONDecodeError:
+ parsed_value = value
+ inputs[key] = parsed_value
+ except Exception as e:
+ logging.error(e)
+ inputs = {}
+ finally:
+ hl_log["inputs"] = inputs
+
+ write_to_opentelemetry_span(
+ span=prompt_span,
+ key=HL_LOG_OT_KEY,
+ # hl_log was modified in place
+ value=hl_log,
+ )
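+
+
+# Illustrative note on the input extraction above (not part of the module):
+# with template "You are an assistant on the following topics: {topics}."
+# and system message "You are an assistant on the following topics: math.",
+# parse.parse(template, system_message).named == {"topics": "math"}, which
+# becomes the Log's inputs.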
diff --git a/src/humanloop/prompts/client.py b/src/humanloop/prompts/client.py
index 22e2747f..88cfa117 100644
--- a/src/humanloop/prompts/client.py
+++ b/src/humanloop/prompts/client.py
@@ -232,7 +232,7 @@ def log(
messages=[{"role": "user", "content": "What really happened at Roswell?"}],
inputs={"person": "Trump"},
created_at=datetime.datetime.fromisoformat(
- "2024-07-19 00:29:35.178000+00:00",
+ "2024-07-18 23:29:35.178000+00:00",
),
provider_latency=6.5931549072265625,
output_message={
@@ -2097,7 +2097,7 @@ async def main() -> None:
],
inputs={"person": "Trump"},
created_at=datetime.datetime.fromisoformat(
- "2024-07-19 00:29:35.178000+00:00",
+ "2024-07-18 23:29:35.178000+00:00",
),
provider_latency=6.5931549072265625,
output_message={
diff --git a/src/humanloop/requests/flow_response.py b/src/humanloop/requests/flow_response.py
index 60b7753a..8e727f19 100644
--- a/src/humanloop/requests/flow_response.py
+++ b/src/humanloop/requests/flow_response.py
@@ -27,7 +27,7 @@ class FlowResponseParams(typing_extensions.TypedDict):
id: str
"""
- Unique identifier for the Flow. Starts with fl\_.
+ Unique identifier for the Flow. Starts with fl_.
"""
directory_id: typing_extensions.NotRequired[str]
diff --git a/src/humanloop/types/flow_response.py b/src/humanloop/types/flow_response.py
index 874782a1..5f8c0254 100644
--- a/src/humanloop/types/flow_response.py
+++ b/src/humanloop/types/flow_response.py
@@ -25,7 +25,7 @@ class FlowResponse(UncheckedBaseModel):
id: str = pydantic.Field()
"""
- Unique identifier for the Flow. Starts with fl\_.
+ Unique identifier for the Flow. Starts with fl_.
"""
directory_id: typing.Optional[str] = pydantic.Field(default=None)
diff --git a/tests/conftest.py b/tests/conftest.py
new file mode 100644
index 00000000..e5056ee5
--- /dev/null
+++ b/tests/conftest.py
@@ -0,0 +1,114 @@
+from typing import Generator
+from unittest.mock import MagicMock
+
+import pytest
+from opentelemetry import trace
+from opentelemetry.instrumentation.openai import OpenAIInstrumentor
+from opentelemetry.sdk.resources import Resource
+from opentelemetry.sdk.trace import Tracer, TracerProvider
+from opentelemetry.sdk.trace.export import SimpleSpanProcessor
+from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
+
+from humanloop import otel as INTERNAL_OT
+from humanloop.otel.exporter import HumanloopSpanExporter
+from humanloop.otel.processor import HumanloopSpanProcessor
+
+
+@pytest.fixture(scope="function")
+def test_span():
+ exporter = InMemorySpanExporter()
+ processor = SimpleSpanProcessor(exporter)
+ provider = TracerProvider()
+ provider.add_span_processor(processor)
+ trace.set_tracer_provider(provider)
+ tracer = trace.get_tracer("test")
+ return tracer.start_span("test_span")
+
+
+@pytest.fixture(scope="function")
+def opentelemetry_test_provider() -> TracerProvider:
+ provider = TracerProvider(
+ resource=Resource.create(
+ {
+ "service": "humanloop.sdk",
+ "environment": "test",
+ }
+ )
+ )
+ return provider
+
+
+@pytest.fixture(scope="function")
+def opentelemetry_test_configuration(
+ opentelemetry_test_provider: TracerProvider,
+) -> Generator[tuple[Tracer, InMemorySpanExporter], None, None]:
+ exporter = InMemorySpanExporter()
+ processor = SimpleSpanProcessor(exporter)
+ opentelemetry_test_provider.add_span_processor(processor)
+ instrumentor = OpenAIInstrumentor()
+ instrumentor.instrument(tracer_provider=opentelemetry_test_provider)
+ tracer = opentelemetry_test_provider.get_tracer("test")
+ # Circumvent configuration procedure
+ INTERNAL_OT._TRACER = tracer
+
+ yield tracer, exporter
+
+ instrumentor.uninstrument()
+ INTERNAL_OT._TRACER = None
+
+
+@pytest.fixture(scope="function")
+def opentelemetry_hl_test_configuration(
+ opentelemetry_test_provider: TracerProvider,
+) -> Generator[tuple[Tracer, InMemorySpanExporter], None, None]:
+ exporter = InMemorySpanExporter()
+ processor = HumanloopSpanProcessor(exporter=exporter)
+ opentelemetry_test_provider.add_span_processor(processor)
+ instrumentor = OpenAIInstrumentor()
+ instrumentor.instrument(tracer_provider=opentelemetry_test_provider)
+ tracer = opentelemetry_test_provider.get_tracer("test")
+ INTERNAL_OT._TRACER = tracer
+
+ yield tracer, exporter
+
+ instrumentor.uninstrument()
+ INTERNAL_OT._TRACER = None
+
+
+@pytest.fixture(scope="function")
+def hl_test_exporter() -> HumanloopSpanExporter:
+ client = MagicMock()
+ exporter = HumanloopSpanExporter(client=client)
+ return exporter
+
+
+@pytest.fixture(scope="function")
+def opentelemetry_hl_with_exporter_test_configuration(
+ hl_test_exporter: HumanloopSpanExporter,
+ opentelemetry_test_provider: TracerProvider,
+) -> Generator[tuple[Tracer, HumanloopSpanExporter], None, None]:
+ processor = HumanloopSpanProcessor(exporter=hl_test_exporter)
+ opentelemetry_test_provider.add_span_processor(processor)
+ instrumentor = OpenAIInstrumentor()
+ instrumentor.instrument(tracer_provider=opentelemetry_test_provider)
+ tracer = opentelemetry_test_provider.get_tracer("test")
+ INTERNAL_OT._TRACER = tracer
+
+ yield tracer, hl_test_exporter
+
+ instrumentor.uninstrument()
+ INTERNAL_OT._TRACER = None
+
+
+@pytest.fixture(scope="session")
+def call_llm_messages() -> list[dict]:
+ return [
+ {
+ "role": "system",
+ "content": "You are an assistant on the following topics: greetings in foreign languages.",
+ },
+ {
+ "role": "user",
+ "content": "Bonjour!",
+ },
+ ]
diff --git a/tests/decorators/__init__.py b/tests/decorators/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/decorators/test_flow_decorator.py b/tests/decorators/test_flow_decorator.py
new file mode 100644
index 00000000..f2c36ee8
--- /dev/null
+++ b/tests/decorators/test_flow_decorator.py
@@ -0,0 +1,219 @@
+import os
+import random
+import string
+from unittest.mock import patch
+
+import pytest
+from openai import OpenAI
+from opentelemetry.sdk.trace import Tracer
+from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
+
+from humanloop.decorators.flow import flow
+from humanloop.otel.constants import HL_FILE_OT_KEY, HL_TRACE_METADATA_KEY
+from humanloop.otel.exporter import HumanloopSpanExporter
+from humanloop.otel.helpers import read_from_opentelemetry_span
+from humanloop.decorators.prompt import prompt
+from humanloop.decorators.tool import tool
+
+
+@tool()
+def _random_string() -> str:
+ """Return a random string."""
+    # NOTE: This is intentionally basic; the goal is just to check that the
+    # tool call is picked up and included in the Flow Trace
+ return "".join(random.choices(string.ascii_letters + string.digits, k=10))
+
+
+@prompt(path=None, template="You are an assistant on the following topics: {topics}.")
+def _call_llm(messages: list[dict]) -> str:
+ # NOTE: These tests check if instrumentors are capable of intercepting OpenAI
+ # provider calls. Could not find a way to intercept them coming from a Mock.
+ client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
+ return (
+ client.chat.completions.create(
+ model="gpt-4o",
+ messages=messages,
+ temperature=0.8,
+ )
+ .choices[0]
+ .message.content
+ ) + _random_string()
+
+
+@flow(attributes={"foo": "bar", "baz": 7})
+def _agent_call(messages: list[dict]) -> str:
+ return _call_llm(messages=messages)
+
+
+@flow()
+def _flow_over_flow(messages: list[dict]) -> str:
+ return _agent_call(messages=messages)
+
+
+def test_no_flow(
+ opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter],
+):
+ # GIVEN a call to @prompt annotated function that calls a @tool
+ _, exporter = opentelemetry_hl_test_configuration
+ _call_llm(
+ [
+ {
+ "role": "system",
+ "content": "You are an assistant on the following topics: greetings in foreign languages.",
+ },
+ {
+ "role": "user",
+ "content": "Hello, how are you?",
+ },
+ ]
+ )
+ # WHEN exporting the spans
+ spans = exporter.get_finished_spans()
+ # THEN 2 independent spans are exported with no relation to each other
+ assert len(spans) == 2
+ assert read_from_opentelemetry_span(span=spans[0], key=HL_FILE_OT_KEY)["tool"]
+ assert read_from_opentelemetry_span(span=spans[1], key=HL_FILE_OT_KEY)["prompt"]
+ for span in spans:
+ # THEN no metadata related to trace is present on either of them
+ with pytest.raises(KeyError):
+ read_from_opentelemetry_span(span=span, key=HL_TRACE_METADATA_KEY)
+
+
+def test_with_flow(
+ opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter],
+):
+ # GIVEN a @flow entrypoint to an instrumented application
+ _, exporter = opentelemetry_hl_test_configuration
+ # WHEN calling the Flow
+ _agent_call(
+ [
+ {
+ "role": "system",
+ "content": "You are an assistant on the following topics: greetings in foreign languages.",
+ },
+ {
+ "role": "user",
+ "content": "Hello, how are you?",
+ },
+ ]
+ )
+ # THEN 3 spans are created
+ spans = exporter.get_finished_spans()
+ assert len(spans) == 3
+    # THEN the spans are returned bottom to top
+ assert read_from_opentelemetry_span(span=spans[0], key=HL_FILE_OT_KEY)["tool"]
+ assert read_from_opentelemetry_span(span=spans[1], key=HL_FILE_OT_KEY)["prompt"]
+ assert read_from_opentelemetry_span(span=spans[2], key=HL_FILE_OT_KEY)["flow"]
+ tool_trace_metadata = read_from_opentelemetry_span(span=spans[0], key=HL_TRACE_METADATA_KEY)
+ prompt_trace_metadata = read_from_opentelemetry_span(span=spans[1], key=HL_TRACE_METADATA_KEY)
+ flow_trace_metadata = read_from_opentelemetry_span(span=spans[2], key=HL_TRACE_METADATA_KEY)
+ # THEN Tool span is a child of Prompt span
+ assert tool_trace_metadata["trace_parent_id"] == spans[1].context.span_id
+ assert tool_trace_metadata["is_flow_log"] is False
+ assert prompt_trace_metadata["trace_parent_id"] == spans[2].context.span_id
+ # THEN Prompt span is a child of Flow span
+ assert prompt_trace_metadata["is_flow_log"] is False
+ assert flow_trace_metadata["is_flow_log"]
+ assert flow_trace_metadata["trace_id"] == spans[2].context.span_id
+
+
+def test_flow_in_flow(
+ opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter],
+ call_llm_messages: list[dict],
+):
+ # GIVEN A configured OpenTelemetry tracer and exporter
+ _, exporter = opentelemetry_hl_test_configuration
+
+    # WHEN calling the _flow_over_flow function with specific messages
+ _flow_over_flow(call_llm_messages)
+
+ # THEN Spans correctly produce a Flow Trace
+ spans = exporter.get_finished_spans()
+ assert len(spans) == 4
+ assert read_from_opentelemetry_span(span=spans[0], key=HL_FILE_OT_KEY)["tool"]
+ assert read_from_opentelemetry_span(span=spans[1], key=HL_FILE_OT_KEY)["prompt"]
+ assert read_from_opentelemetry_span(span=spans[2], key=HL_FILE_OT_KEY)["flow"]
+ assert read_from_opentelemetry_span(span=spans[3], key=HL_FILE_OT_KEY)["flow"]
+
+ tool_trace_metadata = read_from_opentelemetry_span(span=spans[0], key=HL_TRACE_METADATA_KEY)
+ prompt_trace_metadata = read_from_opentelemetry_span(span=spans[1], key=HL_TRACE_METADATA_KEY)
+ nested_flow_trace_metadata = read_from_opentelemetry_span(span=spans[2], key=HL_TRACE_METADATA_KEY)
+ flow_trace_metadata = read_from_opentelemetry_span(span=spans[3], key=HL_TRACE_METADATA_KEY)
+ # THEN the nested flow points to the parent flow
+ assert tool_trace_metadata["trace_parent_id"] == spans[1].context.span_id
+ assert tool_trace_metadata["is_flow_log"] is False
+ assert prompt_trace_metadata["trace_parent_id"] == spans[2].context.span_id
+ assert prompt_trace_metadata["is_flow_log"] is False
+ assert nested_flow_trace_metadata["trace_id"] == spans[2].context.span_id
+    # THEN both flows are Flow Logs and the outer flow points to itself
+ assert nested_flow_trace_metadata["is_flow_log"]
+ assert nested_flow_trace_metadata["trace_parent_id"] == spans[3].context.span_id
+ assert flow_trace_metadata["is_flow_log"]
+ assert flow_trace_metadata["trace_id"] == spans[3].context.span_id
+
+
+def test_hl_exporter_with_flow(
+ call_llm_messages: list[dict],
+ opentelemetry_hl_with_exporter_test_configuration: tuple[Tracer, HumanloopSpanExporter],
+):
+    # GIVEN an OpenTelemetry configuration with a mock Humanloop SDK and a spied exporter
+ _, exporter = opentelemetry_hl_with_exporter_test_configuration
+ with patch.object(exporter, "export", wraps=exporter.export) as mock_export_method:
+ # WHEN calling the @flow decorated function
+ _agent_call(call_llm_messages)
+ assert len(mock_export_method.call_args_list) == 3
+ first_exported_span = mock_export_method.call_args_list[0][0][0][0]
+ middle_exported_span = mock_export_method.call_args_list[1][0][0][0]
+ last_exported_span = mock_export_method.call_args_list[2][0][0][0]
+ # THEN the last uploaded span is the Flow
+ assert read_from_opentelemetry_span(span=last_exported_span, key=HL_FILE_OT_KEY)["flow"]["attributes"] == {
+ "foo": "bar",
+ "baz": 7,
+ }
+ # THEN the second uploaded span is the Prompt
+ assert "prompt" in read_from_opentelemetry_span(span=middle_exported_span, key=HL_FILE_OT_KEY)
+ # THEN the first uploaded span is the Tool
+ assert "tool" in read_from_opentelemetry_span(span=first_exported_span, key=HL_FILE_OT_KEY)
+
+ # THEN the first Log uploaded is the Flow
+ first_log = exporter._client.flows.log.call_args_list[0][1]
+ assert "flow" in first_log
+ exporter._client.flows.log.assert_called_once()
+ flow_log_call_args = exporter._client.flows.log.call_args_list[0]
+ flow_log_call_args.kwargs["flow"]["attributes"] == {"foo": "bar", "baz": 7}
+ flow_log_id = exporter._client.flows.log.return_value
+
+ # THEN the second Log uploaded is the Prompt
+ exporter._client.prompts.log.assert_called_once()
+ prompt_log_call_args = exporter._client.prompts.log.call_args_list[0]
+ prompt_log_call_args.kwargs["trace_parent_id"] == flow_log_id
+ prompt_log_call_args.kwargs["prompt"]["temperature"] == 0.8
+ prompt_log_id = exporter._client.prompts.log.return_value
+
+ # THEN the final Log uploaded is the Tool
+ exporter._client.tools.log.assert_called_once()
+ tool_log_call_args = exporter._client.tools.log.call_args_list[0]
+ tool_log_call_args.kwargs["trace_parent_id"] == prompt_log_id
+
+
+def test_nested_flow_exporting(
+ call_llm_messages: list[dict],
+ opentelemetry_hl_with_exporter_test_configuration: tuple[Tracer, HumanloopSpanExporter],
+):
+    # GIVEN an OpenTelemetry configuration with a mock Humanloop SDK and a spied exporter
+ _, exporter = opentelemetry_hl_with_exporter_test_configuration
+ with patch.object(exporter, "export", wraps=exporter.export) as mock_export_method:
+ # WHEN calling the @flow decorated function
+ _flow_over_flow(call_llm_messages)
+ assert len(mock_export_method.call_args_list) == 4
+ # THEN the last uploaded span is the larger Flow
+ # THEN the second to last uploaded span is the nested Flow
+ last_exported_span = mock_export_method.call_args_list[3][0][0][0]
+ previous_exported_span = mock_export_method.call_args_list[2][0][0][0]
+ last_span_flow_metadata = read_from_opentelemetry_span(span=last_exported_span, key=HL_TRACE_METADATA_KEY)
+ previous_span_flow_metadata = read_from_opentelemetry_span(
+ span=previous_exported_span, key=HL_TRACE_METADATA_KEY
+ )
+ assert previous_span_flow_metadata["trace_parent_id"] == last_exported_span.context.span_id
+ assert last_span_flow_metadata["is_flow_log"]
+ assert previous_span_flow_metadata["is_flow_log"]
diff --git a/tests/decorators/test_prompt_decorator.py b/tests/decorators/test_prompt_decorator.py
new file mode 100644
index 00000000..d4a36755
--- /dev/null
+++ b/tests/decorators/test_prompt_decorator.py
@@ -0,0 +1,97 @@
+import os
+
+from dotenv import load_dotenv
+from openai import OpenAI
+from opentelemetry.sdk.trace import Tracer
+from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
+
+from humanloop.otel.constants import HL_FILE_OT_KEY
+from humanloop.otel.helpers import is_humanloop_span, read_from_opentelemetry_span
+from humanloop.decorators.prompt import prompt
+
+
+@prompt(path=None, template="You are an assistant on the following topics: {topics}.")
+def _call_llm(messages: list[dict]) -> str:
+ load_dotenv()
+ # NOTE: These tests check if instrumentors are capable of intercepting OpenAI
+ # provider calls. Could not find a way to intercept them coming from a Mock.
+ client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
+ return (
+ client.chat.completions.create(
+ model="gpt-4o",
+ messages=messages,
+ temperature=0.8,
+ )
+ .choices[0]
+ .message.content
+ )
+
+
+@prompt(path=None, template="You are an assistant on the following topics: {topics}.", temperature=0.9, top_p=0.1)
+def _call_llm_with_defaults(messages: list[dict]) -> str:
+ load_dotenv()
+ client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
+ return (
+ client.chat.completions.create(
+ model="gpt-4o",
+ messages=messages,
+ temperature=0.8,
+ )
+ .choices[0]
+ .message.content
+ )
+
+
+def test_prompt(
+ opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter],
+ call_llm_messages: list[dict],
+):
+ # GIVEN a default OpenTelemetry configuration
+ _, exporter = opentelemetry_test_configuration
+ # WHEN using the Prompt decorator
+ _call_llm(messages=call_llm_messages)
+ # THEN two spans are created: one for the OpenAI LLM provider call and one for the Prompt
+ spans = exporter.get_finished_spans()
+ assert len(spans) == 2
+ # THEN the Prompt span is not enhanced with information from the LLM provider
+ assert is_humanloop_span(spans[1])
+ assert spans[1].attributes.get("prompt") is None
+
+
+def test_prompt_hl_processor(
+ opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter],
+ call_llm_messages: list[dict],
+):
+ # GIVEN an OpenTelemetry configuration with a Humanloop Span processor
+ _, exporter = opentelemetry_hl_test_configuration
+ # WHEN using the Prompt decorator
+ _call_llm(messages=call_llm_messages)
+ # THEN a single span is created since the LLM provider call span is merged in the Prompt span
+ spans = exporter.get_finished_spans()
+ assert len(spans) == 1
+ assert is_humanloop_span(spans[0])
+ prompt = read_from_opentelemetry_span(span=spans[0], key=HL_FILE_OT_KEY)["prompt"]
+ assert prompt is not None
+ # THEN temperature is taken from LLM provider call, but top_p is not since it is not specified
+ assert prompt["temperature"] == 0.8
+ assert prompt.get("top_p") is None
+
+
+def test_prompt_with_defaults(
+ opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter],
+ call_llm_messages: list[dict],
+):
+ # GIVEN an OpenTelemetry configuration with a Humanloop Span processor
+ _, exporter = opentelemetry_hl_test_configuration
+ # WHEN using the Prompt decorator with default values
+ _call_llm_with_defaults(messages=call_llm_messages)
+ # THEN a single span is created since the LLM provider call span is merged in the Prompt span
+ spans = exporter.get_finished_spans()
+ assert len(spans) == 1
+ assert is_humanloop_span(spans[0])
+ prompt = read_from_opentelemetry_span(span=spans[0], key=HL_FILE_OT_KEY)["prompt"]
+ assert prompt is not None
+ # THEN temperature is taken from decorator rather than intercepted LLM provider call
+ assert prompt["temperature"] == 0.9
+ # THEN top_p is present
+ assert prompt["top_p"] == 0.1
diff --git a/tests/decorators/test_tool_decorator.py b/tests/decorators/test_tool_decorator.py
new file mode 100644
index 00000000..225e61fa
--- /dev/null
+++ b/tests/decorators/test_tool_decorator.py
@@ -0,0 +1,43 @@
+from opentelemetry.sdk.trace import Tracer
+from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
+
+from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY
+from humanloop.otel.helpers import read_from_opentelemetry_span
+from humanloop.decorators.tool import tool
+
+
+@tool()
+def calculator(operation: str, num1: int, num2: int) -> str:
+ """Do arithmetic operations on two numbers."""
+ if operation == "add":
+ return num1 + num2
+ elif operation == "subtract":
+ return num1 - num2
+ elif operation == "multiply":
+ return num1 * num2
+ elif operation == "divide":
+ return num1 / num2
+ else:
+ return "Invalid operation"
+
+
+def test_calculator_decorator(
+ opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter],
+):
+ # GIVEN a test OpenTelemetry configuration
+ _, exporter = opentelemetry_hl_test_configuration
+ # WHEN calling the @tool decorated function
+ result = calculator(operation="add", num1=1, num2=2)
+ # THEN a single span is created and the log and file attributes are correctly set
+ spans = exporter.get_finished_spans()
+ assert len(spans) == 1
+ hl_file = read_from_opentelemetry_span(span=spans[0], key=HL_FILE_OT_KEY)
+ hl_log = read_from_opentelemetry_span(span=spans[0], key=HL_LOG_OT_KEY)
+ assert hl_log["output"] == result == 3
+ assert hl_log["inputs"] == {
+ "operation": "add",
+ "num1": 1,
+ "num2": 2,
+ }
+ hl_file["tool"]["function"]["description"] == "Do arithmetic operations on two numbers."
+ assert calculator.json_schema == hl_file["tool"]["function"]
diff --git a/tests/otel/__init__.py b/tests/otel/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/otel/test_helpers.py b/tests/otel/test_helpers.py
new file mode 100644
index 00000000..da3648c6
--- /dev/null
+++ b/tests/otel/test_helpers.py
@@ -0,0 +1,153 @@
+import pytest
+from opentelemetry.sdk.trace import Span
+
+from humanloop.otel.helpers import read_from_opentelemetry_span, write_to_opentelemetry_span
+
+
+def test_read_empty(test_span: Span):
+ assert read_from_opentelemetry_span(test_span) == {}
+
+
+def test_read_non_existent_key(test_span: Span):
+ with pytest.raises(KeyError):
+ assert read_from_opentelemetry_span(test_span, "key") == {}
+ write_to_opentelemetry_span(test_span, {"x": 7, "y": "foo"}, key="key")
+ assert dict(test_span.attributes) == {
+ "key.x": 7,
+ "key.y": "foo",
+ }
+ with pytest.raises(KeyError):
+ assert read_from_opentelemetry_span(test_span, "key.z") is None
+
+
+def test_simple_dict(test_span: Span):
+ write_to_opentelemetry_span(test_span, {"x": 7, "y": "foo"}, "key")
+ assert dict(test_span.attributes) == {
+ "key.x": 7,
+ "key.y": "foo",
+ }
+ assert read_from_opentelemetry_span(test_span, "key") == {"x": 7, "y": "foo"}
+
+
+def test_no_prefix(test_span: Span):
+ write_to_opentelemetry_span(test_span, {"x": 7, "y": "foo"})
+ assert dict(test_span.attributes) == {
+ "x": 7,
+ "y": "foo",
+ }
+ assert read_from_opentelemetry_span(test_span) == {"x": 7, "y": "foo"}
+
+
+def test_nested_object(test_span: Span):
+ write_to_opentelemetry_span(test_span, {"x": 7, "y": {"z": "foo"}}, "key")
+ assert dict(test_span.attributes) == {
+ "key.x": 7,
+ "key.y.z": "foo",
+ }
+ assert read_from_opentelemetry_span(test_span, "key") == {"x": 7, "y": {"z": "foo"}}
+
+
+def test_list(test_span: Span):
+ write_to_opentelemetry_span(test_span, [{"x": 7, "y": "foo"}, {"z": "bar"}], "key")
+ assert dict(test_span.attributes) == {
+ "key.0.x": 7,
+ "key.0.y": "foo",
+ "key.1.z": "bar",
+ }
+ assert read_from_opentelemetry_span(test_span, "key") == [
+ {"x": 7, "y": "foo"},
+ {"z": "bar"},
+ ]
+
+
+def test_list_no_prefix(test_span: Span):
+ write_to_opentelemetry_span(test_span, [{"x": 7, "y": "foo"}, {"z": "bar"}])
+ assert dict(test_span.attributes) == {
+ "0.x": 7,
+ "0.y": "foo",
+ "1.z": "bar",
+ }
+ assert read_from_opentelemetry_span(test_span) == [
+ {"x": 7, "y": "foo"},
+ {"z": "bar"},
+ ]
+
+
+def test_multiple_nestings(test_span: Span):
+ write_to_opentelemetry_span(
+ test_span,
+ [
+ {"x": 7, "y": "foo"},
+ [{"z": "bar"}, {"a": 42}],
+ ],
+ "key",
+ )
+ assert dict(test_span.attributes) == {
+ "key.0.x": 7,
+ "key.0.y": "foo",
+ "key.1.0.z": "bar",
+ "key.1.1.a": 42,
+ }
+ assert read_from_opentelemetry_span(test_span, "key") == [
+ {"x": 7, "y": "foo"},
+ [{"z": "bar"}, {"a": 42}],
+ ]
+
+
+def test_read_mixed_numeric_string_keys(test_span: Span):
+ test_span.set_attributes(
+ {
+ "key.0.x": 7,
+ "key.0.y": "foo",
+ "key.a.z": "bar",
+ "key.a.a": 42,
+ }
+ )
+ assert read_from_opentelemetry_span(span=test_span, key="key") == {
+ "0": {"x": 7, "y": "foo"},
+ "a": {"z": "bar", "a": 42},
+ }
+ assert read_from_opentelemetry_span(span=test_span) == {
+ "key": {
+ "0": {"x": 7, "y": "foo"},
+ "a": {"z": "bar", "a": 42},
+ }
+ }
+
+
+def test_sub_key_same_as_key(test_span: Span):
+ write_to_opentelemetry_span(test_span, {"key": 7}, "key")
+ assert dict(test_span.attributes) == {
+ "key.key": 7,
+ }
+ assert read_from_opentelemetry_span(test_span, "key") == {"key": 7}
+
+
+def test_read_nested_key(test_span: Span):
+ test_span.set_attributes({"key.x": 7, "key.y.z": "foo"})
+ assert read_from_opentelemetry_span(span=test_span, key="key.y") == {"z": "foo"}
+
+
+def test_write_read_sub_key(test_span: Span):
+ write_to_opentelemetry_span(test_span, {"x": 7, "y": "foo"}, "key")
+ assert read_from_opentelemetry_span(test_span, "key.x") == 7
+ assert read_from_opentelemetry_span(test_span, "key.y") == "foo"
+ assert read_from_opentelemetry_span(test_span, "key") == {"x": 7, "y": "foo"}
+
+
+def test_write_drops_dict_all_null_values(test_span: Span):
+ # GIVEN a test_span to which a value with null values is written
+ write_to_opentelemetry_span(test_span, {"x": None, "y": None}, "key")
+ # WHEN reading the value from the span
+ # THEN the value is not present in the span attributes
+ assert "key" not in test_span.attributes
+ with pytest.raises(KeyError):
+ read_from_opentelemetry_span(test_span, "key") == {}
+
+
+def test_write_drops_null_value_from_dict(test_span: Span):
+ # GIVEN a test_span to which a dict with some null values are written
+ write_to_opentelemetry_span(test_span, {"x": 2, "y": None}, "key")
+ # WHEN reading the values from the span
+ # THEN the value with null value is not present in the span attributes
+ read_from_opentelemetry_span(test_span, "key") == {"x": 2}
From 4cc0e3edab74144f9b34f04ec26c42c989ea51ad Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Sat, 26 Oct 2024 22:39:08 +0300
Subject: [PATCH 06/70] Added threading to exporter
---
src/humanloop/client.py | 125 +++++++++++++++++++-----
src/humanloop/decorators/flow.py | 17 ----
src/humanloop/decorators/prompt.py | 20 ----
src/humanloop/decorators/tool.py | 15 ---
src/humanloop/otel/exporter.py | 83 +++++++++-------
tests/decorators/test_flow_decorator.py | 5 +
6 files changed, 153 insertions(+), 112 deletions(-)
diff --git a/src/humanloop/client.py b/src/humanloop/client.py
index 64f5e177..e380f15c 100644
--- a/src/humanloop/client.py
+++ b/src/humanloop/client.py
@@ -1,16 +1,14 @@
import typing
-from typing import Optional, List, Sequence
+from typing import Literal, Optional, List, Sequence
import os
import httpx
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.trace import TracerProvider
-from .decorators.flow import flow
-from .decorators.prompt import prompt
-from .decorators.tool import tool
+from .decorators.flow import flow as flow_decorator
+from .decorators.prompt import prompt as prompt_decorator
+from .decorators.tool import tool as tool_decorator
from humanloop.core.client_wrapper import SyncClientWrapper
-from humanloop.flows.client import FlowsClient
-from humanloop.tools.client import ToolsClient
from .otel.exporter import HumanloopSpanExporter
from .otel.processor import HumanloopSpanProcessor
from .otel import instrument_provider, set_tracer
@@ -60,27 +58,9 @@ class ExtendedPromptsClient(PromptsClient):
def __init__(self, client_wrapper: SyncClientWrapper):
super().__init__(client_wrapper=client_wrapper)
- decorate = staticmethod(prompt)
- decorate.__doc__ = prompt.__doc__
populate_template = staticmethod(populate_template)
-class ExtendedToolsClient(ToolsClient):
- def __init__(self, client_wrapper: SyncClientWrapper):
- super().__init__(client_wrapper=client_wrapper)
-
- decorate = staticmethod(tool)
- decorate.__doc__ = tool.__doc__
-
-
-class ExtendedFlowsClient(FlowsClient):
- def __init__(self, client_wrapper: SyncClientWrapper):
- super().__init__(client_wrapper=client_wrapper)
-
- decorate = staticmethod(flow)
- decorate.__doc__ = flow.__doc__
-
-
class Humanloop(BaseHumanloop):
"""
See docstring of BaseHumanloop.
@@ -133,8 +113,101 @@ def __init__(
eval_client.client = self
self.evaluations = eval_client
self.prompts = ExtendedPromptsClient(client_wrapper=self._client_wrapper)
- self.flows = ExtendedFlowsClient(client_wrapper=self._client_wrapper)
- self.tools = ExtendedToolsClient(client_wrapper=self._client_wrapper)
+
+ def prompt(
+ self,
+ # TODO: Template can be a list of objects
+ path: str | None = None,
+ model: str | None = None,
+ endpoint: Literal["chat", "edit", "complete"] | None = None,
+ template: str | None = None,
+ provider: Literal[
+ "openai", "openai_azure", "mock", "anthropic", "bedrock", "cohere", "replicate", "google", "groq"
+ ]
+ | None = None,
+ max_tokens: int | None = None,
+ stop: str | list[str] | None = None,
+ temperature: float | None = None,
+ top_p: float | None = None,
+ presence_penalty: float | None = None,
+ frequency_penalty: float | None = None,
+ ):
+ """Decorator to mark a function as a Humanloop Prompt.
+
+ The decorator intercepts calls to LLM provider APIs and uses them
+ in tandem with the template provided by the user to create a Prompt
+ in Humanloop.
+
+ Arguments:
+ path: Optional. The path where the Prompt is created. If not
+ provided, the function name is used as the path and
+ the File is created in the root of your Humanloop's
+ organization workspace.
+ template: The template for the Prompt. This is the text of
+ the system message used to set the LLM prompt. The template
+ accepts template slots using the format `{slot_name}`.
+
+ The text of the system message is matched against the template
+ to extract the slot values. The extracted values will be
+ available in the Log's inputs
+ """
+ return prompt_decorator(
+ path=path,
+ model=model,
+ endpoint=endpoint,
+ template=template,
+ provider=provider,
+ max_tokens=max_tokens,
+ stop=stop,
+ temperature=temperature,
+ top_p=top_p,
+ presence_penalty=presence_penalty,
+ frequency_penalty=frequency_penalty,
+ )
+
+ def tool(
+ self,
+ path: str | None = None,
+ attributes: dict[str, typing.Any] | None = None,
+ ):
+ """Decorator to mark a function as a Humanloop Tool.
+
+        The decorator inspects the wrapped function signature and code to infer
+ the File kernel and JSON schema for the Tool. Any change to the decorated
+ function will create a new version of the Tool, provided that the path
+ remains the same.
+
+ Every call to the decorated function will create a Log against the Tool.
+
+ Arguments:
+ path: Optional. The path to the Tool. If not provided, the function name
+ will be used as the path and the File will be created in the root
+            of your Humanloop organization's workspace.
+ """
+ return tool_decorator(path=path, attributes=attributes)
+
+ def flow(
+ self,
+ path: str | None = None,
+ attributes: dict[str, typing.Any] = {},
+ ):
+ """Decorator to log a Flow to the Humanloop API.
+
+ The decorator logs the inputs and outputs of the decorated function to
+ create a Log against the Flow in Humanloop.
+
+ The decorator is an entrypoint to the instrumented AI feature. Decorated
+        functions called in the context of a function decorated with @flow will create
+ a Trace in Humanloop.
+
+ Arguments:
+ path: Optional. The path to the Flow. If not provided, the function name
+ will be used as the path and the File will be created in the root
+            of your Humanloop organization's workspace.
+ attributes: Optional. The attributes of the Flow. The attributes are used
+ to version the Flow.
+ """
+ return flow_decorator(path=path, attributes=attributes)
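+
+    # A hedged usage sketch (illustrative names): the decorators are accessed
+    # on the client instance.
+    #
+    #   hl = Humanloop(api_key="...")
+    #
+    #   @hl.flow(path="agents/qa")
+    #   def answer(question: str) -> str:
+    #       ...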
class AsyncHumanloop(AsyncBaseHumanloop):
diff --git a/src/humanloop/decorators/flow.py b/src/humanloop/decorators/flow.py
index be49e395..67bbc03d 100644
--- a/src/humanloop/decorators/flow.py
+++ b/src/humanloop/decorators/flow.py
@@ -13,23 +13,6 @@ def flow(
path: str | None = None,
attributes: dict[str, Any] = {},
):
- """Decorator to log a Flow to the Humanloop API.
-
- The decorator logs the inputs and outputs of the decorated function to
- create a Log against the Flow in Humanloop.
-
- The decorator is an entrypoint to the instrumented AI feature. Decorated
- functions called in the context of function decorated with Flow will create
- a Trace in Humanloop.
-
- Arguments:
- path: Optional. The path to the Flow. If not provided, the function name
- will be used as the path and the File will be created in the root
- of your Humanloop's organization workspace.
- attributes: Optional. The attributes of the Flow. The attributes are used
- to version the Flow.
- """
-
def decorator(func: callable):
@wraps(func)
def wrapper(*args, **kwargs):
diff --git a/src/humanloop/decorators/prompt.py b/src/humanloop/decorators/prompt.py
index 72e119c5..2c7c5324 100644
--- a/src/humanloop/decorators/prompt.py
+++ b/src/humanloop/decorators/prompt.py
@@ -22,26 +22,6 @@ def prompt(
presence_penalty: float | None = None,
frequency_penalty: float | None = None,
):
- """Decorator to mark a function as a Humanloop Prompt.
-
- The decorator intercepts calls to LLM provider APIs and uses them
- in tandem with the template provided by the user to create a Prompt
- in Humanloop.
-
- Arguments:
- path: Optional. The path where the Prompt is created. If not
- provided, the function name is used as the path and
- the File is created in the root of your Humanloop's
- organization workspace.
- template: The template for the Prompt. This is the text of
- the system message used to set the LLM prompt. The template
- accepts template slots using the format `{slot_name}`.
-
- The text of the system message is matched against the template
- to extract the slot values. The extracted values will be
- available in the Log's inputs
- """
-
def decorator(func: callable):
decorator.__hl_file_id = uuid.uuid4()
diff --git a/src/humanloop/decorators/tool.py b/src/humanloop/decorators/tool.py
index e53ed85f..6c693103 100644
--- a/src/humanloop/decorators/tool.py
+++ b/src/humanloop/decorators/tool.py
@@ -134,21 +134,6 @@ def _extract_tool_kernel(func: callable) -> dict:
def tool(path: str | None = None, attributes: dict[str, typing.Any] | None = None):
- """Decorator to mark a function as a Humanloop Tool.
-
- The decorator inspect the wrapped function signature and code to infer
- the File kernel and JSON schema for the Tool. Any change to the decorated
- function will create a new version of the Tool, provided that the path
- remains the same.
-
- Every call to the decorated function will create a Log against the Tool.
-
- Arguments:
- path: Optional. The path to the Tool. If not provided, the function name
- will be used as the path and the File will be created in the root
- of your Humanloop's organization workspace.
- """
-
def decorator(func: callable):
func.json_schema = _tool_json_schema(func)
decorator.__hl_file_id = uuid.uuid4()
diff --git a/src/humanloop/otel/exporter.py b/src/humanloop/otel/exporter.py
index a904a1eb..411ec7a3 100644
--- a/src/humanloop/otel/exporter.py
+++ b/src/humanloop/otel/exporter.py
@@ -1,3 +1,5 @@
+from queue import Queue
+from threading import Thread
import typing
from opentelemetry import trace
from opentelemetry.sdk.trace import Span
@@ -13,18 +15,63 @@
class HumanloopSpanExporter(SpanExporter):
"""SpanExporter that uploads OpenTelemetry spans to Humanloop Humanloop spans."""
+ WORK_THREADS = 8
+
def __init__(self, client: "BaseHumanloop") -> None:
super().__init__()
self._client = client
self._uploaded_log_ids = {}
- self._upload_queue = []
+ self._upload_queue = Queue()
+ self._threads = [Thread(target=self._do_work, daemon=True) for _ in range(self.WORK_THREADS)]
+ self._shutdown = False
+ for thread in self._threads:
+ thread.start()
+
+ def export(self, spans: trace.Sequence[Span]) -> SpanExportResult:
+ for span in spans:
+            self._upload_queue.put(span)
+        return SpanExportResult.SUCCESS
+
+ def shutdown(self) -> None:
+ self._shutdown = True
+ for thread in self._threads:
+ thread.join()
+
+ def force_flush(self, timeout_millis: int = 3000) -> bool:
+ self._shutdown = True
+ for thread in self._threads:
+            thread.join(timeout=timeout_millis / 1000)  # Thread.join takes seconds, not milliseconds
+ self._upload_queue.join()
+
+ return True
+
+ def _do_work(self):
+        # Keep working while the queue is not empty or the Exporter
+        # has not been instructed to shut down
+ while self._upload_queue.qsize() > 0 or not self._shutdown:
+ try:
+ # Don't block or the thread will never see the shutdown
+ # command and will get stuck
+ span_to_export: Span = self._upload_queue.get(block=False)
+ except Exception:
+ continue
+ try:
+ trace_metadata = read_from_opentelemetry_span(span_to_export, key=HL_TRACE_METADATA_KEY)
+ except KeyError:
+ trace_metadata = None
+ if "trace_parent_id" not in trace_metadata or trace_metadata["trace_parent_id"] in self._uploaded_log_ids:
+ # The Span is outside a Trace context or its parent has been uploaded
+ # we can safely upload it to Humanloop
+ self._export_dispatch(span_to_export)
+ else: # The parent has not been uploaded yet
+ # Requeue the Span to be uploaded later
+ self._upload_queue.put(span_to_export)
+ self._upload_queue.task_done()
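+        # Illustrative ordering (not part of the code): for a trace
+        # Flow -> Prompt -> Tool, the Flow span is dispatched first; the
+        # Prompt and Tool spans keep being requeued until their parent's
+        # Log ID appears in self._uploaded_log_ids.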
def _export_prompt(self, span: Span) -> None:
file_object = read_from_opentelemetry_span(span, key=HL_FILE_OT_KEY)
log_object = read_from_opentelemetry_span(span, key=HL_LOG_OT_KEY)
try:
trace_metadata = read_from_opentelemetry_span(span, key=HL_TRACE_METADATA_KEY)
-
except KeyError:
trace_metadata = None
if trace_metadata:
@@ -99,35 +146,3 @@ def _export_dispatch(self, span: Span) -> None:
else:
raise NotImplementedError(f"Unknown span type: {hl_file}")
export_func(span=span)
-
- def export(self, spans: trace.Sequence[Span]) -> SpanExportResult:
- # TODO: Put this on a separate thread
- for span in spans:
- try:
- flow_metadata = read_from_opentelemetry_span(span, key=HL_TRACE_METADATA_KEY)
- except KeyError:
- flow_metadata = None
- if flow_metadata:
- # Span is part of a Flow, queue up Spans for upload until the Trace Head is exported.
- # The spans arrive at the Exporter in reverse order or creation, as they end.
- # We insert them at the front of the queue so that they are processed in the correct order
- self._upload_queue.insert(0, span)
- if (
- flow_metadata["is_flow_log"]
- # The Flow might be nested in another Flow
- # i.e. has trace_parent_id set.
- # Wait until the top level Flow is exported
- and "trace_parent_id" not in flow_metadata
- ):
- # TODO: Add threading to this: sibling Spans on the same
- # depth level in the Trace can be uploaded in parallel
- while len(self._upload_queue) > 0:
- span = self._upload_queue.pop(0)
- self._export_dispatch(span)
- else:
- # Span is not part of Flow, upload as singular
- self._export_dispatch(span)
-
- def force_flush(self, timeout_millis: int = 30000) -> bool:
- # TODO: When implementing the multi-threaded version of export, this will need to be updated
- return True
diff --git a/tests/decorators/test_flow_decorator.py b/tests/decorators/test_flow_decorator.py
index f2c36ee8..e12f6322 100644
--- a/tests/decorators/test_flow_decorator.py
+++ b/tests/decorators/test_flow_decorator.py
@@ -1,6 +1,7 @@
import os
import random
import string
+import time
from unittest.mock import patch
import pytest
@@ -175,6 +176,10 @@ def test_hl_exporter_with_flow(
# THEN the first uploaded span is the Tool
assert "tool" in read_from_opentelemetry_span(span=first_exported_span, key=HL_FILE_OT_KEY)
+    # Potentially flaky: the Exporter is threaded, so we need
+    # to wait for the worker threads to finish
+ time.sleep(3)
+
# THEN the first Log uploaded is the Flow
first_log = exporter._client.flows.log.call_args_list[0][1]
assert "flow" in first_log
From bf51f0182c25d48bebc9addea0cb503574976496 Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Sat, 26 Oct 2024 22:43:52 +0300
Subject: [PATCH 07/70] Bumped python version in GitHub action
---
.github/workflows/ci.yml | 6 +++---
pyproject.toml | 1 -
2 files changed, 3 insertions(+), 4 deletions(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index fa41c1f2..00eb16e5 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -10,7 +10,7 @@ jobs:
- name: Set up python
uses: actions/setup-python@v4
with:
- python-version: 3.8
+ python-version: 3.9
- name: Bootstrap poetry
run: |
curl -sSL https://install.python-poetry.org | python - -y --version 1.5.1
@@ -26,7 +26,7 @@ jobs:
- name: Set up python
uses: actions/setup-python@v4
with:
- python-version: 3.8
+ python-version: 3.9
- name: Bootstrap poetry
run: |
curl -sSL https://install.python-poetry.org | python - -y --version 1.5.1
@@ -46,7 +46,7 @@ jobs:
- name: Set up python
uses: actions/setup-python@v4
with:
- python-version: 3.8
+ python-version: 3.9
- name: Bootstrap poetry
run: |
curl -sSL https://install.python-poetry.org | python - -y --version 1.5.1
diff --git a/pyproject.toml b/pyproject.toml
index 621c54bc..f1e03b52 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -10,7 +10,6 @@ classifiers = [
"Intended Audience :: Developers",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
- "Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
From 4850ad2ad5bb46ae144d692c2d7a8106f949dc31 Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Sat, 26 Oct 2024 22:54:48 +0300
Subject: [PATCH 08/70] Mypy complaints
---
poetry.lock | 13 +++++-
pyproject.toml | 1 +
src/humanloop/decorators/flow.py | 4 +-
src/humanloop/decorators/helpers.py | 4 +-
src/humanloop/decorators/prompt.py | 4 +-
src/humanloop/decorators/tool.py | 62 ++++++++++++++---------------
src/humanloop/otel/helpers.py | 29 +++++++-------
src/humanloop/otel/processor.py | 22 +++++-----
8 files changed, 74 insertions(+), 65 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index 1cd88b54..4cf6f506 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -639,6 +639,17 @@ files = [
{file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"},
]
+[[package]]
+name = "parse"
+version = "1.20.2"
+description = "parse() is the opposite of format()"
+optional = false
+python-versions = "*"
+files = [
+ {file = "parse-1.20.2-py2.py3-none-any.whl", hash = "sha256:967095588cb802add9177d0c0b6133b5ba33b1ea9007ca800e526f42a85af558"},
+ {file = "parse-1.20.2.tar.gz", hash = "sha256:b41d604d16503c79d81af5165155c0b20f6c8d6c559efa66b4b695c3e5a0a0ce"},
+]
+
[[package]]
name = "pluggy"
version = "1.5.0"
@@ -1257,4 +1268,4 @@ type = ["pytest-mypy"]
[metadata]
lock-version = "2.0"
python-versions = "^3.9"
-content-hash = "01a2b6f6439c0390145f670ef2c65f4698d4df7af626906888692938ab7166d8"
+content-hash = "e5f2ed4f52d279a314534a175abf7a04f7bacabba0db2fc29c734b2b796d96bb"
diff --git a/pyproject.toml b/pyproject.toml
index f1e03b52..4ed66e8e 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -44,6 +44,7 @@ opentelemetry-instrumentation-cohere = "^0.33.3"
opentelemetry-instrumentation-anthropic = "^0.33.3"
opentelemetry-instrumentation-mistralai = "^0.33.3"
+parse = "^1.20.2"
[tool.poetry.dev-dependencies]
mypy = "1.0.1"
pytest = "^7.4.0"
diff --git a/src/humanloop/decorators/flow.py b/src/humanloop/decorators/flow.py
index 67bbc03d..16ea7fc4 100644
--- a/src/humanloop/decorators/flow.py
+++ b/src/humanloop/decorators/flow.py
@@ -1,6 +1,6 @@
import uuid
from functools import wraps
-from typing import Any
+from typing import Any, Callable
from humanloop.decorators.helpers import args_to_inputs
@@ -13,7 +13,7 @@ def flow(
path: str | None = None,
attributes: dict[str, Any] = {},
):
- def decorator(func: callable):
+ def decorator(func: Callable):
@wraps(func)
def wrapper(*args, **kwargs):
with get_tracer().start_as_current_span(str(uuid.uuid4())) as span:
diff --git a/src/humanloop/decorators/helpers.py b/src/humanloop/decorators/helpers.py
index 4c926a3f..2c4e7195 100644
--- a/src/humanloop/decorators/helpers.py
+++ b/src/humanloop/decorators/helpers.py
@@ -1,8 +1,8 @@
import inspect
-from typing import Any
+from typing import Any, Callable
-def args_to_inputs(func: callable, args: tuple, kwargs: dict) -> dict[str, Any]:
+def args_to_inputs(func: Callable, args: tuple, kwargs: dict) -> dict[str, Any]:
signature = inspect.signature(func)
bound_args = signature.bind(*args, **kwargs)
bound_args.apply_defaults()
diff --git a/src/humanloop/decorators/prompt.py b/src/humanloop/decorators/prompt.py
index 2c7c5324..d54f36cc 100644
--- a/src/humanloop/decorators/prompt.py
+++ b/src/humanloop/decorators/prompt.py
@@ -1,6 +1,6 @@
import uuid
from functools import wraps
-from typing import Literal
+from typing import Literal, Callable
from humanloop.otel import get_trace_context, get_tracer, pop_trace_context, push_trace_context
from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY, HL_TRACE_METADATA_KEY
@@ -22,7 +22,7 @@ def prompt(
presence_penalty: float | None = None,
frequency_penalty: float | None = None,
):
- def decorator(func: callable):
+ def decorator(func: Callable):
decorator.__hl_file_id = uuid.uuid4()
if temperature is not None:
diff --git a/src/humanloop/decorators/tool.py b/src/humanloop/decorators/tool.py
index 6c693103..2c02e20a 100644
--- a/src/humanloop/decorators/tool.py
+++ b/src/humanloop/decorators/tool.py
@@ -3,6 +3,7 @@
import textwrap
import typing
import uuid
+from typing import Callable, Union
from functools import wraps
@@ -14,19 +15,17 @@
def _type_to_schema(type_hint):
- match type_hint:
- case builtins.int:
- return "number"
- case builtins.float:
- return "number"
- case builtins.bool:
- return "boolean"
- case builtins.str:
- return "string"
- case builtins.dict:
- return "object"
- case _:
- raise ValueError(f"Unsupported type hint: {type_hint}")
+ if isinstance(type_hint, int):
+ return "number"
+ if isinstance(type_hint, float):
+ return "number"
+ if isinstance(type_hint, bool):
+ return "boolean"
+ if isinstance(type_hint, str):
+ return "string"
+ if isinstance(type_hint, dict):
+ return "object"
+ raise ValueError(f"Unsupported type hint: {type_hint}")
def _handle_dict_annotation(parameter: inspect.Parameter) -> dict[str, object]:
@@ -88,28 +87,27 @@ def _parse_tool_parameters_schema(func) -> dict[str, dict]:
inspect.Parameter.VAR_KEYWORD,
):
raise ValueError("Varargs and kwargs are not supported")
- match typing.get_origin(parameter.annotation):
- case builtins.dict:
- param_schema = _handle_dict_annotation(parameter)
- parameters_schema["required"].append(parameter.name)
- required.append(parameter.name)
- case builtins.list:
- param_schema = _handle_list_annotation(parameter)
- parameters_schema["required"].append(parameter.name)
- required.append(parameter.name)
- case typing.Union:
- param_schema = _handle_union_annotation(parameter)
- case None:
- param_schema = _handle_simple_type(parameter)
- required.append(parameter.name)
- case _:
- raise ValueError("Unsupported type hint ", parameter)
+ if isinstance(origin := typing.get_origin(parameter.annotation), dict):
+ param_schema = _handle_dict_annotation(parameter)
+ parameters_schema["required"].append(parameter.name)
+ required.append(parameter.name)
+ elif isinstance(origin, list):
+ param_schema = _handle_list_annotation(parameter)
+ parameters_schema["required"].append(parameter.name)
+ required.append(parameter.name)
+ elif isinstance(origin, Union):
+ param_schema = _handle_union_annotation(parameter)
+ elif origin is None:
+ param_schema = _handle_simple_type(parameter)
+ required.append(parameter.name)
+ else:
+ raise ValueError("Unsupported type hint ", parameter)
parameters_schema["properties"][parameter.name] = param_schema
parameters_schema["required"] = required
return parameters_schema
-def _tool_json_schema(func: callable):
+def _tool_json_schema(func: Callable):
tool_name = func.__name__
description = func.__doc__
if description is None:
@@ -121,7 +119,7 @@ def _tool_json_schema(func: callable):
}
-def _extract_tool_kernel(func: callable) -> dict:
+def _extract_tool_kernel(func: Callable) -> dict:
return {
"source_code": textwrap.dedent(
# Remove the tool decorator from source code
@@ -134,7 +132,7 @@ def _extract_tool_kernel(func: callable) -> dict:
def tool(path: str | None = None, attributes: dict[str, typing.Any] | None = None):
- def decorator(func: callable):
+ def decorator(func: Callable):
func.json_schema = _tool_json_schema(func)
decorator.__hl_file_id = uuid.uuid4()
diff --git a/src/humanloop/otel/helpers.py b/src/humanloop/otel/helpers.py
index 0e60cf11..5d2a9192 100644
--- a/src/humanloop/otel/helpers.py
+++ b/src/humanloop/otel/helpers.py
@@ -1,12 +1,12 @@
import builtins
from typing import Any
-from opentelemetry.sdk.trace import Span
+from opentelemetry.sdk.trace import ReadableSpan
from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY
-def write_to_opentelemetry_span(span: Span, value: Any, key: str = "") -> None:
+def write_to_opentelemetry_span(span: ReadableSpan, value: Any, key: str = "") -> None:
"""Reverse of read_from_opentelemetry_span. Writes a Python object to the OpenTelemetry Span's attributes.
See `read_from_opentelemetry_span` for more information.
@@ -17,7 +17,7 @@ def write_to_opentelemetry_span(span: Span, value: Any, key: str = "") -> None:
key: Key prefix to write to the span attributes. The path to the values does not
need to exist in the span attributes.
"""
- to_write = dict()
+ to_write: dict[str, Any] = {}
_linear_object(to_write, value)
for k, v in to_write.items():
# OTT
@@ -27,7 +27,7 @@ def write_to_opentelemetry_span(span: Span, value: Any, key: str = "") -> None:
# _cache[(span.context.span_id, key)] = value
-def read_from_opentelemetry_span(span: Span, key: str = "") -> dict | list:
+def read_from_opentelemetry_span(span: ReadableSpan, key: str = ""):
"""Read a value from the OpenTelemetry span attributes.
OpenTelemetry linearises dictionaries and lists, storing only primitive values
@@ -144,15 +144,14 @@ def _linear_object(obj: dict, current: dict | list | Any, key: str = ""):
```
"""
- match type(current):
- case builtins.dict:
- for k, v in current.items():
- _linear_object(obj, v, f"{key}.{k}" if key != "" else k)
- case builtins.list:
- for idx, v in enumerate(current):
- _linear_object(obj, v, f"{key}.{idx}" if key != "" else str(idx))
- case _:
- obj[key] = current
+ if isinstance(current, builtins.dict):
+ for k, v in current.items():
+ _linear_object(obj, v, f"{key}.{k}" if key != "" else k)
+ elif isinstance(current, list):
+ for idx, v in enumerate(current):
+ _linear_object(obj, v, f"{key}.{idx}" if key != "" else str(idx))
+ else:
+ obj[key] = current
def _dict_to_list(d: dict[str, Any]) -> dict | list:
@@ -171,12 +170,12 @@ def _dict_to_list(d: dict[str, Any]) -> dict | list:
return d
-def is_llm_provider_call(span: Span) -> bool:
+def is_llm_provider_call(span: ReadableSpan) -> bool:
"""Determines if the span was created by an Instrumentor for LLM provider clients."""
return "llm.request.type" in span.attributes
-def is_humanloop_span(span: Span) -> bool:
+def is_humanloop_span(span: ReadableSpan) -> bool:
"""Determines if the span was created by the Humanloop SDK."""
try:
read_from_opentelemetry_span(span, key=HL_FILE_OT_KEY)
diff --git a/src/humanloop/otel/processor.py b/src/humanloop/otel/processor.py
index ce45933f..cd300482 100644
--- a/src/humanloop/otel/processor.py
+++ b/src/humanloop/otel/processor.py
@@ -3,7 +3,7 @@
from collections import defaultdict
import parse
-from opentelemetry.sdk.trace import Span
+from opentelemetry.sdk.trace import ReadableSpan
from opentelemetry.sdk.trace.export import SimpleSpanProcessor, SpanExporter
from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY
@@ -26,10 +26,10 @@ def __init__(self, exporter: SpanExporter) -> None:
self._children = defaultdict(list)
# TODO: Could override on_start and process Flow spans ahead of time
- # and PATCH the created Logs in on_end. A special type of Span could be
+ # and PATCH the created Logs in on_end. A special type of ReadableSpan could be
# used for this
- def on_end(self, span: Span) -> None:
+ def on_end(self, span: ReadableSpan) -> None:
if is_humanloop_span(span=span):
_process_humanloop_span(span, self._children[span.context.span_id])
del self._children[span.context.span_id]
@@ -39,13 +39,13 @@ def on_end(self, span: Span) -> None:
self._children[span.parent.span_id].append(span)
-def _is_instrumentor_span(span: Span) -> bool:
+def _is_instrumentor_span(span: ReadableSpan) -> bool:
# TODO: Extend in the future as needed. Spans not coming from
# Instrumentors of interest should be dropped
return is_llm_provider_call(span=span)
-def _process_humanloop_span(span: Span, children_spans: list[Span]):
+def _process_humanloop_span(span: ReadableSpan, children_spans: list[ReadableSpan]):
hl_file = read_from_opentelemetry_span(span, key=HL_FILE_OT_KEY)
if "prompt" in hl_file:
@@ -61,7 +61,7 @@ def _process_humanloop_span(span: Span, children_spans: list[Span]):
logging.error("Invalid span type")
-def _process_prompt(prompt_span: Span, children_spans: list[Span]):
+def _process_prompt(prompt_span: ReadableSpan, children_spans: list[ReadableSpan]):
if len(children_spans) == 0:
return
child_span = children_spans[0]
@@ -70,7 +70,7 @@ def _process_prompt(prompt_span: Span, children_spans: list[Span]):
_enrich_prompt_span_log(prompt_span, child_span)
-def _process_tool(tool_span: Span, children_spans: list[Span]):
+def _process_tool(tool_span: ReadableSpan, children_spans: list[ReadableSpan]):
# TODO: Use children_spans in the future
tool_log = read_from_opentelemetry_span(tool_span, key=HL_LOG_OT_KEY)
tool_log["start_time"] = tool_span.start_time / 1e9
@@ -84,7 +84,7 @@ def _process_tool(tool_span: Span, children_spans: list[Span]):
)
-def _process_flow(flow_span: Span, children_spans: list[Span]):
+def _process_flow(flow_span: ReadableSpan, children_spans: list[ReadableSpan]):
# TODO: Use children_spans in the future
flow_log = read_from_opentelemetry_span(flow_span, key=HL_LOG_OT_KEY)
flow_log["start_time"] = flow_span.start_time / 1e9
@@ -98,7 +98,7 @@ def _process_flow(flow_span: Span, children_spans: list[Span]):
)
-def _enrich_prompt_span_file(prompt_span: Span, llm_provider_call_span: Span):
+def _enrich_prompt_span_file(prompt_span: ReadableSpan, llm_provider_call_span: ReadableSpan):
hl_file = read_from_opentelemetry_span(prompt_span, key=HL_FILE_OT_KEY)
gen_ai_object = read_from_opentelemetry_span(llm_provider_call_span, key="gen_ai")
llm_object = read_from_opentelemetry_span(llm_provider_call_span, key="llm")
@@ -131,10 +131,10 @@ def _enrich_prompt_span_file(prompt_span: Span, llm_provider_call_span: Span):
)
-def _enrich_prompt_span_log(prompt_span: Span, llm_provider_call_span: Span) -> Span:
+def _enrich_prompt_span_log(prompt_span: ReadableSpan, llm_provider_call_span: ReadableSpan):
hl_file = read_from_opentelemetry_span(prompt_span, key=HL_FILE_OT_KEY)
hl_log = read_from_opentelemetry_span(prompt_span, key=HL_LOG_OT_KEY)
- gen_ai_object = read_from_opentelemetry_span(llm_provider_call_span, key="gen_ai")
+ gen_ai_object: dict = read_from_opentelemetry_span(llm_provider_call_span, key="gen_ai")
# TODO: Seed not added by Instrumentors in provider call
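
Note the subtlety this patch introduces and the next one repairs: `isinstance(type_hint, int)` asks whether the annotation object is itself an int instance, which is false for the type `int`, so every simple hint falls through to the `ValueError`. A minimal illustration of the identity-based version the follow-up commit switches to (`type_to_schema` here is a hypothetical stand-in):

```python
# The annotation arrives as a type object, so an identity check is the
# right test; isinstance() asks a different question entirely.
assert isinstance(int, int) is False  # `int` is a type, not an int instance

def type_to_schema(type_hint: type) -> str:
    # Hypothetical stand-in mirroring _type_to_schema after the follow-up fix.
    if type_hint is int or type_hint is float:
        return "number"
    if type_hint is bool:
        return "boolean"
    if type_hint is str:
        return "string"
    if type_hint is dict:
        return "object"
    raise ValueError(f"Unsupported type hint: {type_hint}")

assert type_to_schema(int) == "number"  # the isinstance() version raised here
```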
From 22445e99c9afec3fa986cb0db1cc2794e76a965d Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Sat, 26 Oct 2024 23:12:53 +0300
Subject: [PATCH 09/70] Switch to 3.9 typing annotations
---
src/humanloop/client.py | 35 +++++++++++++++---------------
src/humanloop/decorators/flow.py | 4 ++--
src/humanloop/decorators/prompt.py | 27 ++++++++++++-----------
src/humanloop/decorators/tool.py | 4 ++--
src/humanloop/otel/helpers.py | 6 ++---
5 files changed, 38 insertions(+), 38 deletions(-)
diff --git a/src/humanloop/client.py b/src/humanloop/client.py
index e380f15c..b64448aa 100644
--- a/src/humanloop/client.py
+++ b/src/humanloop/client.py
@@ -1,5 +1,5 @@
import typing
-from typing import Literal, Optional, List, Sequence
+from typing import Literal, Optional, List, Sequence, Union
import os
import httpx
from opentelemetry.sdk.resources import Resource
@@ -117,20 +117,19 @@ def __init__(
def prompt(
self,
# TODO: Template can be a list of objects
- path: str | None = None,
- model: str | None = None,
- endpoint: Literal["chat", "edit", "complete"] | None = None,
- template: str | None = None,
- provider: Literal[
- "openai", "openai_azure", "mock", "anthropic", "bedrock", "cohere", "replicate", "google", "groq"
- ]
- | None = None,
- max_tokens: int | None = None,
- stop: str | list[str] | None = None,
- temperature: float | None = None,
- top_p: float | None = None,
- presence_penalty: float | None = None,
- frequency_penalty: float | None = None,
+ path: Optional[str] = None,
+ model: Optional[str] = None,
+ endpoint: Optional[Literal["chat", "edit", "complete"]] = None,
+ template: Optional[str] = None,
+ provider: Optional[
+ Literal["openai", "openai_azure", "mock", "anthropic", "bedrock", "cohere", "replicate", "google", "groq"]
+ ] = None,
+ max_tokens: Optional[int] = None,
+ stop: Optional[Union[str, list[str]]] = None,
+ temperature: Optional[float] = None,
+ top_p: Optional[float] = None,
+ presence_penalty: Optional[float] = None,
+ frequency_penalty: Optional[float] = None,
):
"""Decorator to mark a function as a Humanloop Prompt.
@@ -167,8 +166,8 @@ def prompt(
def tool(
self,
- path: str | None = None,
- attributes: dict[str, typing.Any] | None = None,
+ path: Optional[str] = None,
+ attributes: Optional[dict[str, typing.Any]] = None,
):
"""Decorator to mark a function as a Humanloop Tool.
@@ -188,7 +187,7 @@ def tool(
def flow(
self,
- path: str | None = None,
+ path: Optional[str] = None,
attributes: dict[str, typing.Any] = {},
):
"""Decorator to log a Flow to the Humanloop API.
diff --git a/src/humanloop/decorators/flow.py b/src/humanloop/decorators/flow.py
index 16ea7fc4..7085bc4d 100644
--- a/src/humanloop/decorators/flow.py
+++ b/src/humanloop/decorators/flow.py
@@ -1,6 +1,6 @@
import uuid
from functools import wraps
-from typing import Any, Callable
+from typing import Any, Callable, Optional
from humanloop.decorators.helpers import args_to_inputs
@@ -10,7 +10,7 @@
def flow(
- path: str | None = None,
+ path: Optional[str] = None,
attributes: dict[str, Any] = {},
):
def decorator(func: Callable):
diff --git a/src/humanloop/decorators/prompt.py b/src/humanloop/decorators/prompt.py
index d54f36cc..f718e267 100644
--- a/src/humanloop/decorators/prompt.py
+++ b/src/humanloop/decorators/prompt.py
@@ -1,6 +1,6 @@
import uuid
from functools import wraps
-from typing import Literal, Callable
+from typing import Literal, Callable, Optional, Union
from humanloop.otel import get_trace_context, get_tracer, pop_trace_context, push_trace_context
from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY, HL_TRACE_METADATA_KEY
@@ -8,19 +8,20 @@
def prompt(
- path: str | None = None,
+ path: Optional[str] = None,
# TODO: Template can be a list of objects
- model: str | None = None,
- endpoint: Literal["chat", "edit", "complete"] | None = None,
- template: str | None = None,
- provider: Literal["openai", "openai_azure", "mock", "anthropic", "bedrock", "cohere", "replicate", "google", "groq"]
- | None = None,
- max_tokens: int | None = None,
- stop: str | list[str] | None = None,
- temperature: float | None = None,
- top_p: float | None = None,
- presence_penalty: float | None = None,
- frequency_penalty: float | None = None,
+ model: Optional[str] = None,
+ endpoint: Optional[Literal["chat", "edit", "complete"]] = None,
+ template: Optional[str] = None,
+ provider: Optional[
+ Literal["openai", "openai_azure", "mock", "anthropic", "bedrock", "cohere", "replicate", "google", "groq"]
+ ] = None,
+ max_tokens: Optional[int] = None,
+ stop: Optional[Union[str, list[str]]] = None,
+ temperature: Optional[float] = None,
+ top_p: Optional[float] = None,
+ presence_penalty: Optional[float] = None,
+ frequency_penalty: Optional[float] = None,
):
def decorator(func: Callable):
decorator.__hl_file_id = uuid.uuid4()
diff --git a/src/humanloop/decorators/tool.py b/src/humanloop/decorators/tool.py
index 2c02e20a..4e8dd880 100644
--- a/src/humanloop/decorators/tool.py
+++ b/src/humanloop/decorators/tool.py
@@ -3,7 +3,7 @@
import textwrap
import typing
import uuid
-from typing import Callable, Union
+from typing import Callable, Optional, Union
from functools import wraps
@@ -131,7 +131,7 @@ def _extract_tool_kernel(func: Callable) -> dict:
}
-def tool(path: str | None = None, attributes: dict[str, typing.Any] | None = None):
+def tool(path: Optional[str] = None, attributes: Optional[dict[str, typing.Any]] = None):
def decorator(func: Callable):
func.json_schema = _tool_json_schema(func)
decorator.__hl_file_id = uuid.uuid4()
diff --git a/src/humanloop/otel/helpers.py b/src/humanloop/otel/helpers.py
index 5d2a9192..c5aff4fb 100644
--- a/src/humanloop/otel/helpers.py
+++ b/src/humanloop/otel/helpers.py
@@ -1,5 +1,5 @@
import builtins
-from typing import Any
+from typing import Any, Union
from opentelemetry.sdk.trace import ReadableSpan
@@ -116,7 +116,7 @@ def read_from_opentelemetry_span(span: ReadableSpan, key: str = ""):
return result
-def _linear_object(obj: dict, current: dict | list | Any, key: str = ""):
+def _linear_object(obj: dict, current: Union[list, dict, Any], key: str = ""):
"""Linearise a Python object into a dictionary.
Method recurses on the `current` argument, collecting all primitive values and their
@@ -154,7 +154,7 @@ def _linear_object(obj: dict, current: dict | list | Any, key: str = ""):
obj[key] = current
-def _dict_to_list(d: dict[str, Any]) -> dict | list:
+def _dict_to_list(d: dict[str, Any]) -> Union[list, dict]:
"""Interpret number keys parsed by the read_from_opentelemetry_span function as lists.
read_from_opentelemetry_span assumes all sub-keys in a path such as foo.0.bar are keys in
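
For contrast, the two spellings side by side: the `X | None` union syntax is only evaluated natively from Python 3.10, while the package now targets 3.9, hence the sweep back to `Optional`/`Union`:

```python
from typing import Optional, Union

# 3.9-compatible spellings equivalent to the PEP 604 forms removed above.
def sample(
    path: Optional[str] = None,                    # was: path: str | None = None
    stop: Optional[Union[str, list[str]]] = None,  # was: str | list[str] | None
) -> None:
    ...

# On 3.9 the `str | None` form raises at import time, because the union
# operator on types only exists from Python 3.10:
#     TypeError: unsupported operand type(s) for |: 'type' and 'NoneType'
# (unless `from __future__ import annotations` defers evaluation).
```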
From 53db2eef3c6dceeb70533628d82841a976cae248 Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Sat, 26 Oct 2024 23:15:04 +0300
Subject: [PATCH 10/70] 3.9 typing complaint
---
src/humanloop/decorators/tool.py | 24 +++++++++++++-----------
1 file changed, 13 insertions(+), 11 deletions(-)
diff --git a/src/humanloop/decorators/tool.py b/src/humanloop/decorators/tool.py
index 4e8dd880..91e04661 100644
--- a/src/humanloop/decorators/tool.py
+++ b/src/humanloop/decorators/tool.py
@@ -15,15 +15,15 @@
def _type_to_schema(type_hint):
- if isinstance(type_hint, int):
+ if type_hint is int:
return "number"
- if isinstance(type_hint, float):
+ if type_hint is float:
return "number"
- if isinstance(type_hint, bool):
+ if type_hint is bool:
return "boolean"
- if isinstance(type_hint, str):
+ if type_hint is str:
return "string"
- if isinstance(type_hint, dict):
+ if type_hint is dict:
return "object"
raise ValueError(f"Unsupported type hint: {type_hint}")
@@ -87,7 +87,14 @@ def _parse_tool_parameters_schema(func) -> dict[str, dict]:
inspect.Parameter.VAR_KEYWORD,
):
raise ValueError("Varargs and kwargs are not supported")
- if isinstance(origin := typing.get_origin(parameter.annotation), dict):
+ origin = typing.get_origin(parameter.annotation)
+ print("HEY", origin, parameter.annotation, origin is None)
+ if origin is Union:
+ param_schema = _handle_union_annotation(parameter)
+ elif origin is None:
+ param_schema = _handle_simple_type(parameter)
+ required.append(parameter.name)
+ elif isinstance(origin, dict):
param_schema = _handle_dict_annotation(parameter)
parameters_schema["required"].append(parameter.name)
required.append(parameter.name)
@@ -95,11 +102,6 @@ def _parse_tool_parameters_schema(func) -> dict[str, dict]:
param_schema = _handle_list_annotation(parameter)
parameters_schema["required"].append(parameter.name)
required.append(parameter.name)
- elif isinstance(origin, Union):
- param_schema = _handle_union_annotation(parameter)
- elif origin is None:
- param_schema = _handle_simple_type(parameter)
- required.append(parameter.name)
else:
raise ValueError("Unsupported type hint ", parameter)
parameters_schema["properties"][parameter.name] = param_schema
From 555797d2c4132d34fed0141853928549b3fb994f Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Sun, 27 Oct 2024 00:05:42 +0300
Subject: [PATCH 11/70] debugging
---
src/humanloop/decorators/tool.py | 2 +-
tests/conftest.py | 20 +++++++++-----------
2 files changed, 10 insertions(+), 12 deletions(-)
diff --git a/src/humanloop/decorators/tool.py b/src/humanloop/decorators/tool.py
index 91e04661..32120527 100644
--- a/src/humanloop/decorators/tool.py
+++ b/src/humanloop/decorators/tool.py
@@ -15,6 +15,7 @@
def _type_to_schema(type_hint):
+ print("QWE", type_hint, type_hint is str)
if type_hint is int:
return "number"
if type_hint is float:
@@ -88,7 +89,6 @@ def _parse_tool_parameters_schema(func) -> dict[str, dict]:
):
raise ValueError("Varargs and kwargs are not supported")
origin = typing.get_origin(parameter.annotation)
- print("HEY", origin, parameter.annotation, origin is None)
if origin is Union:
param_schema = _handle_union_annotation(parameter)
elif origin is None:
diff --git a/tests/conftest.py b/tests/conftest.py
index e5056ee5..2ad34511 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -14,17 +14,6 @@
from humanloop.otel.processor import HumanloopSpanProcessor
-@pytest.fixture(scope="function")
-def test_span():
- exporter = InMemorySpanExporter()
- processor = SimpleSpanProcessor(exporter)
- provider = TracerProvider()
- provider.add_span_processor(processor)
- trace.set_tracer_provider(provider)
- tracer = trace.get_tracer("test")
- return tracer.start_span("test_span")
-
-
@pytest.fixture(scope="function")
def opentelemetry_test_provider() -> TracerProvider:
provider = TracerProvider(
@@ -38,6 +27,15 @@ def opentelemetry_test_provider() -> TracerProvider:
return provider
+@pytest.fixture(scope="function")
+def test_span(opentelemetry_test_provider: TracerProvider):
+ exporter = InMemorySpanExporter()
+ processor = SimpleSpanProcessor(exporter)
+ opentelemetry_test_provider.add_span_processor(processor)
+ tracer = opentelemetry_test_provider.get_tracer("test")
+ return tracer.start_span("test_span")
+
+
@pytest.fixture(scope="function")
def opentelemetry_test_configuration(
opentelemetry_test_provider: TracerProvider,
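
The fixture change routes `test_span` through the shared provider instead of calling `trace.set_tracer_provider`, which is process-global and refuses to be overridden once set. The in-memory tracing pattern on its own, independent of these fixtures:

```python
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import SimpleSpanProcessor
from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter

provider = TracerProvider()
exporter = InMemorySpanExporter()
provider.add_span_processor(SimpleSpanProcessor(exporter))

tracer = provider.get_tracer("test")
with tracer.start_as_current_span("unit-of-work"):
    pass  # the span ends when the context manager exits

finished = exporter.get_finished_spans()  # tuple of ReadableSpan
assert finished[0].name == "unit-of-work"
```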
From c55b2d49717aebd465cc1925cfb4a08eb176abaf Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Sun, 27 Oct 2024 00:12:59 +0300
Subject: [PATCH 12/70] Added openai secret for testing
---
.github/workflows/ci.yml | 2 ++
src/humanloop/decorators/tool.py | 1 -
2 files changed, 2 insertions(+), 1 deletion(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 00eb16e5..59e3e16c 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -35,6 +35,8 @@ jobs:
- name: Test
run: poetry run pytest -rP .
+ env:
+ OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
publish:
needs: [compile, test]
diff --git a/src/humanloop/decorators/tool.py b/src/humanloop/decorators/tool.py
index 32120527..424ac283 100644
--- a/src/humanloop/decorators/tool.py
+++ b/src/humanloop/decorators/tool.py
@@ -15,7 +15,6 @@
def _type_to_schema(type_hint):
- print("QWE", type_hint, type_hint is str)
if type_hint is int:
return "number"
if type_hint is float:
From 8816382fba162e1818a3a940c7ae3cd62abc6dd5 Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Mon, 28 Oct 2024 10:04:24 +0200
Subject: [PATCH 13/70] Fixed mypy type hints
---
poetry.lock | 24 ++++-
pyproject.toml | 3 +
src/humanloop/decorators/flow.py | 1 -
src/humanloop/decorators/prompt.py | 4 +-
src/humanloop/decorators/tool.py | 125 +++++++++++++++-------
src/humanloop/otel/__init__.py | 8 +-
src/humanloop/otel/exporter.py | 65 ++++++-----
src/humanloop/otel/helpers.py | 114 +++++++-------------
src/humanloop/otel/processor.py | 50 +++++----
tests/conftest.py | 8 +-
tests/decorators/test_flow_decorator.py | 35 +++---
tests/decorators/test_prompt_decorator.py | 56 +++++++---
tests/decorators/test_tool_decorator.py | 15 +--
tests/otel/test_helpers.py | 66 +++++++-----
14 files changed, 337 insertions(+), 237 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index 4cf6f506..11aa6334 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.8.4 and should not be changed by hand.
[[package]]
name = "annotated-types"
@@ -650,6 +650,26 @@ files = [
{file = "parse-1.20.2.tar.gz", hash = "sha256:b41d604d16503c79d81af5165155c0b20f6c8d6c559efa66b4b695c3e5a0a0ce"},
]
+[[package]]
+name = "parse-type"
+version = "0.6.4"
+description = "Simplifies to build parse types based on the parse module"
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,>=2.7"
+files = [
+ {file = "parse_type-0.6.4-py2.py3-none-any.whl", hash = "sha256:83d41144a82d6b8541127bf212dd76c7f01baff680b498ce8a4d052a7a5bce4c"},
+ {file = "parse_type-0.6.4.tar.gz", hash = "sha256:5e1ec10440b000c3f818006033372939e693a9ec0176f446d9303e4db88489a6"},
+]
+
+[package.dependencies]
+parse = {version = ">=1.18.0", markers = "python_version >= \"3.0\""}
+six = ">=1.15"
+
+[package.extras]
+develop = ["build (>=0.5.1)", "coverage (>=4.4)", "pylint", "pytest (<5.0)", "pytest (>=5.0)", "pytest-cov", "pytest-html (>=1.19.0)", "ruff", "setuptools", "setuptools-scm", "tox (>=2.8,<4.0)", "twine (>=1.13.0)", "virtualenv (<20.22.0)", "virtualenv (>=20.0.0)", "wheel"]
+docs = ["Sphinx (>=1.6)", "sphinx-bootstrap-theme (>=0.6.0)"]
+testing = ["pytest (<5.0)", "pytest (>=5.0)", "pytest-html (>=1.19.0)"]
+
[[package]]
name = "pluggy"
version = "1.5.0"
@@ -1268,4 +1288,4 @@ type = ["pytest-mypy"]
[metadata]
lock-version = "2.0"
python-versions = "^3.9"
-content-hash = "e5f2ed4f52d279a314534a175abf7a04f7bacabba0db2fc29c734b2b796d96bb"
+content-hash = "3dbb1db1562689821e480dbd190f047bfee69dd5f823b309e4c1c5b5602c74e2"
diff --git a/pyproject.toml b/pyproject.toml
index 4ed66e8e..0d5bcfba 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -26,6 +26,9 @@ packages = [
{ include = "humanloop", from = "src"}
]
+[tool.poetry.group.dev.dependencies]
+parse-type = "^0.6.4"
+
[project.urls]
Repository = 'https://github.com/humanloop/humanloop-python'
diff --git a/src/humanloop/decorators/flow.py b/src/humanloop/decorators/flow.py
index 7085bc4d..ca4d8d73 100644
--- a/src/humanloop/decorators/flow.py
+++ b/src/humanloop/decorators/flow.py
@@ -2,7 +2,6 @@
from functools import wraps
from typing import Any, Callable, Optional
-
from humanloop.decorators.helpers import args_to_inputs
from humanloop.otel import get_trace_context, get_tracer, pop_trace_context, push_trace_context
from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY, HL_TRACE_METADATA_KEY, OT_EMPTY_ATTRIBUTE
diff --git a/src/humanloop/decorators/prompt.py b/src/humanloop/decorators/prompt.py
index f718e267..b3f0bf64 100644
--- a/src/humanloop/decorators/prompt.py
+++ b/src/humanloop/decorators/prompt.py
@@ -1,6 +1,6 @@
import uuid
from functools import wraps
-from typing import Literal, Callable, Optional, Union
+from typing import Callable, Literal, Optional, Union
from humanloop.otel import get_trace_context, get_tracer, pop_trace_context, push_trace_context
from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY, HL_TRACE_METADATA_KEY
@@ -24,8 +24,6 @@ def prompt(
frequency_penalty: Optional[float] = None,
):
def decorator(func: Callable):
- decorator.__hl_file_id = uuid.uuid4()
-
if temperature is not None:
if not 0 <= temperature < 1:
raise ValueError(f"{func.__name__}: Temperature parameter must be between 0 and 1")
diff --git a/src/humanloop/decorators/tool.py b/src/humanloop/decorators/tool.py
index 424ac283..0bd35b60 100644
--- a/src/humanloop/decorators/tool.py
+++ b/src/humanloop/decorators/tool.py
@@ -3,9 +3,8 @@
import textwrap
import typing
import uuid
-from typing import Callable, Optional, Union
from functools import wraps
-
+from typing import Callable, Literal, Optional, TypedDict, Union
from humanloop.otel import get_trace_context, get_tracer, pop_trace_context, push_trace_context
from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY, HL_TRACE_METADATA_KEY
@@ -14,6 +13,31 @@
from .helpers import args_to_inputs
+class JSONSchemaProperty(TypedDict):
+ type: Literal["number", "boolean", "string", "object"]
+
+
+class JSONSchemaArray(TypedDict):
+ type: Literal["array"]
+ items: JSONSchemaProperty
+
+
+class JSONSchemaObjectProperty(TypedDict):
+ key: JSONSchemaProperty
+ value: JSONSchemaProperty
+
+
+class JSONSchemaObject(TypedDict):
+ type: Literal["object"]
+ properties: JSONSchemaObjectProperty
+
+
+class JSONSchemaFunctionParameters(TypedDict):
+ type: Literal["object"]
+ required: tuple[str]
+ properties: dict[str, Union[JSONSchemaProperty, JSONSchemaArray, JSONSchemaObject]]
+
+
def _type_to_schema(type_hint):
if type_hint is int:
return "number"
@@ -28,7 +52,7 @@ def _type_to_schema(type_hint):
raise ValueError(f"Unsupported type hint: {type_hint}")
-def _handle_dict_annotation(parameter: inspect.Parameter) -> dict[str, object]:
+def _handle_dict_annotation(parameter: inspect.Parameter) -> JSONSchemaObject:
try:
type_key, type_value = typing.get_args(parameter.annotation)
except ValueError:
@@ -43,44 +67,46 @@ def _handle_dict_annotation(parameter: inspect.Parameter) -> dict[str, object]:
dict,
):
raise ValueError("Dict values must be strings or integers", parameter.name, type_value)
- return {
- "type": "object",
- "properties": {
- "key": {"type": _type_to_schema(type_key)},
- "value": {"type": _type_to_schema(type_value)},
- },
- }
+ return JSONSchemaObject(
+ type="object",
+ properties=JSONSchemaObjectProperty(
+ key={"type": _type_to_schema(type_key)},
+ value={"type": _type_to_schema(type_value)},
+ ),
+ )
-def _handle_list_annotation(parameter: inspect.Parameter) -> dict[str, object]:
+def _handle_list_annotation(parameter: inspect.Parameter) -> JSONSchemaArray:
try:
list_type = typing.get_args(parameter.annotation)[0]
except ValueError:
raise ValueError("List annotation must have one type hint")
- return {
- "type": "array",
- "items": {"type": _type_to_schema(list_type)},
- }
+ return JSONSchemaArray(
+ type="array",
+ items={
+ "type": _type_to_schema(list_type),
+ },
+ )
-def _handle_union_annotation(parameter: inspect.Parameter) -> dict[str, object]:
+def _handle_optional_annotation(parameter: inspect.Parameter) -> JSONSchemaProperty:
union_types = [sub_type for sub_type in typing.get_args(parameter.annotation) if sub_type != type(None)]
if len(union_types) != 1:
raise ValueError("Union types are not supported. Try passing a string and parsing inside function")
return {"type": _type_to_schema(union_types[0])}
-def _handle_simple_type(parameter: inspect.Parameter) -> dict[str, object]:
+def _handle_simple_type(parameter: inspect.Parameter) -> JSONSchemaProperty:
if parameter.annotation is None:
raise ValueError("Parameters must have type hints")
return {"type": _type_to_schema(parameter.annotation)}
-def _parse_tool_parameters_schema(func) -> dict[str, dict]:
+def _parse_tool_parameters_schema(func) -> JSONSchemaFunctionParameters:
# TODO: Add tests for this, 100% it is breakable
signature = inspect.signature(func)
- required = []
- parameters_schema = {"type": "object", "properties": {}, "required": []}
+ required: list[str] = []
+ properties: dict[str, Union[JSONSchemaArray, JSONSchemaProperty, JSONSchemaObject]] = {}
for parameter in signature.parameters.values():
if parameter.kind in (
inspect.Parameter.VAR_POSITIONAL,
@@ -88,54 +114,71 @@ def _parse_tool_parameters_schema(func) -> dict[str, dict]:
):
raise ValueError("Varargs and kwargs are not supported")
origin = typing.get_origin(parameter.annotation)
+ param_schema: Union[JSONSchemaProperty, JSONSchemaArray, JSONSchemaObject]
if origin is Union:
- param_schema = _handle_union_annotation(parameter)
+ param_schema = _handle_optional_annotation(parameter)
elif origin is None:
param_schema = _handle_simple_type(parameter)
required.append(parameter.name)
elif isinstance(origin, dict):
param_schema = _handle_dict_annotation(parameter)
- parameters_schema["required"].append(parameter.name)
required.append(parameter.name)
elif isinstance(origin, list):
param_schema = _handle_list_annotation(parameter)
- parameters_schema["required"].append(parameter.name)
required.append(parameter.name)
else:
raise ValueError("Unsupported type hint ", parameter)
- parameters_schema["properties"][parameter.name] = param_schema
- parameters_schema["required"] = required
- return parameters_schema
+ properties[parameter.name] = param_schema
+ return JSONSchemaFunctionParameters(
+ type="object",
+ # False positive, expected tuple[str] but got tuple[str, ...]
+ required=tuple(required), # type: ignore
+ properties=properties,
+ )
-def _tool_json_schema(func: Callable):
+class JSONSchemaFunction(TypedDict):
+ name: str
+ description: str
+ parameters: JSONSchemaFunctionParameters
+
+
+def _tool_json_schema(func: Callable) -> JSONSchemaFunction:
tool_name = func.__name__
description = func.__doc__
if description is None:
description = ""
- return {
- "name": tool_name,
- "description": description,
- "parameters": _parse_tool_parameters_schema(func),
- }
+ return JSONSchemaFunction(
+ name=tool_name,
+ description=description,
+ parameters=_parse_tool_parameters_schema(func),
+ )
+
+
+class ToolKernel(TypedDict):
+ source_code: str
+ function: JSONSchemaFunction
+ tool_type: Literal["json_schema"]
+ strict: Literal[True]
-def _extract_tool_kernel(func: Callable) -> dict:
- return {
- "source_code": textwrap.dedent(
+def _extract_tool_kernel(func: Callable) -> ToolKernel:
+ return ToolKernel(
+ source_code=textwrap.dedent(
# Remove the tool decorator from source code
inspect.getsource(func).split("\n", maxsplit=1)[1]
),
- "function": _tool_json_schema(func),
- "tool_type": "json_schema",
- "strict": True,
- }
+ function=_tool_json_schema(func=func),
+ tool_type="json_schema",
+ strict=True,
+ )
def tool(path: Optional[str] = None, attributes: Optional[dict[str, typing.Any]] = None):
def decorator(func: Callable):
- func.json_schema = _tool_json_schema(func)
- decorator.__hl_file_id = uuid.uuid4()
+ # Attaching the schema as a function attribute is nice UX,
+ # but mypy complains about adding attributes to functions
+ func.json_schema = _tool_json_schema(func) # type: ignore
@wraps(func)
def wrapper(*args, **kwargs):
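
Worth spelling out why TypedDicts fit here: they type-check keys and values statically while remaining plain dicts at runtime, so the returned schemas still serialise straight into the log payload. Using the `JSONSchemaProperty` defined above:

```python
from typing import Literal, TypedDict

class JSONSchemaProperty(TypedDict):
    type: Literal["number", "boolean", "string", "object"]

prop = JSONSchemaProperty(type="string")
assert isinstance(prop, dict)       # at runtime: an ordinary dict
assert prop == {"type": "string"}   # serialises like any other dict

# mypy rejects JSONSchemaProperty(type="integer"): "integer" is not a
# member of the Literal, so schema typos are caught before runtime.
```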
diff --git a/src/humanloop/otel/__init__.py b/src/humanloop/otel/__init__.py
index 3efcc894..7cd06b6f 100644
--- a/src/humanloop/otel/__init__.py
+++ b/src/humanloop/otel/__init__.py
@@ -1,14 +1,16 @@
+from typing import Optional
+
from opentelemetry import baggage
+from opentelemetry.context import Context
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.trace import Tracer
from humanloop.otel.constants import HL_TRACE_METADATA_KEY
from humanloop.otel.helpers import module_is_installed
-
_TRACER = None
-_BAGGAGE_CONTEXT = [{}]
+_BAGGAGE_CONTEXT: list[Context] = [Context()]
def set_tracer(tracer: Tracer):
@@ -72,7 +74,7 @@ def pop_trace_context():
_BAGGAGE_CONTEXT.pop()
-def get_trace_context() -> dict:
+def get_trace_context() -> Optional[object]:
"""Get Trace parent metadata for Flows."""
global _BAGGAGE_CONTEXT
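
The list-of-`Context` stack works because baggage is functional: `set_baggage` returns a new `Context` rather than mutating the one passed in. A minimal demonstration (`log_123` is an illustrative value):

```python
from opentelemetry import baggage
from opentelemetry.context import Context

ctx = Context()  # an empty, immutable context
ctx = baggage.set_baggage("trace_parent_id", "log_123", context=ctx)

assert baggage.get_baggage("trace_parent_id", ctx) == "log_123"
```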
diff --git a/src/humanloop/otel/exporter.py b/src/humanloop/otel/exporter.py
index 411ec7a3..29123996 100644
--- a/src/humanloop/otel/exporter.py
+++ b/src/humanloop/otel/exporter.py
@@ -1,12 +1,16 @@
+import typing
from queue import Queue
+from typing import Any, Optional
from threading import Thread
-import typing
+
from opentelemetry import trace
-from opentelemetry.sdk.trace import Span
+from opentelemetry.sdk.trace import ReadableSpan
from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult
from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY, HL_TRACE_METADATA_KEY, OT_EMPTY_ATTRIBUTE
from humanloop.otel.helpers import read_from_opentelemetry_span
+from humanloop.requests.flow_kernel_request import FlowKernelRequestParams
+from humanloop.requests.prompt_kernel_request import PromptKernelRequestParams
if typing.TYPE_CHECKING:
from humanloop.base_client import BaseHumanloop
@@ -19,17 +23,18 @@ class HumanloopSpanExporter(SpanExporter):
def __init__(self, client: "BaseHumanloop") -> None:
super().__init__()
- self._client = client
- self._uploaded_log_ids = {}
- self._upload_queue = Queue()
- self._threads = [Thread(target=self._do_work, daemon=True) for _ in range(self.WORK_THREADS)]
- self._shutdown = False
+ self._client: "BaseHumanloop" = client
+ self._uploaded_log_ids: dict[str, str] = {}
+ self._upload_queue: Queue = Queue()
+ self._threads: list[Thread] = [Thread(target=self._do_work, daemon=True) for _ in range(self.WORK_THREADS)]
+ self._shutdown: bool = False
for thread in self._threads:
thread.start()
- def export(self, spans: trace.Sequence[Span]) -> SpanExportResult:
+ def export(self, spans: trace.Sequence[ReadableSpan]) -> SpanExportResult:
for span in spans:
self._upload_queue.put(span)
+ return SpanExportResult.SUCCESS
def shutdown(self) -> None:
self._shutdown = True
@@ -51,7 +56,7 @@ def _do_work(self):
try:
# Don't block or the thread will never see the shutdown
# command and will get stuck
- span_to_export: Span = self._upload_queue.get(block=False)
+ span_to_export = self._upload_queue.get(block=False)
except Exception:
continue
try:
@@ -67,19 +72,21 @@ def _do_work(self):
self._upload_queue.put(span_to_export)
self._upload_queue.task_done()
- def _export_prompt(self, span: Span) -> None:
- file_object = read_from_opentelemetry_span(span, key=HL_FILE_OT_KEY)
- log_object = read_from_opentelemetry_span(span, key=HL_LOG_OT_KEY)
+ def _export_prompt(self, span: ReadableSpan) -> None:
+ file_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HL_FILE_OT_KEY)
+ log_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HL_LOG_OT_KEY)
+ trace_metadata: Optional[dict[str, str]]
try:
- trace_metadata = read_from_opentelemetry_span(span, key=HL_TRACE_METADATA_KEY)
+ # HL_TRACE_METADATA_KEY is a dict[str, str] with no nesting
+ trace_metadata = read_from_opentelemetry_span(span, key=HL_TRACE_METADATA_KEY) # type: ignore
except KeyError:
trace_metadata = None
if trace_metadata:
trace_parent_id = self._uploaded_log_ids[trace_metadata["trace_parent_id"]]
else:
trace_parent_id = None
- prompt = file_object["prompt"]
- path = file_object["path"]
+ prompt: Optional[PromptKernelRequestParams] = file_object["prompt"]
+ path: str = file_object["path"]
response = self._client.prompts.log(
path=path,
prompt=prompt,
@@ -88,11 +95,13 @@ def _export_prompt(self, span: Span) -> None:
)
self._uploaded_log_ids[span.context.span_id] = response.id
- def _export_tool(self, span: Span) -> None:
- file_object = read_from_opentelemetry_span(span, key=HL_FILE_OT_KEY)
- log_object = read_from_opentelemetry_span(span, key=HL_LOG_OT_KEY)
+ def _export_tool(self, span: ReadableSpan) -> None:
+ file_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HL_FILE_OT_KEY)
+ log_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HL_LOG_OT_KEY)
+ trace_metadata: Optional[dict[str, str]]
try:
- trace_metadata = read_from_opentelemetry_span(span, key=HL_TRACE_METADATA_KEY)
+ # HL_TRACE_METADATA_KEY is a dict[str, str] with no nesting
+ trace_metadata = read_from_opentelemetry_span(span, key=HL_TRACE_METADATA_KEY) # type: ignore
except KeyError:
trace_metadata = None
if trace_metadata:
@@ -100,7 +109,7 @@ def _export_tool(self, span: Span) -> None:
else:
trace_parent_id = None
tool = file_object["tool"]
- path = file_object["path"]
+ path: str = file_object["path"]
response = self._client.tools.log(
path=path,
tool=tool,
@@ -109,23 +118,25 @@ def _export_tool(self, span: Span) -> None:
)
self._uploaded_log_ids[span.context.span_id] = response.id
- def _export_flow(self, span: Span) -> None:
- file_object = read_from_opentelemetry_span(span, key=HL_FILE_OT_KEY)
- log_object = read_from_opentelemetry_span(span, key=HL_LOG_OT_KEY)
+ def _export_flow(self, span: ReadableSpan) -> None:
+ file_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HL_FILE_OT_KEY)
+ log_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HL_LOG_OT_KEY)
+ trace_metadata: Optional[dict[str, str]]
try:
- trace_metadata = read_from_opentelemetry_span(span, key=HL_TRACE_METADATA_KEY)
+ # HL_TRACE_METADATA_KEY is a dict[str, str] with no nesting
+ trace_metadata = read_from_opentelemetry_span(span, key=HL_TRACE_METADATA_KEY) # type: ignore
except KeyError:
trace_metadata = None
if trace_metadata and "trace_parent_id" in trace_metadata:
trace_parent_id = self._uploaded_log_ids[trace_metadata["trace_parent_id"]]
else:
trace_parent_id = None
- flow = file_object["flow"]
+ flow: Optional[FlowKernelRequestParams] = file_object["flow"]
if flow == OT_EMPTY_ATTRIBUTE:
flow = {
"attributes": {},
}
- path = file_object["path"]
+ path: str = file_object["path"]
response = self._client.flows.log(
path=path,
flow=flow,
@@ -134,7 +145,7 @@ def _export_flow(self, span: Span) -> None:
)
self._uploaded_log_ids[span.context.span_id] = response.id
- def _export_dispatch(self, span: Span) -> None:
+ def _export_dispatch(self, span: ReadableSpan) -> None:
hl_file = read_from_opentelemetry_span(span, key=HL_FILE_OT_KEY)
if "prompt" in hl_file:
diff --git a/src/humanloop/otel/helpers.py b/src/humanloop/otel/helpers.py
index c5aff4fb..f277028d 100644
--- a/src/humanloop/otel/helpers.py
+++ b/src/humanloop/otel/helpers.py
@@ -2,11 +2,23 @@
from typing import Any, Union
from opentelemetry.sdk.trace import ReadableSpan
+from opentelemetry.util.types import AttributeValue
from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY
+NestedDict = dict[str, Union["NestedDict", AttributeValue]]
+NestedList = list[Union["NestedList", NestedDict]]
-def write_to_opentelemetry_span(span: ReadableSpan, value: Any, key: str = "") -> None:
+
+def _list_to_ott(lst: NestedList) -> NestedDict:
+ return {str(idx): val if not isinstance(val, list) else _list_to_ott(val) for idx, val in enumerate(lst)}
+
+
+def write_to_opentelemetry_span(
+ span: ReadableSpan,
+ value: Union[NestedDict, NestedList],
+ key: str = "",
+) -> None:
"""Reverse of read_from_opentelemetry_span. Writes a Python object to the OpenTelemetry Span's attributes.
See `read_from_opentelemetry_span` for more information.
@@ -17,17 +29,25 @@ def write_to_opentelemetry_span(span: ReadableSpan, value: Any, key: str = "") -
key: Key prefix to write to the span attributes. The path to the values does not
need to exist in the span attributes.
"""
- to_write: dict[str, Any] = {}
- _linear_object(to_write, value)
- for k, v in to_write.items():
- # OTT
- if v is not None:
- span._attributes[f"{key}.{k}" if key != "" else k] = v
- # with _cache_lock:
- # _cache[(span.context.span_id, key)] = value
+ to_write_copy: Union[dict, AttributeValue]
+ if isinstance(value, list):
+ to_write_copy = _list_to_ott(value)
+ else:
+ to_write_copy = dict(value)
+ linearised_attributes: dict[str, AttributeValue] = {}
+ work_stack: list[tuple[str, Union[AttributeValue, NestedDict]]] = [(key, to_write_copy)]
+ while len(work_stack) > 0:
+ key, value = work_stack.pop() # type: ignore
+ if isinstance(value, dict):
+ for sub_key, sub_value in value.items():
+ work_stack.append((f"{key}.{sub_key}" if key else sub_key, sub_value))
+ else:
+ linearised_attributes[key] = value # type: ignore
+ for final_key, final_value in linearised_attributes.items():
+ span._attributes[final_key] = final_value # type: ignore
-def read_from_opentelemetry_span(span: ReadableSpan, key: str = ""):
+def read_from_opentelemetry_span(span: ReadableSpan, key: str = "") -> NestedDict:
"""Read a value from the OpenTelemetry span attributes.
OpenTelemetry linearises dictionaries and lists, storing only primitive values
@@ -80,11 +100,13 @@ def read_from_opentelemetry_span(span: ReadableSpan, key: str = ""):
[42, 43]
```
"""
+ if span._attributes is None:
+ raise ValueError("Span attributes are empty")
- result = dict()
+ result: dict[str, Union[dict, AttributeValue]] = {}
- to_process: list[tuple[str, Any]] = []
- for span_key, span_value in span._attributes.items():
+ to_process: list[tuple[str, Union[dict, AttributeValue]]] = []
+ for span_key, span_value in span._attributes.items(): # type: ignore
if key == "":
# No key prefix, add to root
to_process.append((f"{key}.{span_key}", span_value))
@@ -98,81 +120,27 @@ def read_from_opentelemetry_span(span: ReadableSpan, key: str = ""):
return result
raise KeyError(f"Key {key} not found in span attributes")
- for span_key, span_value in to_process:
+ for span_key, span_value in to_process: # type: ignore
parts = span_key.split(".")
len_parts = len(parts)
- sub_result = result
+ sub_result: dict[str, Union[dict, AttributeValue]] = result
for idx, part in enumerate(parts):
if idx == len_parts - 1:
sub_result[part] = span_value
else:
if part not in sub_result:
sub_result[part] = dict()
- sub_result = sub_result[part]
+ sub_result = sub_result[part] # type: ignore
- result = _dict_to_list(result)
for part in key.split("."):
- result = result[part]
- return result
-
-
-def _linear_object(obj: dict, current: Union[list, dict, Any], key: str = ""):
- """Linearise a Python object into a dictionary.
-
- Method recurses on the `current` argument, collecting all primitive values and their
- path in the objects, then storing them in the `obj` dictionary in the end.
-
- Arguments:
- obj: Dictionary to store the linearised object
- current: Python object to linearise. Used in recursivity when a complex
- value is encountered.
- key: Key prefix to store the values in the `obj` dictionary. Keys are added
- incrementally as the function recurses.
-
- Examples:
- ```python
- result = dict()
- _linear_object(result, {'a': 1, 'b': {'c': 2, d: [4, 5]}})
+ result = result[part] # type: ignore
- # result is now:
- {
- 'a': 1,
- 'b.c': 2,
- 'b.d.0': 4,
- 'b.d.1': 5
- }
- ```
-
- """
- if isinstance(current, builtins.dict):
- for k, v in current.items():
- _linear_object(obj, v, f"{key}.{k}" if key != "" else k)
- elif isinstance(current, list):
- for idx, v in enumerate(current):
- _linear_object(obj, v, f"{key}.{idx}" if key != "" else str(idx))
- else:
- obj[key] = current
-
-
-def _dict_to_list(d: dict[str, Any]) -> Union[list, dict]:
- """Interpret number keys parsed by the read_from_opentelemetry_span function as lists.
-
- read_from_opentelemetry_span assumes all sub-keys in a path such as foo.0.bar are keys in
- dictionaries. This method revisits the final result, and transforms the keys in lists where
- appropriate.
- """
- is_list = all(key.isdigit() for key in d.keys())
- if is_list:
- return [_dict_to_list(val) if isinstance(val, dict) else val for val in d.values()]
- for key, value in d.items():
- if isinstance(value, dict):
- d[key] = _dict_to_list(value)
- return d
+ return result
def is_llm_provider_call(span: ReadableSpan) -> bool:
"""Determines if the span was created by an Instrumentor for LLM provider clients."""
- return "llm.request.type" in span.attributes
+ return "llm.request.type" in span.attributes # type: ignore
def is_humanloop_span(span: ReadableSpan) -> bool:
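
The removed `_linear_object` recursion and the new in-line work stack compute the same linearisation; a standalone sketch of the iterative version, reusing the example from the old docstring:

```python
from typing import Any

def flatten(value: Any, prefix: str = "") -> dict[str, Any]:
    """Linearise nested dicts/lists into dotted keys, iteratively.

    Same invariant as the removed recursive _linear_object: list indices
    become string path segments, everything else is a leaf value.
    """
    out: dict[str, Any] = {}
    stack: list = [(prefix, value)]
    while stack:
        key, current = stack.pop()
        if isinstance(current, list):
            # as in _list_to_ott: indices become string keys
            current = {str(idx): val for idx, val in enumerate(current)}
        if isinstance(current, dict):
            for sub_key, sub_value in current.items():
                stack.append((f"{key}.{sub_key}" if key else sub_key, sub_value))
        else:
            out[key] = current
    return out

assert flatten({"a": 1, "b": {"c": 2, "d": [4, 5]}}) == {
    "a": 1,
    "b.c": 2,
    "b.d.0": 4,
    "b.d.1": 5,
}
```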
diff --git a/src/humanloop/otel/processor.py b/src/humanloop/otel/processor.py
index cd300482..18a8819f 100644
--- a/src/humanloop/otel/processor.py
+++ b/src/humanloop/otel/processor.py
@@ -1,8 +1,10 @@
import json
import logging
from collections import defaultdict
+from typing import Any
-import parse
+# No typing stubs for parse
+import parse # type: ignore
from opentelemetry.sdk.trace import ReadableSpan
from opentelemetry.sdk.trace.export import SimpleSpanProcessor, SpanExporter
@@ -22,8 +24,8 @@ class HumanloopSpanProcessor(SimpleSpanProcessor):
def __init__(self, exporter: SpanExporter) -> None:
super().__init__(exporter)
- self._spans = dict()
- self._children = defaultdict(list)
+ # Span parent to Span children map
+ self._children: dict[int, list] = defaultdict(list)
# TODO: Could override on_start and process Flow spans ahead of time
# and PATCH the created Logs in on_end. A special type of ReadableSpan could be
@@ -73,9 +75,11 @@ def _process_prompt(prompt_span: ReadableSpan, children_spans: list[ReadableSpan
def _process_tool(tool_span: ReadableSpan, children_spans: list[ReadableSpan]):
# TODO: Use children_spans in the future
tool_log = read_from_opentelemetry_span(tool_span, key=HL_LOG_OT_KEY)
- tool_log["start_time"] = tool_span.start_time / 1e9
- tool_log["end_time"] = tool_span.end_time / 1e9
- tool_log["created_at"] = tool_span.end_time / 1e9
+ if tool_span.start_time:
+ tool_log["start_time"] = tool_span.start_time / 1e9
+ if tool_span.end_time:
+ tool_log["end_time"] = tool_span.end_time / 1e9
+ tool_log["created_at"] = tool_span.end_time / 1e9
write_to_opentelemetry_span(
span=tool_span,
@@ -87,9 +91,11 @@ def _process_tool(tool_span: ReadableSpan, children_spans: list[ReadableSpan]):
def _process_flow(flow_span: ReadableSpan, children_spans: list[ReadableSpan]):
# TODO: Use children_spans in the future
flow_log = read_from_opentelemetry_span(flow_span, key=HL_LOG_OT_KEY)
- flow_log["start_time"] = flow_span.start_time / 1e9
- flow_log["end_time"] = flow_span.end_time / 1e9
- flow_log["created_at"] = flow_span.end_time / 1e9
+ if flow_span.start_time:
+ flow_log["start_time"] = flow_span.start_time / 1e9
+ if flow_span.end_time:
+ flow_log["end_time"] = flow_span.end_time / 1e9
+ flow_log["created_at"] = flow_span.end_time / 1e9
write_to_opentelemetry_span(
span=flow_span,
@@ -99,11 +105,11 @@ def _process_flow(flow_span: ReadableSpan, children_spans: list[ReadableSpan]):
def _enrich_prompt_span_file(prompt_span: ReadableSpan, llm_provider_call_span: ReadableSpan):
- hl_file = read_from_opentelemetry_span(prompt_span, key=HL_FILE_OT_KEY)
- gen_ai_object = read_from_opentelemetry_span(llm_provider_call_span, key="gen_ai")
- llm_object = read_from_opentelemetry_span(llm_provider_call_span, key="llm")
+ hl_file: dict[str, Any] = read_from_opentelemetry_span(prompt_span, key=HL_FILE_OT_KEY)
+ gen_ai_object: dict[str, Any] = read_from_opentelemetry_span(llm_provider_call_span, key="gen_ai")
+ llm_object: dict[str, Any] = read_from_opentelemetry_span(llm_provider_call_span, key="llm")
- prompt_kernel = hl_file.get("prompt", {})
+ prompt_kernel: dict[str, Any] = hl_file.get("prompt", {})
if "model" not in prompt_kernel:
prompt_kernel["model"] = gen_ai_object.get("request", {}).get("model", None)
if "endpoint" not in prompt_kernel:
@@ -132,25 +138,27 @@ def _enrich_prompt_span_file(prompt_span: ReadableSpan, llm_provider_call_span:
def _enrich_prompt_span_log(prompt_span: ReadableSpan, llm_provider_call_span: ReadableSpan):
- hl_file = read_from_opentelemetry_span(prompt_span, key=HL_FILE_OT_KEY)
- hl_log = read_from_opentelemetry_span(prompt_span, key=HL_LOG_OT_KEY)
- gen_ai_object: dict = read_from_opentelemetry_span(llm_provider_call_span, key="gen_ai")
+ hl_file: dict[str, Any] = read_from_opentelemetry_span(prompt_span, key=HL_FILE_OT_KEY)
+ hl_log: dict[str, Any] = read_from_opentelemetry_span(prompt_span, key=HL_LOG_OT_KEY)
+ gen_ai_object: dict[str, Any] = read_from_opentelemetry_span(llm_provider_call_span, key="gen_ai")
# TODO: Seed not added by Instrumentors in provider call
if "output_tokens" not in hl_log:
hl_log["output_tokens"] = gen_ai_object.get("usage", {}).get("completion_tokens")
if len(gen_ai_object.get("completion", [])) > 0:
- hl_log["finish_reason"] = gen_ai_object["completion"][0].get("finish_reason")
+ hl_log["finish_reason"] = gen_ai_object.get("completion", {}).get("0", {}).get("finish_reason")
hl_log["messages"] = gen_ai_object.get("prompt", [])
- hl_log["start_time"] = prompt_span.start_time / 1e9
- hl_log["end_time"] = prompt_span.start_time / 1e9
- hl_log["created_at"] = prompt_span.start_time / 1e9
+ if prompt_span.start_time:
+ hl_log["start_time"] = prompt_span.start_time / 1e9
+ if prompt_span.end_time:
+ hl_log["end_time"] = prompt_span.end_time / 1e9
+ hl_log["created_at"] = prompt_span.end_time / 1e9
try:
inputs = {}
- system_message = gen_ai_object["prompt"][0]["content"]
+ system_message = gen_ai_object["prompt"]["0"]["content"]
template = hl_file["prompt"]["template"]
parsed = parse.parse(template, system_message)
for key, value in parsed.named.items():
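
The `gen_ai["prompt"]["0"]` indexing reflects the linearisation above (list indices become string keys), while the input recovery itself leans on the `parse` package added to the lockfile earlier: `parse.parse` inverts `str.format`, and `.named` yields the captured template variables. A quick demonstration with the test template used elsewhere in this series:

```python
# No typing stubs for parse, as noted in the import above
import parse  # type: ignore

template = "You are an assistant on the following topics: {topics}."
system_message = "You are an assistant on the following topics: astronomy."

result = parse.parse(template, system_message)  # inverse of str.format()
assert result is not None          # parse() returns None when nothing matches
assert result.named == {"topics": "astronomy"}
```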
diff --git a/tests/conftest.py b/tests/conftest.py
index 2ad34511..12075e16 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -2,10 +2,12 @@
from unittest.mock import MagicMock
import pytest
-from opentelemetry import trace
from opentelemetry.instrumentation.openai import OpenAIInstrumentor
from opentelemetry.sdk.resources import Resource
-from opentelemetry.sdk.trace import Tracer, TracerProvider
+from opentelemetry.sdk.trace import TracerProvider
+from opentelemetry.trace import Tracer
+
+from openai.types.chat.chat_completion_message_param import ChatCompletionMessageParam
from opentelemetry.sdk.trace.export import SimpleSpanProcessor
from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
@@ -99,7 +101,7 @@ def opentelemetry_hl_with_exporter_test_configuration(
@pytest.fixture(scope="session")
-def call_llm_messages() -> list[dict]:
+def call_llm_messages() -> list[ChatCompletionMessageParam]:
return [
{
"role": "system",
diff --git a/tests/decorators/test_flow_decorator.py b/tests/decorators/test_flow_decorator.py
index e12f6322..5995f5cd 100644
--- a/tests/decorators/test_flow_decorator.py
+++ b/tests/decorators/test_flow_decorator.py
@@ -5,16 +5,16 @@
from unittest.mock import patch
import pytest
-from openai import OpenAI
-from opentelemetry.sdk.trace import Tracer
-from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
-
from humanloop.decorators.flow import flow
+from humanloop.decorators.prompt import prompt
+from humanloop.decorators.tool import tool
from humanloop.otel.constants import HL_FILE_OT_KEY, HL_TRACE_METADATA_KEY
from humanloop.otel.exporter import HumanloopSpanExporter
from humanloop.otel.helpers import read_from_opentelemetry_span
-from humanloop.decorators.prompt import prompt
-from humanloop.decorators.tool import tool
+from openai import OpenAI
+from openai.types.chat.chat_completion_message_param import ChatCompletionMessageParam
+from opentelemetry.sdk.trace import Tracer
+from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
@tool()
@@ -26,7 +26,7 @@ def _random_string() -> str:
@prompt(path=None, template="You are an assistant on the following topics: {topics}.")
-def _call_llm(messages: list[dict]) -> str:
+def _call_llm(messages: list[ChatCompletionMessageParam]) -> str:
# NOTE: These tests check if instrumentors are capable of intercepting OpenAI
# provider calls. Could not find a way to intercept them coming from a Mock.
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
@@ -157,6 +157,7 @@ def test_hl_exporter_with_flow(
call_llm_messages: list[dict],
opentelemetry_hl_with_exporter_test_configuration: tuple[Tracer, HumanloopSpanExporter],
):
+ # NOTE: type ignore comments are caused by the MagicMock used to mock _client
# GIVEN a OpenTelemetry configuration with a mock Humanloop SDK and a spied exporter
_, exporter = opentelemetry_hl_with_exporter_test_configuration
with patch.object(exporter, "export", wraps=exporter.export) as mock_export_method:
@@ -167,7 +168,7 @@ def test_hl_exporter_with_flow(
middle_exported_span = mock_export_method.call_args_list[1][0][0][0]
last_exported_span = mock_export_method.call_args_list[2][0][0][0]
# THEN the last uploaded span is the Flow
- assert read_from_opentelemetry_span(span=last_exported_span, key=HL_FILE_OT_KEY)["flow"]["attributes"] == {
+ assert read_from_opentelemetry_span(span=last_exported_span, key=HL_FILE_OT_KEY)["flow"]["attributes"] == { # type: ignore[index,call-overload]
"foo": "bar",
"baz": 7,
}
@@ -181,23 +182,23 @@ def test_hl_exporter_with_flow(
time.sleep(3)
# THEN the first Log uploaded is the Flow
- first_log = exporter._client.flows.log.call_args_list[0][1]
+ first_log = exporter._client.flows.log.call_args_list[0][1] # type: ignore[attr-defined]
assert "flow" in first_log
- exporter._client.flows.log.assert_called_once()
- flow_log_call_args = exporter._client.flows.log.call_args_list[0]
+ exporter._client.flows.log.assert_called_once() # type: ignore[attr-defined]
+ flow_log_call_args = exporter._client.flows.log.call_args_list[0] # type: ignore[attr-defined]
assert flow_log_call_args.kwargs["flow"]["attributes"] == {"foo": "bar", "baz": 7}
- flow_log_id = exporter._client.flows.log.return_value
+ flow_log_id = exporter._client.flows.log.return_value # type: ignore[attr-defined]
# THEN the second Log uploaded is the Prompt
- exporter._client.prompts.log.assert_called_once()
- prompt_log_call_args = exporter._client.prompts.log.call_args_list[0]
+ exporter._client.prompts.log.assert_called_once() # type: ignore[attr-defined]
+ prompt_log_call_args = exporter._client.prompts.log.call_args_list[0] # type: ignore[attr-defined]
assert prompt_log_call_args.kwargs["trace_parent_id"] == flow_log_id
assert prompt_log_call_args.kwargs["prompt"]["temperature"] == 0.8
- prompt_log_id = exporter._client.prompts.log.return_value
+ prompt_log_id = exporter._client.prompts.log.return_value # type: ignore[attr-defined]
# THEN the final Log uploaded is the Tool
- exporter._client.tools.log.assert_called_once()
- tool_log_call_args = exporter._client.tools.log.call_args_list[0]
+ exporter._client.tools.log.assert_called_once() # type: ignore[attr-defined]
+ tool_log_call_args = exporter._client.tools.log.call_args_list[0] # type: ignore[attr-defined]
assert tool_log_call_args.kwargs["trace_parent_id"] == prompt_log_id
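The `attr-defined` ignores are needed because `exporter._client` is a `MagicMock` standing in for the real SDK client, so mypy cannot see `flows`, `prompts`, or `tools` on it. A minimal sketch of the call-inspection pattern used above, on a bare `MagicMock`:

```python
from unittest.mock import MagicMock

client = MagicMock()
client.flows.log(flow={"attributes": {"foo": "bar", "baz": 7}})

# MagicMock records every call; call_args_list[0].kwargs recovers the
# keyword arguments that were passed to the (mocked) API.
client.flows.log.assert_called_once()
kwargs = client.flows.log.call_args_list[0].kwargs
assert kwargs["flow"]["attributes"] == {"foo": "bar", "baz": 7}
```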
diff --git a/tests/decorators/test_prompt_decorator.py b/tests/decorators/test_prompt_decorator.py
index d4a36755..4eebf435 100644
--- a/tests/decorators/test_prompt_decorator.py
+++ b/tests/decorators/test_prompt_decorator.py
@@ -1,17 +1,19 @@
import os
+from typing import Any, Optional
+import pytest
from dotenv import load_dotenv
+from humanloop.decorators.prompt import prompt
+from humanloop.otel.constants import HL_FILE_OT_KEY
+from humanloop.otel.helpers import NestedDict, is_humanloop_span, read_from_opentelemetry_span
from openai import OpenAI
+from openai.types.chat.chat_completion_message_param import ChatCompletionMessageParam
from opentelemetry.sdk.trace import Tracer
from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
-from humanloop.otel.constants import HL_FILE_OT_KEY
-from humanloop.otel.helpers import is_humanloop_span, read_from_opentelemetry_span
-from humanloop.decorators.prompt import prompt
-
@prompt(path=None, template="You are an assistant on the following topics: {topics}.")
-def _call_llm(messages: list[dict]) -> str:
+def _call_llm(messages: list[ChatCompletionMessageParam]) -> Optional[str]:
load_dotenv()
# NOTE: These tests check if instrumentors are capable of intercepting OpenAI
# provider calls. Could not find a way to intercept them coming from a Mock.
@@ -28,7 +30,7 @@ def _call_llm(messages: list[dict]) -> str:
@prompt(path=None, template="You are an assistant on the following topics: {topics}.", temperature=0.9, top_p=0.1)
-def _call_llm_with_defaults(messages: list[dict]) -> str:
+def _call_llm_with_defaults(messages: list[ChatCompletionMessageParam]) -> Optional[str]:
load_dotenv()
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
return (
@@ -44,7 +46,7 @@ def _call_llm_with_defaults(messages: list[dict]) -> str:
def test_prompt(
opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter],
- call_llm_messages: list[dict],
+ call_llm_messages: list[ChatCompletionMessageParam],
):
# GIVEN a default OpenTelemetry configuration
_, exporter = opentelemetry_test_configuration
@@ -55,12 +57,12 @@ def test_prompt(
assert len(spans) == 2
# THEN the Prompt span is not enhanced with information from the LLM provider
assert is_humanloop_span(spans[1])
- assert spans[1].attributes.get("prompt") is None
+ assert spans[1].attributes.get("prompt") is None # type: ignore
def test_prompt_hl_processor(
opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter],
- call_llm_messages: list[dict],
+ call_llm_messages: list[ChatCompletionMessageParam],
):
# GIVEN an OpenTelemetry configuration with a Humanloop Span processor
_, exporter = opentelemetry_hl_test_configuration
@@ -70,7 +72,7 @@ def test_prompt_hl_processor(
spans = exporter.get_finished_spans()
assert len(spans) == 1
assert is_humanloop_span(spans[0])
- prompt = read_from_opentelemetry_span(span=spans[0], key=HL_FILE_OT_KEY)["prompt"]
+ prompt: dict[str, Any] = read_from_opentelemetry_span(span=spans[0], key=HL_FILE_OT_KEY)["prompt"] # type: ignore
assert prompt is not None
# THEN temperature is taken from LLM provider call, but top_p is not since it is not specified
assert prompt["temperature"] == 0.8
@@ -79,7 +81,7 @@ def test_prompt_hl_processor(
def test_prompt_with_defaults(
opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter],
- call_llm_messages: list[dict],
+ call_llm_messages: list[ChatCompletionMessageParam],
):
# GIVEN an OpenTelemetry configuration with a Humanloop Span processor
_, exporter = opentelemetry_hl_test_configuration
@@ -89,9 +91,39 @@ def test_prompt_with_defaults(
spans = exporter.get_finished_spans()
assert len(spans) == 1
assert is_humanloop_span(spans[0])
- prompt = read_from_opentelemetry_span(span=spans[0], key=HL_FILE_OT_KEY)["prompt"]
+ prompt: dict[str, Any] = read_from_opentelemetry_span(span=spans[0], key=HL_FILE_OT_KEY)["prompt"] # type: ignore
assert prompt is not None
# THEN temperature is taken from decorator rather than intercepted LLM provider call
assert prompt["temperature"] == 0.9
# THEN top_p is present
assert prompt["top_p"] == 0.1
+
+
+@pytest.mark.parametrize(
+ "hyperparameters",
+ (
+ {"temperature": 1.1},
+ {"top_p": 1.1},
+ {"presence_penalty": 3},
+ {"frequency_penalty": 3},
+ ),
+)
+def test_default_values_fails_out_of_domain(hyperparameters: dict[str, float]):
+ # GIVEN a Prompt decorated function
+ # WHEN using default values that are out of domain
+ # THEN an exception is raised
+ with pytest.raises(ValueError):
+
+ @prompt(path=None, template="You are an assistant on the following topics: {topics}.", **hyperparameters) # type: ignore[arg-type]
+ def _call_llm(messages: list[ChatCompletionMessageParam]) -> Optional[str]:
+ load_dotenv()
+ client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
+ return (
+ client.chat.completions.create(
+ model="gpt-4o",
+ messages=messages,
+ temperature=0.8,
+ )
+ .choices[0]
+ .message.content
+ )
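The new parametrized test pins down that out-of-domain defaults fail at decoration time. A minimal sketch of the kind of check this implies (`_check_domain` and its bounds are illustrative, inferred from the failure cases rather than taken from the decorator's source):

```python
_BOUNDS = {
    "temperature": (0.0, 1.0),
    "top_p": (0.0, 1.0),
    "presence_penalty": (-2.0, 2.0),
    "frequency_penalty": (-2.0, 2.0),
}

def _check_domain(**hyperparameters: float) -> None:
    # Raise eagerly, so a bad default fails when the decorator is applied,
    # not when the decorated function is first called.
    for name, value in hyperparameters.items():
        low, high = _BOUNDS[name]
        if not low <= value <= high:
            raise ValueError(f"{name}={value} is outside [{low}, {high}]")

_check_domain(temperature=0.9, top_p=0.1)  # passes
# _check_domain(temperature=1.1)           # raises ValueError, like the test above
```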
diff --git a/tests/decorators/test_tool_decorator.py b/tests/decorators/test_tool_decorator.py
index 225e61fa..d3958f17 100644
--- a/tests/decorators/test_tool_decorator.py
+++ b/tests/decorators/test_tool_decorator.py
@@ -1,13 +1,14 @@
-from opentelemetry.sdk.trace import Tracer
-from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
+from typing import Any
+from humanloop.decorators.tool import tool
from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY
from humanloop.otel.helpers import read_from_opentelemetry_span
-from humanloop.decorators.tool import tool
+from opentelemetry.sdk.trace import Tracer
+from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
@tool()
-def calculator(operation: str, num1: int, num2: int) -> str:
+def calculator(operation: str, num1: float, num2: float) -> float:
"""Do arithmetic operations on two numbers."""
if operation == "add":
return num1 + num2
@@ -18,7 +19,7 @@ def calculator(operation: str, num1: int, num2: int) -> str:
elif operation == "divide":
return num1 / num2
else:
- return "Invalid operation"
+ raise ValueError(f"Invalid operation: {operation}")
def test_calculator_decorator(
@@ -31,8 +32,8 @@ def test_calculator_decorator(
# THEN a single span is created and the log and file attributes are correctly set
spans = exporter.get_finished_spans()
assert len(spans) == 1
- hl_file = read_from_opentelemetry_span(span=spans[0], key=HL_FILE_OT_KEY)
- hl_log = read_from_opentelemetry_span(span=spans[0], key=HL_LOG_OT_KEY)
+ hl_file: dict[str, Any] = read_from_opentelemetry_span(span=spans[0], key=HL_FILE_OT_KEY)
+ hl_log: dict[str, Any] = read_from_opentelemetry_span(span=spans[0], key=HL_LOG_OT_KEY)
assert hl_log["output"] == result == 3
assert hl_log["inputs"] == {
"operation": "add",
diff --git a/tests/otel/test_helpers.py b/tests/otel/test_helpers.py
index da3648c6..04d16a65 100644
--- a/tests/otel/test_helpers.py
+++ b/tests/otel/test_helpers.py
@@ -1,7 +1,6 @@
import pytest
-from opentelemetry.sdk.trace import Span
-
from humanloop.otel.helpers import read_from_opentelemetry_span, write_to_opentelemetry_span
+from opentelemetry.sdk.trace import Span
def test_read_empty(test_span: Span):
@@ -12,7 +11,8 @@ def test_read_non_existent_key(test_span: Span):
with pytest.raises(KeyError):
assert read_from_opentelemetry_span(test_span, "key") == {}
write_to_opentelemetry_span(test_span, {"x": 7, "y": "foo"}, key="key")
- assert dict(test_span.attributes) == {
+ # NOTE: attributes cannot be None at this point
+ assert dict(test_span.attributes) == { # type: ignore
"key.x": 7,
"key.y": "foo",
}
@@ -22,7 +22,8 @@ def test_read_non_existent_key(test_span: Span):
def test_simple_dict(test_span: Span):
write_to_opentelemetry_span(test_span, {"x": 7, "y": "foo"}, "key")
- assert dict(test_span.attributes) == {
+ # NOTE: attributes cannot be None at this point
+ assert dict(test_span.attributes) == { # type: ignore
"key.x": 7,
"key.y": "foo",
}
@@ -31,7 +32,8 @@ def test_simple_dict(test_span: Span):
def test_no_prefix(test_span: Span):
write_to_opentelemetry_span(test_span, {"x": 7, "y": "foo"})
- assert dict(test_span.attributes) == {
+ # NOTE: attributes cannot be None at this point
+ assert dict(test_span.attributes) == { # type: ignore
"x": 7,
"y": "foo",
}
@@ -40,7 +42,8 @@ def test_no_prefix(test_span: Span):
def test_nested_object(test_span: Span):
write_to_opentelemetry_span(test_span, {"x": 7, "y": {"z": "foo"}}, "key")
- assert dict(test_span.attributes) == {
+ # NOTE: attributes cannot be None at this point
+ assert dict(test_span.attributes) == { # type: ignore
"key.x": 7,
"key.y.z": "foo",
}
@@ -49,28 +52,30 @@ def test_nested_object(test_span: Span):
def test_list(test_span: Span):
write_to_opentelemetry_span(test_span, [{"x": 7, "y": "foo"}, {"z": "bar"}], "key")
- assert dict(test_span.attributes) == {
+ # NOTE: attributes cannot be None at this point
+ assert dict(test_span.attributes) == { # type: ignore
"key.0.x": 7,
"key.0.y": "foo",
"key.1.z": "bar",
}
- assert read_from_opentelemetry_span(test_span, "key") == [
- {"x": 7, "y": "foo"},
- {"z": "bar"},
- ]
+ assert read_from_opentelemetry_span(test_span, "key") == {
+ "0": {"x": 7, "y": "foo"},
+ "1": {"z": "bar"},
+ }
def test_list_no_prefix(test_span: Span):
write_to_opentelemetry_span(test_span, [{"x": 7, "y": "foo"}, {"z": "bar"}])
- assert dict(test_span.attributes) == {
+ # NOTE: attributes cannot be None at this point
+ assert dict(test_span.attributes) == { # type: ignore
"0.x": 7,
"0.y": "foo",
"1.z": "bar",
}
- assert read_from_opentelemetry_span(test_span) == [
- {"x": 7, "y": "foo"},
- {"z": "bar"},
- ]
+ assert read_from_opentelemetry_span(test_span) == {
+ "0": {"x": 7, "y": "foo"},
+ "1": {"z": "bar"},
+ }
def test_multiple_nestings(test_span: Span):
@@ -82,16 +87,20 @@ def test_multiple_nestings(test_span: Span):
],
"key",
)
- assert dict(test_span.attributes) == {
+ # NOTE: attributes cannot be None at this point
+ assert dict(test_span.attributes) == { # type: ignore
"key.0.x": 7,
"key.0.y": "foo",
"key.1.0.z": "bar",
"key.1.1.a": 42,
}
- assert read_from_opentelemetry_span(test_span, "key") == [
- {"x": 7, "y": "foo"},
- [{"z": "bar"}, {"a": 42}],
- ]
+ assert read_from_opentelemetry_span(test_span, "key") == {
+ "0": {"x": 7, "y": "foo"},
+ "1": {
+ "0": {"z": "bar"},
+ "1": {"a": 42},
+ },
+ }
def test_read_mixed_numeric_string_keys(test_span: Span):
@@ -103,11 +112,11 @@ def test_read_mixed_numeric_string_keys(test_span: Span):
"key.a.a": 42,
}
)
- assert read_from_opentelemetry_span(span=test_span, key="key") == {
+ assert read_from_opentelemetry_span(span=test_span, key="key") == { # type: ignore
"0": {"x": 7, "y": "foo"},
"a": {"z": "bar", "a": 42},
}
- assert read_from_opentelemetry_span(span=test_span) == {
+ assert read_from_opentelemetry_span(span=test_span) == { # type: ignore
"key": {
"0": {"x": 7, "y": "foo"},
"a": {"z": "bar", "a": 42},
@@ -117,7 +126,8 @@ def test_read_mixed_numeric_string_keys(test_span: Span):
def test_sub_key_same_as_key(test_span: Span):
write_to_opentelemetry_span(test_span, {"key": 7}, "key")
- assert dict(test_span.attributes) == {
+ # NOTE: attributes cannot be None at this point
+ assert dict(test_span.attributes) == { # type: ignore
"key.key": 7,
}
assert read_from_opentelemetry_span(test_span, "key") == {"key": 7}
@@ -137,17 +147,19 @@ def test_write_read_sub_key(test_span: Span):
def test_write_drops_dict_all_null_values(test_span: Span):
# GIVEN a test_span to which a dict containing only null values is written
- write_to_opentelemetry_span(test_span, {"x": None, "y": None}, "key")
+ # NOTE: mypy complains about the None values in the dict, but they are intentionally under test
+ write_to_opentelemetry_span(test_span, {"x": None, "y": None}, "key") # type: ignore
# WHEN reading the value from the span
# THEN the value is not present in the span attributes
- assert "key" not in test_span.attributes
+ assert "key" not in test_span.attributes # type: ignore
with pytest.raises(KeyError):
assert read_from_opentelemetry_span(test_span, "key") == {}
def test_write_drops_null_value_from_dict(test_span: Span):
# GIVEN a test_span to which a dict with some null values is written
- write_to_opentelemetry_span(test_span, {"x": 2, "y": None}, "key")
+ # NOTE: mypy complains about the None values in the dict, but they are intentionally under test
+ write_to_opentelemetry_span(test_span, {"x": 2, "y": None}, "key") # type: ignore
# WHEN reading the values from the span
# THEN the null-valued entry is not present in the span attributes
assert read_from_opentelemetry_span(test_span, "key") == {"x": 2}
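The rewritten assertions above codify that lists do not round-trip through span attributes: writing flattens list indices into dotted keys, and reading back yields dicts keyed by the index strings. A minimal sketch of that flattening convention, assuming the helpers behave exactly as these tests describe:

```python
def _flatten(value, prefix=""):
    # Nested dicts/lists become dotted attribute keys; list indices turn
    # into string keys, which is why reads return dicts rather than lists.
    items = {}
    pairs = value.items() if isinstance(value, dict) else enumerate(value)
    for key, sub in pairs:
        full = f"{prefix}.{key}" if prefix else str(key)
        if isinstance(sub, (dict, list)):
            items.update(_flatten(sub, full))
        elif sub is not None:  # null values are dropped, as the last two tests assert
            items[full] = sub
    return items

assert _flatten([{"x": 7, "y": "foo"}, {"z": "bar"}], "key") == {
    "key.0.x": 7,
    "key.0.y": "foo",
    "key.1.z": "bar",
}
```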
From 02fcab7753d5c67a26d76a9f31b32b509eb886ae Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Mon, 28 Oct 2024 18:51:04 +0200
Subject: [PATCH 14/70] More unit tests for prompt decorators
---
poetry.lock | 556 +++++++++++++++++++++-
pyproject.toml | 9 +-
src/humanloop/otel/__init__.py | 11 +
src/humanloop/otel/exporter.py | 2 +-
src/humanloop/otel/processor.py | 14 +
tests/conftest.py | 33 +-
tests/decorators/test_prompt_decorator.py | 157 ++++--
7 files changed, 737 insertions(+), 45 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index 11aa6334..64ce6c02 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -11,15 +11,40 @@ files = [
{file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"},
]
+[[package]]
+name = "anthropic"
+version = "0.37.1"
+description = "The official Python library for the anthropic API"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "anthropic-0.37.1-py3-none-any.whl", hash = "sha256:8f550f88906823752e2abf99fbe491fbc8d40bce4cb26b9663abdf7be990d721"},
+ {file = "anthropic-0.37.1.tar.gz", hash = "sha256:99f688265795daa7ba9256ee68eaf2f05d53cd99d7417f4a0c2dc292c106d00a"},
+]
+
+[package.dependencies]
+anyio = ">=3.5.0,<5"
+distro = ">=1.7.0,<2"
+httpx = ">=0.23.0,<1"
+jiter = ">=0.4.0,<1"
+pydantic = ">=1.9.0,<3"
+sniffio = "*"
+tokenizers = ">=0.13.0"
+typing-extensions = ">=4.7,<5"
+
+[package.extras]
+bedrock = ["boto3 (>=1.28.57)", "botocore (>=1.31.57)"]
+vertex = ["google-auth (>=2,<3)"]
+
[[package]]
name = "anyio"
-version = "4.5.2"
+version = "4.6.2.post1"
description = "High level compatibility layer for multiple asynchronous event loop implementations"
optional = false
-python-versions = ">=3.8"
+python-versions = ">=3.9"
files = [
- {file = "anyio-4.5.2-py3-none-any.whl", hash = "sha256:c011ee36bc1e8ba40e5a81cb9df91925c218fe9b778554e0b56a21e1b5d4716f"},
- {file = "anyio-4.5.2.tar.gz", hash = "sha256:23009af4ed04ce05991845451e11ef02fc7c5ed29179ac9a420e5ad0ac7ddc5b"},
+ {file = "anyio-4.6.2.post1-py3-none-any.whl", hash = "sha256:6d170c36fba3bdd840c73d3868c1e777e33676a69c3a72cf0a0d5d6d8009b61d"},
+ {file = "anyio-4.6.2.post1.tar.gz", hash = "sha256:4c8bc31ccdb51c7f7bd251f51c609e038d63e34219b44aa86e47576389880b4c"},
]
[package.dependencies]
@@ -158,6 +183,32 @@ files = [
{file = "charset_normalizer-3.4.0.tar.gz", hash = "sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e"},
]
+[[package]]
+name = "cohere"
+version = "5.11.2"
+description = ""
+optional = false
+python-versions = "<4.0,>=3.8"
+files = [
+ {file = "cohere-5.11.2-py3-none-any.whl", hash = "sha256:310adb975817068488ba60d2d39e65b8fd28756df9a4905d5b16a69f79d78db7"},
+ {file = "cohere-5.11.2.tar.gz", hash = "sha256:99498e20343947ef1e1e01165312dd2fbf40be4f9eac336f9b71efba55e7ba6e"},
+]
+
+[package.dependencies]
+fastavro = ">=1.9.4,<2.0.0"
+httpx = ">=0.21.2"
+httpx-sse = "0.4.0"
+parameterized = ">=0.9.0,<0.10.0"
+pydantic = ">=1.9.2"
+pydantic-core = ">=2.18.2,<3.0.0"
+requests = ">=2.0.0,<3.0.0"
+tokenizers = ">=0.15,<1"
+types-requests = ">=2.0.0,<3.0.0"
+typing_extensions = ">=4.0.0"
+
+[package.extras]
+aws = ["boto3 (>=1.34.0,<2.0.0)", "sagemaker (>=2.232.1,<3.0.0)"]
+
[[package]]
name = "colorama"
version = "0.4.6"
@@ -211,6 +262,126 @@ files = [
[package.extras]
test = ["pytest (>=6)"]
+[[package]]
+name = "fastavro"
+version = "1.9.7"
+description = "Fast read/write of AVRO files"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "fastavro-1.9.7-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cc811fb4f7b5ae95f969cda910241ceacf82e53014c7c7224df6f6e0ca97f52f"},
+ {file = "fastavro-1.9.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb8749e419a85f251bf1ac87d463311874972554d25d4a0b19f6bdc56036d7cf"},
+ {file = "fastavro-1.9.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b2f9bafa167cb4d1c3dd17565cb5bf3d8c0759e42620280d1760f1e778e07fc"},
+ {file = "fastavro-1.9.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e87d04b235b29f7774d226b120da2ca4e60b9e6fdf6747daef7f13f218b3517a"},
+ {file = "fastavro-1.9.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b525c363e267ed11810aaad8fbdbd1c3bd8837d05f7360977d72a65ab8c6e1fa"},
+ {file = "fastavro-1.9.7-cp310-cp310-win_amd64.whl", hash = "sha256:6312fa99deecc319820216b5e1b1bd2d7ebb7d6f221373c74acfddaee64e8e60"},
+ {file = "fastavro-1.9.7-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ec8499dc276c2d2ef0a68c0f1ad11782b2b956a921790a36bf4c18df2b8d4020"},
+ {file = "fastavro-1.9.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76d9d96f98052615ab465c63ba8b76ed59baf2e3341b7b169058db104cbe2aa0"},
+ {file = "fastavro-1.9.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:919f3549e07a8a8645a2146f23905955c35264ac809f6c2ac18142bc5b9b6022"},
+ {file = "fastavro-1.9.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9de1fa832a4d9016724cd6facab8034dc90d820b71a5d57c7e9830ffe90f31e4"},
+ {file = "fastavro-1.9.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1d09227d1f48f13281bd5ceac958650805aef9a4ef4f95810128c1f9be1df736"},
+ {file = "fastavro-1.9.7-cp311-cp311-win_amd64.whl", hash = "sha256:2db993ae6cdc63e25eadf9f93c9e8036f9b097a3e61d19dca42536dcc5c4d8b3"},
+ {file = "fastavro-1.9.7-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:4e1289b731214a7315884c74b2ec058b6e84380ce9b18b8af5d387e64b18fc44"},
+ {file = "fastavro-1.9.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eac69666270a76a3a1d0444f39752061195e79e146271a568777048ffbd91a27"},
+ {file = "fastavro-1.9.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9be089be8c00f68e343bbc64ca6d9a13e5e5b0ba8aa52bcb231a762484fb270e"},
+ {file = "fastavro-1.9.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d576eccfd60a18ffa028259500df67d338b93562c6700e10ef68bbd88e499731"},
+ {file = "fastavro-1.9.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ee9bf23c157bd7dcc91ea2c700fa3bd924d9ec198bb428ff0b47fa37fe160659"},
+ {file = "fastavro-1.9.7-cp312-cp312-win_amd64.whl", hash = "sha256:b6b2ccdc78f6afc18c52e403ee68c00478da12142815c1bd8a00973138a166d0"},
+ {file = "fastavro-1.9.7-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:7313def3aea3dacface0a8b83f6d66e49a311149aa925c89184a06c1ef99785d"},
+ {file = "fastavro-1.9.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:536f5644737ad21d18af97d909dba099b9e7118c237be7e4bd087c7abde7e4f0"},
+ {file = "fastavro-1.9.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2af559f30383b79cf7d020a6b644c42ffaed3595f775fe8f3d7f80b1c43dfdc5"},
+ {file = "fastavro-1.9.7-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:edc28ab305e3c424de5ac5eb87b48d1e07eddb6aa08ef5948fcda33cc4d995ce"},
+ {file = "fastavro-1.9.7-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:ec2e96bdabd58427fe683329b3d79f42c7b4f4ff6b3644664a345a655ac2c0a1"},
+ {file = "fastavro-1.9.7-cp38-cp38-win_amd64.whl", hash = "sha256:3b683693c8a85ede496ebebe115be5d7870c150986e34a0442a20d88d7771224"},
+ {file = "fastavro-1.9.7-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:58f76a5c9a312fbd37b84e49d08eb23094d36e10d43bc5df5187bc04af463feb"},
+ {file = "fastavro-1.9.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:56304401d2f4f69f5b498bdd1552c13ef9a644d522d5de0dc1d789cf82f47f73"},
+ {file = "fastavro-1.9.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fcce036c6aa06269fc6a0428050fcb6255189997f5e1a728fc461e8b9d3e26b"},
+ {file = "fastavro-1.9.7-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:17de68aae8c2525f5631d80f2b447a53395cdc49134f51b0329a5497277fc2d2"},
+ {file = "fastavro-1.9.7-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7c911366c625d0a997eafe0aa83ffbc6fd00d8fd4543cb39a97c6f3b8120ea87"},
+ {file = "fastavro-1.9.7-cp39-cp39-win_amd64.whl", hash = "sha256:912283ed48578a103f523817fdf0c19b1755cea9b4a6387b73c79ecb8f8f84fc"},
+ {file = "fastavro-1.9.7.tar.gz", hash = "sha256:13e11c6cb28626da85290933027cd419ce3f9ab8e45410ef24ce6b89d20a1f6c"},
+]
+
+[package.extras]
+codecs = ["cramjam", "lz4", "zstandard"]
+lz4 = ["lz4"]
+snappy = ["cramjam"]
+zstandard = ["zstandard"]
+
+[[package]]
+name = "filelock"
+version = "3.16.1"
+description = "A platform independent file lock."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "filelock-3.16.1-py3-none-any.whl", hash = "sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0"},
+ {file = "filelock-3.16.1.tar.gz", hash = "sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435"},
+]
+
+[package.extras]
+docs = ["furo (>=2024.8.6)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4.1)"]
+testing = ["covdefaults (>=2.3)", "coverage (>=7.6.1)", "diff-cover (>=9.2)", "pytest (>=8.3.3)", "pytest-asyncio (>=0.24)", "pytest-cov (>=5)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.26.4)"]
+typing = ["typing-extensions (>=4.12.2)"]
+
+[[package]]
+name = "fsspec"
+version = "2024.10.0"
+description = "File-system specification"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "fsspec-2024.10.0-py3-none-any.whl", hash = "sha256:03b9a6785766a4de40368b88906366755e2819e758b83705c88cd7cb5fe81871"},
+ {file = "fsspec-2024.10.0.tar.gz", hash = "sha256:eda2d8a4116d4f2429db8550f2457da57279247dd930bb12f821b58391359493"},
+]
+
+[package.extras]
+abfs = ["adlfs"]
+adl = ["adlfs"]
+arrow = ["pyarrow (>=1)"]
+dask = ["dask", "distributed"]
+dev = ["pre-commit", "ruff"]
+doc = ["numpydoc", "sphinx", "sphinx-design", "sphinx-rtd-theme", "yarl"]
+dropbox = ["dropbox", "dropboxdrivefs", "requests"]
+full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "dask", "distributed", "dropbox", "dropboxdrivefs", "fusepy", "gcsfs", "libarchive-c", "ocifs", "panel", "paramiko", "pyarrow (>=1)", "pygit2", "requests", "s3fs", "smbprotocol", "tqdm"]
+fuse = ["fusepy"]
+gcs = ["gcsfs"]
+git = ["pygit2"]
+github = ["requests"]
+gs = ["gcsfs"]
+gui = ["panel"]
+hdfs = ["pyarrow (>=1)"]
+http = ["aiohttp (!=4.0.0a0,!=4.0.0a1)"]
+libarchive = ["libarchive-c"]
+oci = ["ocifs"]
+s3 = ["s3fs"]
+sftp = ["paramiko"]
+smb = ["smbprotocol"]
+ssh = ["paramiko"]
+test = ["aiohttp (!=4.0.0a0,!=4.0.0a1)", "numpy", "pytest", "pytest-asyncio (!=0.22.0)", "pytest-benchmark", "pytest-cov", "pytest-mock", "pytest-recording", "pytest-rerunfailures", "requests"]
+test-downstream = ["aiobotocore (>=2.5.4,<3.0.0)", "dask-expr", "dask[dataframe,test]", "moto[server] (>4,<5)", "pytest-timeout", "xarray"]
+test-full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "cloudpickle", "dask", "distributed", "dropbox", "dropboxdrivefs", "fastparquet", "fusepy", "gcsfs", "jinja2", "kerchunk", "libarchive-c", "lz4", "notebook", "numpy", "ocifs", "pandas", "panel", "paramiko", "pyarrow", "pyarrow (>=1)", "pyftpdlib", "pygit2", "pytest", "pytest-asyncio (!=0.22.0)", "pytest-benchmark", "pytest-cov", "pytest-mock", "pytest-recording", "pytest-rerunfailures", "python-snappy", "requests", "smbprotocol", "tqdm", "urllib3", "zarr", "zstandard"]
+tqdm = ["tqdm"]
+
+[[package]]
+name = "groq"
+version = "0.11.0"
+description = "The official Python library for the groq API"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "groq-0.11.0-py3-none-any.whl", hash = "sha256:e328531c979542e563668c62260aec13b43a6ee0ca9e2fb22dff1d26f8c8ce54"},
+ {file = "groq-0.11.0.tar.gz", hash = "sha256:dbb9aefedf388ddd4801ec7bf3eba7f5edb67948fec0cd2829d97244059f42a7"},
+]
+
+[package.dependencies]
+anyio = ">=3.5.0,<5"
+distro = ">=1.7.0,<2"
+httpx = ">=0.23.0,<1"
+pydantic = ">=1.9.0,<3"
+sniffio = "*"
+typing-extensions = ">=4.7,<5"
+
[[package]]
name = "h11"
version = "0.14.0"
@@ -279,6 +450,40 @@ files = [
{file = "httpx_sse-0.4.0-py3-none-any.whl", hash = "sha256:f329af6eae57eaa2bdfd962b42524764af68075ea87370a2de920af5341e318f"},
]
+[[package]]
+name = "huggingface-hub"
+version = "0.26.1"
+description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub"
+optional = false
+python-versions = ">=3.8.0"
+files = [
+ {file = "huggingface_hub-0.26.1-py3-none-any.whl", hash = "sha256:5927a8fc64ae68859cd954b7cc29d1c8390a5e15caba6d3d349c973be8fdacf3"},
+ {file = "huggingface_hub-0.26.1.tar.gz", hash = "sha256:414c0d9b769eecc86c70f9d939d0f48bb28e8461dd1130021542eff0212db890"},
+]
+
+[package.dependencies]
+filelock = "*"
+fsspec = ">=2023.5.0"
+packaging = ">=20.9"
+pyyaml = ">=5.1"
+requests = "*"
+tqdm = ">=4.42.1"
+typing-extensions = ">=3.7.4.3"
+
+[package.extras]
+all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio (>=4.0.0)", "jedi", "libcst (==1.4.0)", "mypy (==1.5.1)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.5.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"]
+cli = ["InquirerPy (==0.3.4)"]
+dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio (>=4.0.0)", "jedi", "libcst (==1.4.0)", "mypy (==1.5.1)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.5.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"]
+fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"]
+hf-transfer = ["hf-transfer (>=0.1.4)"]
+inference = ["aiohttp"]
+quality = ["libcst (==1.4.0)", "mypy (==1.5.1)", "ruff (>=0.5.0)"]
+tensorflow = ["graphviz", "pydot", "tensorflow"]
+tensorflow-testing = ["keras (<3.0)", "tensorflow"]
+testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio (>=4.0.0)", "jedi", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"]
+torch = ["safetensors[torch]", "torch"]
+typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)"]
+
[[package]]
name = "idna"
version = "3.10"
@@ -405,6 +610,22 @@ files = [
{file = "jiter-0.6.1.tar.gz", hash = "sha256:e19cd21221fc139fb032e4112986656cb2739e9fe6d84c13956ab30ccc7d4449"},
]
+[[package]]
+name = "mistralai"
+version = "0.4.2"
+description = ""
+optional = false
+python-versions = "<4.0,>=3.9"
+files = [
+ {file = "mistralai-0.4.2-py3-none-any.whl", hash = "sha256:63c98eea139585f0a3b2c4c6c09c453738bac3958055e6f2362d3866e96b0168"},
+ {file = "mistralai-0.4.2.tar.gz", hash = "sha256:5eb656710517168ae053f9847b0bb7f617eda07f1f93f946ad6c91a4d407fd93"},
+]
+
+[package.dependencies]
+httpx = ">=0.25,<1"
+orjson = ">=3.9.10,<3.11"
+pydantic = ">=2.5.2,<3"
+
[[package]]
name = "mypy"
version = "1.0.1"
@@ -551,6 +772,23 @@ opentelemetry-instrumentation = ">=0.48b0,<0.49"
opentelemetry-semantic-conventions = ">=0.48b0,<0.49"
opentelemetry-semantic-conventions-ai = "0.4.1"
+[[package]]
+name = "opentelemetry-instrumentation-groq"
+version = "0.33.3"
+description = "OpenTelemetry Groq instrumentation"
+optional = false
+python-versions = "<4,>=3.9"
+files = [
+ {file = "opentelemetry_instrumentation_groq-0.33.3-py3-none-any.whl", hash = "sha256:53d75f8ec2dbcf5e0f06ed53a7a4cb875823749cb96bbc07dbb7a1d5ee374e32"},
+ {file = "opentelemetry_instrumentation_groq-0.33.3.tar.gz", hash = "sha256:98408aaf91e2d55ad348deb12666339fbcb972b18ec511c4f394d3fac37041eb"},
+]
+
+[package.dependencies]
+opentelemetry-api = ">=1.27.0,<2.0.0"
+opentelemetry-instrumentation = ">=0.48b0,<0.49"
+opentelemetry-semantic-conventions = ">=0.48b0,<0.49"
+opentelemetry-semantic-conventions-ai = "0.4.1"
+
[[package]]
name = "opentelemetry-instrumentation-mistralai"
version = "0.33.3"
@@ -586,6 +824,23 @@ opentelemetry-semantic-conventions = ">=0.48b0,<0.49"
opentelemetry-semantic-conventions-ai = "0.4.1"
tiktoken = ">=0.6.0,<1"
+[[package]]
+name = "opentelemetry-instrumentation-replicate"
+version = "0.33.3"
+description = "OpenTelemetry Replicate instrumentation"
+optional = false
+python-versions = "<4,>=3.9"
+files = [
+ {file = "opentelemetry_instrumentation_replicate-0.33.3-py3-none-any.whl", hash = "sha256:c2870c1939b69ff3c57a508404cec75329e07c907eb9600f47ec64be2c0b8310"},
+ {file = "opentelemetry_instrumentation_replicate-0.33.3.tar.gz", hash = "sha256:06c9f63f7c235392567b10efe20f8cb2379f322d0a72e4c52ab4912f1ebb943a"},
+]
+
+[package.dependencies]
+opentelemetry-api = ">=1.27.0,<2.0.0"
+opentelemetry-instrumentation = ">=0.48b0,<0.49"
+opentelemetry-semantic-conventions = ">=0.48b0,<0.49"
+opentelemetry-semantic-conventions-ai = "0.4.1"
+
[[package]]
name = "opentelemetry-sdk"
version = "1.27.0"
@@ -628,6 +883,73 @@ files = [
{file = "opentelemetry_semantic_conventions_ai-0.4.1.tar.gz", hash = "sha256:aaf59b2f24d745692170b96d86d7c5560f42443dcf88ced49ae9d4542db1902f"},
]
+[[package]]
+name = "orjson"
+version = "3.10.10"
+description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "orjson-3.10.10-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:b788a579b113acf1c57e0a68e558be71d5d09aa67f62ca1f68e01117e550a998"},
+ {file = "orjson-3.10.10-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:804b18e2b88022c8905bb79bd2cbe59c0cd014b9328f43da8d3b28441995cda4"},
+ {file = "orjson-3.10.10-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9972572a1d042ec9ee421b6da69f7cc823da5962237563fa548ab17f152f0b9b"},
+ {file = "orjson-3.10.10-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc6993ab1c2ae7dd0711161e303f1db69062955ac2668181bfdf2dd410e65258"},
+ {file = "orjson-3.10.10-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d78e4cacced5781b01d9bc0f0cd8b70b906a0e109825cb41c1b03f9c41e4ce86"},
+ {file = "orjson-3.10.10-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e6eb2598df518281ba0cbc30d24c5b06124ccf7e19169e883c14e0831217a0bc"},
+ {file = "orjson-3.10.10-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:23776265c5215ec532de6238a52707048401a568f0fa0d938008e92a147fe2c7"},
+ {file = "orjson-3.10.10-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8cc2a654c08755cef90b468ff17c102e2def0edd62898b2486767204a7f5cc9c"},
+ {file = "orjson-3.10.10-cp310-none-win32.whl", hash = "sha256:081b3fc6a86d72efeb67c13d0ea7c030017bd95f9868b1e329a376edc456153b"},
+ {file = "orjson-3.10.10-cp310-none-win_amd64.whl", hash = "sha256:ff38c5fb749347768a603be1fb8a31856458af839f31f064c5aa74aca5be9efe"},
+ {file = "orjson-3.10.10-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:879e99486c0fbb256266c7c6a67ff84f46035e4f8749ac6317cc83dacd7f993a"},
+ {file = "orjson-3.10.10-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:019481fa9ea5ff13b5d5d95e6fd5ab25ded0810c80b150c2c7b1cc8660b662a7"},
+ {file = "orjson-3.10.10-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0dd57eff09894938b4c86d4b871a479260f9e156fa7f12f8cad4b39ea8028bb5"},
+ {file = "orjson-3.10.10-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dbde6d70cd95ab4d11ea8ac5e738e30764e510fc54d777336eec09bb93b8576c"},
+ {file = "orjson-3.10.10-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b2625cb37b8fb42e2147404e5ff7ef08712099197a9cd38895006d7053e69d6"},
+ {file = "orjson-3.10.10-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dbf3c20c6a7db69df58672a0d5815647ecf78c8e62a4d9bd284e8621c1fe5ccb"},
+ {file = "orjson-3.10.10-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:75c38f5647e02d423807d252ce4528bf6a95bd776af999cb1fb48867ed01d1f6"},
+ {file = "orjson-3.10.10-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:23458d31fa50ec18e0ec4b0b4343730928296b11111df5f547c75913714116b2"},
+ {file = "orjson-3.10.10-cp311-none-win32.whl", hash = "sha256:2787cd9dedc591c989f3facd7e3e86508eafdc9536a26ec277699c0aa63c685b"},
+ {file = "orjson-3.10.10-cp311-none-win_amd64.whl", hash = "sha256:6514449d2c202a75183f807bc755167713297c69f1db57a89a1ef4a0170ee269"},
+ {file = "orjson-3.10.10-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:8564f48f3620861f5ef1e080ce7cd122ee89d7d6dacf25fcae675ff63b4d6e05"},
+ {file = "orjson-3.10.10-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5bf161a32b479034098c5b81f2608f09167ad2fa1c06abd4e527ea6bf4837a9"},
+ {file = "orjson-3.10.10-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:68b65c93617bcafa7f04b74ae8bc2cc214bd5cb45168a953256ff83015c6747d"},
+ {file = "orjson-3.10.10-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e8e28406f97fc2ea0c6150f4c1b6e8261453318930b334abc419214c82314f85"},
+ {file = "orjson-3.10.10-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e4d0d9fe174cc7a5bdce2e6c378bcdb4c49b2bf522a8f996aa586020e1b96cee"},
+ {file = "orjson-3.10.10-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3be81c42f1242cbed03cbb3973501fcaa2675a0af638f8be494eaf37143d999"},
+ {file = "orjson-3.10.10-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:65f9886d3bae65be026219c0a5f32dbbe91a9e6272f56d092ab22561ad0ea33b"},
+ {file = "orjson-3.10.10-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:730ed5350147db7beb23ddaf072f490329e90a1d059711d364b49fe352ec987b"},
+ {file = "orjson-3.10.10-cp312-none-win32.whl", hash = "sha256:a8f4bf5f1c85bea2170800020d53a8877812892697f9c2de73d576c9307a8a5f"},
+ {file = "orjson-3.10.10-cp312-none-win_amd64.whl", hash = "sha256:384cd13579a1b4cd689d218e329f459eb9ddc504fa48c5a83ef4889db7fd7a4f"},
+ {file = "orjson-3.10.10-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:44bffae68c291f94ff5a9b4149fe9d1bdd4cd0ff0fb575bcea8351d48db629a1"},
+ {file = "orjson-3.10.10-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e27b4c6437315df3024f0835887127dac2a0a3ff643500ec27088d2588fa5ae1"},
+ {file = "orjson-3.10.10-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bca84df16d6b49325a4084fd8b2fe2229cb415e15c46c529f868c3387bb1339d"},
+ {file = "orjson-3.10.10-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c14ce70e8f39bd71f9f80423801b5d10bf93d1dceffdecd04df0f64d2c69bc01"},
+ {file = "orjson-3.10.10-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:24ac62336da9bda1bd93c0491eff0613003b48d3cb5d01470842e7b52a40d5b4"},
+ {file = "orjson-3.10.10-cp313-none-win32.whl", hash = "sha256:eb0a42831372ec2b05acc9ee45af77bcaccbd91257345f93780a8e654efc75db"},
+ {file = "orjson-3.10.10-cp313-none-win_amd64.whl", hash = "sha256:f0c4f37f8bf3f1075c6cc8dd8a9f843689a4b618628f8812d0a71e6968b95ffd"},
+ {file = "orjson-3.10.10-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:829700cc18503efc0cf502d630f612884258020d98a317679cd2054af0259568"},
+ {file = "orjson-3.10.10-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e0ceb5e0e8c4f010ac787d29ae6299846935044686509e2f0f06ed441c1ca949"},
+ {file = "orjson-3.10.10-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0c25908eb86968613216f3db4d3003f1c45d78eb9046b71056ca327ff92bdbd4"},
+ {file = "orjson-3.10.10-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:218cb0bc03340144b6328a9ff78f0932e642199ac184dd74b01ad691f42f93ff"},
+ {file = "orjson-3.10.10-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e2277ec2cea3775640dc81ab5195bb5b2ada2fe0ea6eee4677474edc75ea6785"},
+ {file = "orjson-3.10.10-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:848ea3b55ab5ccc9d7bbd420d69432628b691fba3ca8ae3148c35156cbd282aa"},
+ {file = "orjson-3.10.10-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:e3e67b537ac0c835b25b5f7d40d83816abd2d3f4c0b0866ee981a045287a54f3"},
+ {file = "orjson-3.10.10-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:7948cfb909353fce2135dcdbe4521a5e7e1159484e0bb024c1722f272488f2b8"},
+ {file = "orjson-3.10.10-cp38-none-win32.whl", hash = "sha256:78bee66a988f1a333dc0b6257503d63553b1957889c17b2c4ed72385cd1b96ae"},
+ {file = "orjson-3.10.10-cp38-none-win_amd64.whl", hash = "sha256:f1d647ca8d62afeb774340a343c7fc023efacfd3a39f70c798991063f0c681dd"},
+ {file = "orjson-3.10.10-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:5a059afddbaa6dd733b5a2d76a90dbc8af790b993b1b5cb97a1176ca713b5df8"},
+ {file = "orjson-3.10.10-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f9b5c59f7e2a1a410f971c5ebc68f1995822837cd10905ee255f96074537ee6"},
+ {file = "orjson-3.10.10-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d5ef198bafdef4aa9d49a4165ba53ffdc0a9e1c7b6f76178572ab33118afea25"},
+ {file = "orjson-3.10.10-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aaf29ce0bb5d3320824ec3d1508652421000ba466abd63bdd52c64bcce9eb1fa"},
+ {file = "orjson-3.10.10-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dddd5516bcc93e723d029c1633ae79c4417477b4f57dad9bfeeb6bc0315e654a"},
+ {file = "orjson-3.10.10-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a12f2003695b10817f0fa8b8fca982ed7f5761dcb0d93cff4f2f9f6709903fd7"},
+ {file = "orjson-3.10.10-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:672f9874a8a8fb9bb1b771331d31ba27f57702c8106cdbadad8bda5d10bc1019"},
+ {file = "orjson-3.10.10-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1dcbb0ca5fafb2b378b2c74419480ab2486326974826bbf6588f4dc62137570a"},
+ {file = "orjson-3.10.10-cp39-none-win32.whl", hash = "sha256:d9bbd3a4b92256875cb058c3381b782649b9a3c68a4aa9a2fff020c2f9cfc1be"},
+ {file = "orjson-3.10.10-cp39-none-win_amd64.whl", hash = "sha256:766f21487a53aee8524b97ca9582d5c6541b03ab6210fbaf10142ae2f3ced2aa"},
+ {file = "orjson-3.10.10.tar.gz", hash = "sha256:37949383c4df7b4337ce82ee35b6d7471e55195efa7dcb45ab8226ceadb0fe3b"},
+]
+
[[package]]
name = "packaging"
version = "24.2"
@@ -639,6 +961,20 @@ files = [
{file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"},
]
+[[package]]
+name = "parameterized"
+version = "0.9.0"
+description = "Parameterized testing with any Python test framework"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "parameterized-0.9.0-py2.py3-none-any.whl", hash = "sha256:4e0758e3d41bea3bbd05ec14fc2c24736723f243b28d702081aef438c9372b1b"},
+ {file = "parameterized-0.9.0.tar.gz", hash = "sha256:7fc905272cefa4f364c1a3429cbbe9c0f98b793988efb5bf90aac80f08db09b1"},
+]
+
+[package.extras]
+dev = ["jinja2"]
+
[[package]]
name = "parse"
version = "1.20.2"
@@ -877,6 +1213,68 @@ files = [
[package.extras]
cli = ["click (>=5.0)"]
+[[package]]
+name = "pyyaml"
+version = "6.0.2"
+description = "YAML parser and emitter for Python"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"},
+ {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"},
+ {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"},
+ {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"},
+ {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"},
+ {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"},
+ {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"},
+ {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"},
+ {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"},
+ {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"},
+ {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"},
+ {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"},
+ {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"},
+ {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"},
+ {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"},
+ {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"},
+ {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"},
+ {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"},
+ {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"},
+ {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"},
+ {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"},
+ {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"},
+ {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"},
+ {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"},
+ {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"},
+ {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"},
+ {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"},
+ {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"},
+ {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"},
+ {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"},
+ {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"},
+ {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"},
+ {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"},
+ {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"},
+ {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"},
+ {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"},
+ {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"},
+ {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"},
+ {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"},
+ {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"},
+ {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"},
+ {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"},
+ {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"},
+ {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"},
+ {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"},
+ {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"},
+ {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"},
+ {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"},
+ {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"},
+ {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"},
+ {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"},
+ {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"},
+ {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"},
+]
+
[[package]]
name = "regex"
version = "2024.9.11"
@@ -980,6 +1378,23 @@ files = [
{file = "regex-2024.9.11.tar.gz", hash = "sha256:6c188c307e8433bcb63dc1915022deb553b4203a70722fc542c363bf120a01fd"},
]
+[[package]]
+name = "replicate"
+version = "1.0.3"
+description = "Python client for Replicate"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "replicate-1.0.3-py3-none-any.whl", hash = "sha256:8c49d63444b7ea9ac1d6af99eb23a01efb5b7f079cc8a020d6f52b38843db1da"},
+ {file = "replicate-1.0.3.tar.gz", hash = "sha256:0fd9ca5230fe67c42e4508dd96a5b1414b3fefa5342f8921dbb63c74266cb130"},
+]
+
+[package.dependencies]
+httpx = ">=0.21.0,<1"
+packaging = "*"
+pydantic = ">1.10.7"
+typing-extensions = ">=4.5.0"
+
[[package]]
name = "requests"
version = "2.32.3"
@@ -1117,6 +1532,123 @@ requests = ">=2.26.0"
[package.extras]
blobfile = ["blobfile (>=2)"]
+[[package]]
+name = "tokenizers"
+version = "0.20.1"
+description = ""
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "tokenizers-0.20.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:439261da7c0a5c88bda97acb284d49fbdaf67e9d3b623c0bfd107512d22787a9"},
+ {file = "tokenizers-0.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:03dae629d99068b1ea5416d50de0fea13008f04129cc79af77a2a6392792d93c"},
+ {file = "tokenizers-0.20.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b61f561f329ffe4b28367798b89d60c4abf3f815d37413b6352bc6412a359867"},
+ {file = "tokenizers-0.20.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ec870fce1ee5248a10be69f7a8408a234d6f2109f8ea827b4f7ecdbf08c9fd15"},
+ {file = "tokenizers-0.20.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d388d1ea8b7447da784e32e3b86a75cce55887e3b22b31c19d0b186b1c677800"},
+ {file = "tokenizers-0.20.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:299c85c1d21135bc01542237979bf25c32efa0d66595dd0069ae259b97fb2dbe"},
+ {file = "tokenizers-0.20.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e96f6c14c9752bb82145636b614d5a78e9cde95edfbe0a85dad0dd5ddd6ec95c"},
+ {file = "tokenizers-0.20.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc9e95ad49c932b80abfbfeaf63b155761e695ad9f8a58c52a47d962d76e310f"},
+ {file = "tokenizers-0.20.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:f22dee205329a636148c325921c73cf3e412e87d31f4d9c3153b302a0200057b"},
+ {file = "tokenizers-0.20.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a2ffd9a8895575ac636d44500c66dffaef133823b6b25067604fa73bbc5ec09d"},
+ {file = "tokenizers-0.20.1-cp310-none-win32.whl", hash = "sha256:2847843c53f445e0f19ea842a4e48b89dd0db4e62ba6e1e47a2749d6ec11f50d"},
+ {file = "tokenizers-0.20.1-cp310-none-win_amd64.whl", hash = "sha256:f9aa93eacd865f2798b9e62f7ce4533cfff4f5fbd50c02926a78e81c74e432cd"},
+ {file = "tokenizers-0.20.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:4a717dcb08f2dabbf27ae4b6b20cbbb2ad7ed78ce05a829fae100ff4b3c7ff15"},
+ {file = "tokenizers-0.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3f84dad1ff1863c648d80628b1b55353d16303431283e4efbb6ab1af56a75832"},
+ {file = "tokenizers-0.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:929c8f3afa16a5130a81ab5079c589226273ec618949cce79b46d96e59a84f61"},
+ {file = "tokenizers-0.20.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d10766473954397e2d370f215ebed1cc46dcf6fd3906a2a116aa1d6219bfedc3"},
+ {file = "tokenizers-0.20.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9300fac73ddc7e4b0330acbdda4efaabf74929a4a61e119a32a181f534a11b47"},
+ {file = "tokenizers-0.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0ecaf7b0e39caeb1aa6dd6e0975c405716c82c1312b55ac4f716ef563a906969"},
+ {file = "tokenizers-0.20.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5170be9ec942f3d1d317817ced8d749b3e1202670865e4fd465e35d8c259de83"},
+ {file = "tokenizers-0.20.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ef3f1ae08fa9aea5891cbd69df29913e11d3841798e0bfb1ff78b78e4e7ea0a4"},
+ {file = "tokenizers-0.20.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ee86d4095d3542d73579e953c2e5e07d9321af2ffea6ecc097d16d538a2dea16"},
+ {file = "tokenizers-0.20.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:86dcd08da163912e17b27bbaba5efdc71b4fbffb841530fdb74c5707f3c49216"},
+ {file = "tokenizers-0.20.1-cp311-none-win32.whl", hash = "sha256:9af2dc4ee97d037bc6b05fa4429ddc87532c706316c5e11ce2f0596dfcfa77af"},
+ {file = "tokenizers-0.20.1-cp311-none-win_amd64.whl", hash = "sha256:899152a78b095559c287b4c6d0099469573bb2055347bb8154db106651296f39"},
+ {file = "tokenizers-0.20.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:407ab666b38e02228fa785e81f7cf79ef929f104bcccf68a64525a54a93ceac9"},
+ {file = "tokenizers-0.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2f13a2d16032ebc8bd812eb8099b035ac65887d8f0c207261472803b9633cf3e"},
+ {file = "tokenizers-0.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e98eee4dca22849fbb56a80acaa899eec5b72055d79637dd6aa15d5e4b8628c9"},
+ {file = "tokenizers-0.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:47c1bcdd61e61136087459cb9e0b069ff23b5568b008265e5cbc927eae3387ce"},
+ {file = "tokenizers-0.20.1-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:128c1110e950534426e2274837fc06b118ab5f2fa61c3436e60e0aada0ccfd67"},
+ {file = "tokenizers-0.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e2e2d47a819d2954f2c1cd0ad51bb58ffac6f53a872d5d82d65d79bf76b9896d"},
+ {file = "tokenizers-0.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bdd67a0e3503a9a7cf8bc5a4a49cdde5fa5bada09a51e4c7e1c73900297539bd"},
+ {file = "tokenizers-0.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:689b93d2e26d04da337ac407acec8b5d081d8d135e3e5066a88edd5bdb5aff89"},
+ {file = "tokenizers-0.20.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0c6a796ddcd9a19ad13cf146997cd5895a421fe6aec8fd970d69f9117bddb45c"},
+ {file = "tokenizers-0.20.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3ea919687aa7001a8ff1ba36ac64f165c4e89035f57998fa6cedcfd877be619d"},
+ {file = "tokenizers-0.20.1-cp312-none-win32.whl", hash = "sha256:6d3ac5c1f48358ffe20086bf065e843c0d0a9fce0d7f0f45d5f2f9fba3609ca5"},
+ {file = "tokenizers-0.20.1-cp312-none-win_amd64.whl", hash = "sha256:b0874481aea54a178f2bccc45aa2d0c99cd3f79143a0948af6a9a21dcc49173b"},
+ {file = "tokenizers-0.20.1-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:96af92e833bd44760fb17f23f402e07a66339c1dcbe17d79a9b55bb0cc4f038e"},
+ {file = "tokenizers-0.20.1-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:65f34e5b731a262dfa562820818533c38ce32a45864437f3d9c82f26c139ca7f"},
+ {file = "tokenizers-0.20.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:17f98fccb5c12ab1ce1f471731a9cd86df5d4bd2cf2880c5a66b229802d96145"},
+ {file = "tokenizers-0.20.1-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b8c0fc3542cf9370bf92c932eb71bdeb33d2d4aeeb4126d9fd567b60bd04cb30"},
+ {file = "tokenizers-0.20.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4b39356df4575d37f9b187bb623aab5abb7b62c8cb702867a1768002f814800c"},
+ {file = "tokenizers-0.20.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bfdad27b0e50544f6b838895a373db6114b85112ba5c0cefadffa78d6daae563"},
+ {file = "tokenizers-0.20.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:094663dd0e85ee2e573126918747bdb40044a848fde388efb5b09d57bc74c680"},
+ {file = "tokenizers-0.20.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14e4cf033a2aa207d7ac790e91adca598b679999710a632c4a494aab0fc3a1b2"},
+ {file = "tokenizers-0.20.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:9310951c92c9fb91660de0c19a923c432f110dbfad1a2d429fbc44fa956bf64f"},
+ {file = "tokenizers-0.20.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:05e41e302c315bd2ed86c02e917bf03a6cf7d2f652c9cee1a0eb0d0f1ca0d32c"},
+ {file = "tokenizers-0.20.1-cp37-none-win32.whl", hash = "sha256:212231ab7dfcdc879baf4892ca87c726259fa7c887e1688e3f3cead384d8c305"},
+ {file = "tokenizers-0.20.1-cp37-none-win_amd64.whl", hash = "sha256:896195eb9dfdc85c8c052e29947169c1fcbe75a254c4b5792cdbd451587bce85"},
+ {file = "tokenizers-0.20.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:741fb22788482d09d68e73ece1495cfc6d9b29a06c37b3df90564a9cfa688e6d"},
+ {file = "tokenizers-0.20.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:10be14ebd8082086a342d969e17fc2d6edc856c59dbdbddd25f158fa40eaf043"},
+ {file = "tokenizers-0.20.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:514cf279b22fa1ae0bc08e143458c74ad3b56cd078b319464959685a35c53d5e"},
+ {file = "tokenizers-0.20.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a647c5b7cb896d6430cf3e01b4e9a2d77f719c84cefcef825d404830c2071da2"},
+ {file = "tokenizers-0.20.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7cdf379219e1e1dd432091058dab325a2e6235ebb23e0aec8d0508567c90cd01"},
+ {file = "tokenizers-0.20.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ba72260449e16c4c2f6f3252823b059fbf2d31b32617e582003f2b18b415c39"},
+ {file = "tokenizers-0.20.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:910b96ed87316e4277b23c7bcaf667ce849c7cc379a453fa179e7e09290eeb25"},
+ {file = "tokenizers-0.20.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e53975a6694428a0586534cc1354b2408d4e010a3103117f617cbb550299797c"},
+ {file = "tokenizers-0.20.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:07c4b7be58da142b0730cc4e5fd66bb7bf6f57f4986ddda73833cd39efef8a01"},
+ {file = "tokenizers-0.20.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b605c540753e62199bf15cf69c333e934077ef2350262af2ccada46026f83d1c"},
+ {file = "tokenizers-0.20.1-cp38-none-win32.whl", hash = "sha256:88b3bc76ab4db1ab95ead623d49c95205411e26302cf9f74203e762ac7e85685"},
+ {file = "tokenizers-0.20.1-cp38-none-win_amd64.whl", hash = "sha256:d412a74cf5b3f68a90c615611a5aa4478bb303d1c65961d22db45001df68afcb"},
+ {file = "tokenizers-0.20.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a25dcb2f41a0a6aac31999e6c96a75e9152fa0127af8ece46c2f784f23b8197a"},
+ {file = "tokenizers-0.20.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a12c3cebb8c92e9c35a23ab10d3852aee522f385c28d0b4fe48c0b7527d59762"},
+ {file = "tokenizers-0.20.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:02e18da58cf115b7c40de973609c35bde95856012ba42a41ee919c77935af251"},
+ {file = "tokenizers-0.20.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f326a1ac51ae909b9760e34671c26cd0dfe15662f447302a9d5bb2d872bab8ab"},
+ {file = "tokenizers-0.20.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0b4872647ea6f25224e2833b044b0b19084e39400e8ead3cfe751238b0802140"},
+ {file = "tokenizers-0.20.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce6238a3311bb8e4c15b12600927d35c267b92a52c881ef5717a900ca14793f7"},
+ {file = "tokenizers-0.20.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57b7a8880b208866508b06ce365dc631e7a2472a3faa24daa430d046fb56c885"},
+ {file = "tokenizers-0.20.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a908c69c2897a68f412aa05ba38bfa87a02980df70f5a72fa8490479308b1f2d"},
+ {file = "tokenizers-0.20.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:da1001aa46f4490099c82e2facc4fbc06a6a32bf7de3918ba798010954b775e0"},
+ {file = "tokenizers-0.20.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:42c097390e2f0ed0a5c5d569e6669dd4e9fff7b31c6a5ce6e9c66a61687197de"},
+ {file = "tokenizers-0.20.1-cp39-none-win32.whl", hash = "sha256:3d4d218573a3d8b121a1f8c801029d70444ffb6d8f129d4cca1c7b672ee4a24c"},
+ {file = "tokenizers-0.20.1-cp39-none-win_amd64.whl", hash = "sha256:37d1e6f616c84fceefa7c6484a01df05caf1e207669121c66213cb5b2911d653"},
+ {file = "tokenizers-0.20.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:48689da7a395df41114f516208d6550e3e905e1239cc5ad386686d9358e9cef0"},
+ {file = "tokenizers-0.20.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:712f90ea33f9bd2586b4a90d697c26d56d0a22fd3c91104c5858c4b5b6489a79"},
+ {file = "tokenizers-0.20.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:359eceb6a620c965988fc559cebc0a98db26713758ec4df43fb76d41486a8ed5"},
+ {file = "tokenizers-0.20.1-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0d3caf244ce89d24c87545aafc3448be15870096e796c703a0d68547187192e1"},
+ {file = "tokenizers-0.20.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03b03cf8b9a32254b1bf8a305fb95c6daf1baae0c1f93b27f2b08c9759f41dee"},
+ {file = "tokenizers-0.20.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:218e5a3561561ea0f0ef1559c6d95b825308dbec23fb55b70b92589e7ff2e1e8"},
+ {file = "tokenizers-0.20.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f40df5e0294a95131cc5f0e0eb91fe86d88837abfbee46b9b3610b09860195a7"},
+ {file = "tokenizers-0.20.1-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:08aaa0d72bb65058e8c4b0455f61b840b156c557e2aca57627056624c3a93976"},
+ {file = "tokenizers-0.20.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:998700177b45f70afeb206ad22c08d9e5f3a80639dae1032bf41e8cbc4dada4b"},
+ {file = "tokenizers-0.20.1-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62f7fbd3c2c38b179556d879edae442b45f68312019c3a6013e56c3947a4e648"},
+ {file = "tokenizers-0.20.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:31e87fca4f6bbf5cc67481b562147fe932f73d5602734de7dd18a8f2eee9c6dd"},
+ {file = "tokenizers-0.20.1-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:956f21d359ae29dd51ca5726d2c9a44ffafa041c623f5aa33749da87cfa809b9"},
+ {file = "tokenizers-0.20.1-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:1fbbaf17a393c78d8aedb6a334097c91cb4119a9ced4764ab8cfdc8d254dc9f9"},
+ {file = "tokenizers-0.20.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:ebe63e31f9c1a970c53866d814e35ec2ec26fda03097c486f82f3891cee60830"},
+ {file = "tokenizers-0.20.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:81970b80b8ac126910295f8aab2d7ef962009ea39e0d86d304769493f69aaa1e"},
+ {file = "tokenizers-0.20.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:130e35e76f9337ed6c31be386e75d4925ea807055acf18ca1a9b0eec03d8fe23"},
+ {file = "tokenizers-0.20.1-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cd28a8614f5c82a54ab2463554e84ad79526c5184cf4573bbac2efbbbcead457"},
+ {file = "tokenizers-0.20.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9041ee665d0fa7f5c4ccf0f81f5e6b7087f797f85b143c094126fc2611fec9d0"},
+ {file = "tokenizers-0.20.1-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:62eb9daea2a2c06bcd8113a5824af8ef8ee7405d3a71123ba4d52c79bb3d9f1a"},
+ {file = "tokenizers-0.20.1-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f861889707b54a9ab1204030b65fd6c22bdd4a95205deec7994dc22a8baa2ea4"},
+ {file = "tokenizers-0.20.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:89d5c337d74ea6e5e7dc8af124cf177be843bbb9ca6e58c01f75ea103c12c8a9"},
+ {file = "tokenizers-0.20.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:0b7f515c83397e73292accdbbbedc62264e070bae9682f06061e2ddce67cacaf"},
+ {file = "tokenizers-0.20.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e0305fc1ec6b1e5052d30d9c1d5c807081a7bd0cae46a33d03117082e91908c"},
+ {file = "tokenizers-0.20.1-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5dc611e6ac0fa00a41de19c3bf6391a05ea201d2d22b757d63f5491ec0e67faa"},
+ {file = "tokenizers-0.20.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c5ffe0d7f7bfcfa3b2585776ecf11da2e01c317027c8573c78ebcb8985279e23"},
+ {file = "tokenizers-0.20.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:e7edb8ec12c100d5458d15b1e47c0eb30ad606a05641f19af7563bc3d1608c14"},
+ {file = "tokenizers-0.20.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:de291633fb9303555793cc544d4a86e858da529b7d0b752bcaf721ae1d74b2c9"},
+ {file = "tokenizers-0.20.1.tar.gz", hash = "sha256:84edcc7cdeeee45ceedb65d518fffb77aec69311c9c8e30f77ad84da3025f002"},
+]
+
+[package.dependencies]
+huggingface-hub = ">=0.16.4,<1.0"
+
+[package.extras]
+dev = ["tokenizers[testing]"]
+docs = ["setuptools-rust", "sphinx", "sphinx-rtd-theme"]
+testing = ["black (==22.3)", "datasets", "numpy", "pytest", "requests", "ruff"]
+
[[package]]
name = "tomli"
version = "2.0.2"
@@ -1159,6 +1691,20 @@ files = [
{file = "types_python_dateutil-2.9.0.20241003-py3-none-any.whl", hash = "sha256:250e1d8e80e7bbc3a6c99b907762711d1a1cdd00e978ad39cb5940f6f0a87f3d"},
]
+[[package]]
+name = "types-requests"
+version = "2.32.0.20241016"
+description = "Typing stubs for requests"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "types-requests-2.32.0.20241016.tar.gz", hash = "sha256:0d9cad2f27515d0e3e3da7134a1b6f28fb97129d86b867f24d9c726452634d95"},
+ {file = "types_requests-2.32.0.20241016-py3-none-any.whl", hash = "sha256:4195d62d6d3e043a4eaaf08ff8a62184584d2e8684e9d2aa178c7915a7da3747"},
+]
+
+[package.dependencies]
+urllib3 = ">=2"
+
[[package]]
name = "typing-extensions"
version = "4.12.2"
@@ -1288,4 +1834,4 @@ type = ["pytest-mypy"]
[metadata]
lock-version = "2.0"
python-versions = "^3.9"
-content-hash = "3dbb1db1562689821e480dbd190f047bfee69dd5f823b309e4c1c5b5602c74e2"
+content-hash = "815e511ec72db62504a4fd51c5b8349057753aa66bd75cc1fee42819d1083482"
diff --git a/pyproject.toml b/pyproject.toml
index 0d5bcfba..0a69d240 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -28,6 +28,11 @@ packages = [
[tool.poetry.group.dev.dependencies]
parse-type = "^0.6.4"
+anthropic = "^0.37.1"
+mistralai = "<1.0.0"
+groq = "^0.11.0"
+cohere = "^5.11.2"
+replicate = "^1.0.3"
[project.urls]
Repository = 'https://github.com/humanloop/humanloop-python'
@@ -48,11 +53,13 @@ opentelemetry-instrumentation-anthropic = "^0.33.3"
opentelemetry-instrumentation-mistralai = "^0.33.3"
parse = "^1.20.2"
+opentelemetry-instrumentation-groq = "^0.33.3"
+opentelemetry-instrumentation-replicate = "^0.33.3"
[tool.poetry.dev-dependencies]
mypy = "1.0.1"
pytest = "^7.4.0"
pytest-asyncio = "^0.23.5"
-python-dateutil = "^2.9.0"
+python-dateutil = "^2.8.2"
types-python-dateutil = "^2.9.0.20240316"
ruff = "^0.5.6"
python-dotenv = "^1.0.1"
diff --git a/src/humanloop/otel/__init__.py b/src/humanloop/otel/__init__.py
index 7cd06b6f..169b7ded 100644
--- a/src/humanloop/otel/__init__.py
+++ b/src/humanloop/otel/__init__.py
@@ -46,8 +46,19 @@ def instrument_provider(provider: TracerProvider):
if module_is_installed("mistralai"):
from opentelemetry.instrumentation.mistralai import MistralAiInstrumentor
+        # TODO: Need to open a PR to the instrumentor to support Mistral clients > 1.0.0
MistralAiInstrumentor().instrument(tracer_provider=provider)
+ if module_is_installed("groq"):
+ from opentelemetry.instrumentation.groq import GroqInstrumentor
+
+ GroqInstrumentor().instrument(tracer_provider=provider)
+
+ if module_is_installed("replicate"):
+ from opentelemetry.instrumentation.replicate import ReplicateInstrumentor
+
+ ReplicateInstrumentor().instrument(tracer_provider=provider)
+
def push_trace_context(trace_metadata: dict):
"""Set metadata for Trace parent.
diff --git a/src/humanloop/otel/exporter.py b/src/humanloop/otel/exporter.py
index 29123996..e826ab20 100644
--- a/src/humanloop/otel/exporter.py
+++ b/src/humanloop/otel/exporter.py
@@ -1,7 +1,7 @@
import typing
from queue import Queue
-from typing import Any, Optional
from threading import Thread
+from typing import Any, Optional
from opentelemetry import trace
from opentelemetry.sdk.trace import ReadableSpan
diff --git a/src/humanloop/otel/processor.py b/src/humanloop/otel/processor.py
index 18a8819f..73f08bec 100644
--- a/src/humanloop/otel/processor.py
+++ b/src/humanloop/otel/processor.py
@@ -118,6 +118,20 @@ def _enrich_prompt_span_file(prompt_span: ReadableSpan, llm_provider_call_span:
prompt_kernel["template"] = hl_file.get("prompt", {}).get("template", None)
if "provider" not in prompt_kernel:
prompt_kernel["provider"] = gen_ai_object.get("system", None)
+ if prompt_kernel["provider"]:
+ prompt_kernel["provider"] = prompt_kernel["provider"].lower()
+ if prompt_kernel["provider"] not in [
+ "openai",
+ "openai_azure",
+ "mock",
+ "anthropic",
+ "bedrock",
+ "cohere",
+ "replicate",
+ "google",
+ "groq",
+ ]:
+ raise ValueError("Invalid provider")
if "temperature" not in prompt_kernel:
prompt_kernel["temperature"] = gen_ai_object.get("request", {}).get("temperature", None)
if "top_p" not in prompt_kernel:
diff --git a/tests/conftest.py b/tests/conftest.py
index 12075e16..5c5597f3 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -3,6 +3,11 @@
import pytest
from opentelemetry.instrumentation.openai import OpenAIInstrumentor
+from opentelemetry.instrumentation.instrumentor import BaseInstrumentor # type: ignore
+from opentelemetry.instrumentation.anthropic import AnthropicInstrumentor
+from opentelemetry.instrumentation.replicate import ReplicateInstrumentor
+from opentelemetry.instrumentation.cohere import CohereInstrumentor
+from opentelemetry.instrumentation.groq import GroqInstrumentor
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.trace import Tracer
@@ -45,15 +50,23 @@ def opentelemetry_test_configuration(
exporter = InMemorySpanExporter()
processor = SimpleSpanProcessor(exporter)
opentelemetry_test_provider.add_span_processor(processor)
- instrumentor = OpenAIInstrumentor()
- instrumentor.instrument(tracer_provider=opentelemetry_test_provider)
+ instrumentors: list[BaseInstrumentor] = [
+ OpenAIInstrumentor(),
+ AnthropicInstrumentor(),
+ GroqInstrumentor(),
+ CohereInstrumentor(),
+ ReplicateInstrumentor(),
+ ]
+ for instrumentor in instrumentors:
+ instrumentor.instrument(tracer_provider=opentelemetry_test_provider)
tracer = opentelemetry_test_provider.get_tracer("test")
# Circumvent configuration procedure
INTERNAL_OT._TRACER = tracer
yield tracer, exporter
- instrumentor.uninstrument()
+ for instrumentor in instrumentors:
+ instrumentor.uninstrument()
INTERNAL_OT._TRACER = None
@@ -64,14 +77,22 @@ def opentelemetry_hl_test_configuration(
exporter = InMemorySpanExporter()
processor = HumanloopSpanProcessor(exporter=exporter)
opentelemetry_test_provider.add_span_processor(processor)
- instrumentor = OpenAIInstrumentor()
- instrumentor.instrument(tracer_provider=opentelemetry_test_provider)
+ instrumentors: list[BaseInstrumentor] = [
+ OpenAIInstrumentor(),
+ AnthropicInstrumentor(),
+ GroqInstrumentor(),
+ CohereInstrumentor(),
+ ReplicateInstrumentor(),
+ ]
+ for instrumentor in instrumentors:
+ instrumentor.instrument(tracer_provider=opentelemetry_test_provider)
tracer = opentelemetry_test_provider.get_tracer("test")
INTERNAL_OT._TRACER = tracer
yield tracer, exporter
- instrumentor.uninstrument()
+ for instrumentor in instrumentors:
+ instrumentor.uninstrument()
INTERNAL_OT._TRACER = None
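
Both fixtures now share an instrument/yield/uninstrument shape. A reduced sketch with only the OpenAI instrumentor (the patch runs the identical loop over five instrumentors):

    import pytest
    from opentelemetry.instrumentation.openai import OpenAIInstrumentor
    from opentelemetry.sdk.trace import TracerProvider
    from opentelemetry.sdk.trace.export import SimpleSpanProcessor
    from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter


    @pytest.fixture
    def otel_test_configuration():
        provider = TracerProvider()
        exporter = InMemorySpanExporter()
        provider.add_span_processor(SimpleSpanProcessor(exporter))
        # The patch also adds Anthropic, Groq, Cohere, and Replicate here.
        instrumentors = [OpenAIInstrumentor()]
        for instrumentor in instrumentors:
            instrumentor.instrument(tracer_provider=provider)
        yield provider.get_tracer("test"), exporter
        # Uninstrument on teardown so one test's monkey-patching does not
        # leak into the next test.
        for instrumentor in instrumentors:
            instrumentor.uninstrument()
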
diff --git a/tests/decorators/test_prompt_decorator.py b/tests/decorators/test_prompt_decorator.py
index 4eebf435..debe429b 100644
--- a/tests/decorators/test_prompt_decorator.py
+++ b/tests/decorators/test_prompt_decorator.py
@@ -1,57 +1,129 @@
import os
-from typing import Any, Optional
+from typing import Any, Literal, Optional
+import typing
+import cohere
import pytest
+
+# replicate has no typing stubs
+import replicate # type: ignore
from dotenv import load_dotenv
+from groq import Groq
from humanloop.decorators.prompt import prompt
from humanloop.otel.constants import HL_FILE_OT_KEY
-from humanloop.otel.helpers import NestedDict, is_humanloop_span, read_from_opentelemetry_span
+from humanloop.otel.helpers import is_humanloop_span, read_from_opentelemetry_span
from openai import OpenAI
from openai.types.chat.chat_completion_message_param import ChatCompletionMessageParam
from opentelemetry.sdk.trace import Tracer
from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
-@prompt(path=None, template="You are an assistant on the following topics: {topics}.")
-def _call_llm(messages: list[ChatCompletionMessageParam]) -> Optional[str]:
+def _call_llm_base(provider: Literal["openai", "anthropic", "groq", "cohere", "replicate"], messages: list[dict]) -> Optional[str]:
load_dotenv()
- # NOTE: These tests check if instrumentors are capable of intercepting OpenAI
- # provider calls. Could not find a way to intercept them coming from a Mock.
- client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
- return (
- client.chat.completions.create(
- model="gpt-4o",
- messages=messages,
- temperature=0.8,
+ if provider == "openai":
+ # NOTE: These tests check if instrumentors are capable of intercepting OpenAI
+ # provider calls. Could not find a way to intercept them coming from a Mock.
+ client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
+ return (
+ client.chat.completions.create(
+ model="gpt-4o",
+ messages=messages, # type: ignore
+ temperature=0.8,
+ )
+ .choices[0]
+ .message.content
+ )
+ # if provider == "anthropic":
+ # client = Anthropic(api_key=os.getenv("ANTHROPIC_API_KEY"))
+ # return client.messages.create(
+ # model="claude-3-opus",
+ # messages=messages,
+ # max_tokens=200,
+ # ).content
+ # if provider == "mistralai":
+ # client = MistralClient(api_key=os.getenv("MISTRAL_API_KEY"))
+ # response = client.chat(model="mistral-small-latest", messages=messages, temperature=0.8)
+ # return response.choices[0].message.content
+ if provider == "groq":
+        # NOTE: Groq may be unavailable, causing the test to fail
+        # with groq.NotFoundError: Not Found
+ client = Groq(
+ # This is the default and can be omitted
+ api_key=os.environ.get("GROQ_API_KEY"),
+ )
+ return (
+ client.chat.completions.create(
+ messages=messages,
+ model="llama3-8b-8192",
+ temperature=0.8,
+ )
+ .choices[0]
+ .message.content
)
- .choices[0]
- .message.content
- )
+ if provider == "cohere":
+ client = cohere.Client(api_key=os.getenv("COHERE_API_KEY"))
+ messages_cohere_format = []
+ for message in messages:
+ if message["role"] == "system":
+ messages_cohere_format.append(cohere.SystemMessage(message=message["content"]))
+ elif message["role"] == "user":
+ messages_cohere_format.append(cohere.UserMessage(message=message["content"]))
+ elif message["role"] == "assistant":
+ messages_cohere_format.append(cohere.ChatbotMessage(message=message["content"]))
+ return client.chat(
+ chat_history=messages_cohere_format,
+ model="command",
+ max_tokens=200,
+ message=messages[-1]["content"],
+ temperature=0.8,
+ ).text
+ if provider == "replicate":
+        # TODO: The instrumentor only picks up module-level methods, not client-level ones
+ replicate.default_client._api_token = os.getenv("REPLICATE_API_KEY")
+ output = ""
+ for event in replicate.run(
+ "meta/meta-llama-3-8b-instruct",
+ input={
+ "prompt": messages[0]["content"] + " " + messages[-1]["content"],
+ "temperature": 0.8,
+ },
+ ):
+ output += str(event)
+ return output
+ raise ValueError(f"Unknown provider: {provider}")
-@prompt(path=None, template="You are an assistant on the following topics: {topics}.", temperature=0.9, top_p=0.1)
-def _call_llm_with_defaults(messages: list[ChatCompletionMessageParam]) -> Optional[str]:
- load_dotenv()
- client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
- return (
- client.chat.completions.create(
- model="gpt-4o",
- messages=messages,
- temperature=0.8,
- )
- .choices[0]
- .message.content
- )
+# prompt is a decorator; for brevity, we apply it here as a higher-order function
+_call_llm = prompt(
+ path=None,
+ template="You are an assistant on the following topics: {topics}.",
+)(_call_llm_base)
+_call_llm_with_defaults = prompt(
+ path=None,
+ template="You are an assistant on the following topics: {topics}.",
+ temperature=0.9,
+ top_p=0.1,
+)(_call_llm_base)
+@pytest.mark.parametrize(
+ "provider",
+ (
+ "openai",
+ "groq",
+ "cohere",
+ "replicate",
+ ),
+)
def test_prompt(
+ provider: str,
opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter],
call_llm_messages: list[ChatCompletionMessageParam],
):
# GIVEN a default OpenTelemetry configuration
_, exporter = opentelemetry_test_configuration
# WHEN using the Prompt decorator
- _call_llm(messages=call_llm_messages)
+ _call_llm(provider=provider, messages=call_llm_messages)
# THEN two spans are created: one for the OpenAI LLM provider call and one for the Prompt
spans = exporter.get_finished_spans()
assert len(spans) == 2
@@ -60,33 +132,54 @@ def test_prompt(
assert spans[1].attributes.get("prompt") is None # type: ignore
+@pytest.mark.parametrize(
+ "provider",
+ (
+ "openai",
+ "groq",
+ "cohere",
+ "replicate",
+ ),
+)
def test_prompt_hl_processor(
+ provider: str,
opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter],
call_llm_messages: list[ChatCompletionMessageParam],
):
# GIVEN an OpenTelemetry configuration with a Humanloop Span processor
_, exporter = opentelemetry_hl_test_configuration
# WHEN using the Prompt decorator
- _call_llm(messages=call_llm_messages)
+ _call_llm(provider=provider, messages=call_llm_messages)
# THEN a single span is created since the LLM provider call span is merged in the Prompt span
spans = exporter.get_finished_spans()
assert len(spans) == 1
- assert is_humanloop_span(spans[0])
+ assert is_humanloop_span(span=spans[0])
prompt: dict[str, Any] = read_from_opentelemetry_span(span=spans[0], key=HL_FILE_OT_KEY)["prompt"] # type: ignore
assert prompt is not None
# THEN temperature is taken from LLM provider call, but top_p is not since it is not specified
assert prompt["temperature"] == 0.8
+ assert prompt["provider"] == provider
assert prompt.get("top_p") is None
+@pytest.mark.parametrize(
+ "provider",
+ (
+ "openai",
+ "groq",
+ "cohere",
+ "replicate",
+ ),
+)
def test_prompt_with_defaults(
+ provider: str,
opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter],
call_llm_messages: list[ChatCompletionMessageParam],
):
# GIVEN an OpenTelemetry configuration with a Humanloop Span processor
_, exporter = opentelemetry_hl_test_configuration
# WHEN using the Prompt decorator with default values
- _call_llm_with_defaults(messages=call_llm_messages)
+ _call_llm_with_defaults(provider=provider, messages=call_llm_messages)
# THEN a single span is created since the LLM provider call span is merged in the Prompt span
spans = exporter.get_finished_spans()
assert len(spans) == 1
From 366a2c6e43b8865f14c529f90a8b0938f0233098 Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Wed, 30 Oct 2024 09:44:29 +0000
Subject: [PATCH 15/70] More decorator tests
---
.github/workflows/ci.yml | 4 +
poetry.lock | 197 +++++++++++++++-
pyproject.toml | 2 +
src/humanloop/client.py | 129 +++++++---
src/humanloop/decorators/prompt.py | 25 +-
src/humanloop/decorators/tool.py | 272 ++++++++++++----------
src/humanloop/otel/processor.py | 54 ++---
tests/conftest.py | 1 +
tests/decorators/test_flow_decorator.py | 14 +-
tests/decorators/test_prompt_decorator.py | 172 +++++++-------
tests/decorators/test_tool_decorator.py | 199 ++++++++++++++--
11 files changed, 766 insertions(+), 303 deletions(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 59e3e16c..e043d505 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -37,6 +37,10 @@ jobs:
run: poetry run pytest -rP .
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
+ ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
+ REPLICATE_API_KEY: ${{ secrets.REPLICATE_API_KEY }}
+ GROQ_API_KEY: ${{ secrets.GROQ_API_KEY }}
+ COHERE_API_KEY: ${{ secrets.COHERE_API_KEY }}
publish:
needs: [compile, test]
diff --git a/poetry.lock b/poetry.lock
index 64ce6c02..fc30b989 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -58,6 +58,25 @@ doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)",
test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21.0b1)"]
trio = ["trio (>=0.26.1)"]
+[[package]]
+name = "attrs"
+version = "24.2.0"
+description = "Classes Without Boilerplate"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "attrs-24.2.0-py3-none-any.whl", hash = "sha256:81921eb96de3191c8258c199618104dd27ac608d9366f5e35d011eae1867ede2"},
+ {file = "attrs-24.2.0.tar.gz", hash = "sha256:5cfb1b9148b5b086569baec03f20d7b6bf3bcacc9a42bebf87ffaaca362f6346"},
+]
+
+[package.extras]
+benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
+cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
+dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
+docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier (<24.7)"]
+tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
+tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"]
+
[[package]]
name = "certifi"
version = "2024.8.30"
@@ -610,6 +629,41 @@ files = [
{file = "jiter-0.6.1.tar.gz", hash = "sha256:e19cd21221fc139fb032e4112986656cb2739e9fe6d84c13956ab30ccc7d4449"},
]
+[[package]]
+name = "jsonschema"
+version = "4.23.0"
+description = "An implementation of JSON Schema validation for Python"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "jsonschema-4.23.0-py3-none-any.whl", hash = "sha256:fbadb6f8b144a8f8cf9f0b89ba94501d143e50411a1278633f56a7acf7fd5566"},
+ {file = "jsonschema-4.23.0.tar.gz", hash = "sha256:d71497fef26351a33265337fa77ffeb82423f3ea21283cd9467bb03999266bc4"},
+]
+
+[package.dependencies]
+attrs = ">=22.2.0"
+jsonschema-specifications = ">=2023.03.6"
+referencing = ">=0.28.4"
+rpds-py = ">=0.7.1"
+
+[package.extras]
+format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"]
+format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=24.6.0)"]
+
+[[package]]
+name = "jsonschema-specifications"
+version = "2024.10.1"
+description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry"
+optional = false
+python-versions = ">=3.9"
+files = [
+ {file = "jsonschema_specifications-2024.10.1-py3-none-any.whl", hash = "sha256:a09a0680616357d9a0ecf05c12ad234479f549239d0f5b55f3deea67475da9bf"},
+ {file = "jsonschema_specifications-2024.10.1.tar.gz", hash = "sha256:0f38b83639958ce1152d02a7f062902c41c8fd20d558b0c34344292d417ae272"},
+]
+
+[package.dependencies]
+referencing = ">=0.31.0"
+
[[package]]
name = "mistralai"
version = "0.4.2"
@@ -1275,6 +1329,21 @@ files = [
{file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"},
]
+[[package]]
+name = "referencing"
+version = "0.35.1"
+description = "JSON Referencing + Python"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "referencing-0.35.1-py3-none-any.whl", hash = "sha256:eda6d3234d62814d1c64e305c1331c9a3a6132da475ab6382eaa997b21ee75de"},
+ {file = "referencing-0.35.1.tar.gz", hash = "sha256:25b42124a6c8b632a425174f24087783efb348a6f1e0008e63cd4466fedf703c"},
+]
+
+[package.dependencies]
+attrs = ">=22.2.0"
+rpds-py = ">=0.7.0"
+
[[package]]
name = "regex"
version = "2024.9.11"
@@ -1416,6 +1485,118 @@ urllib3 = ">=1.21.1,<3"
socks = ["PySocks (>=1.5.6,!=1.5.7)"]
use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]
+[[package]]
+name = "rpds-py"
+version = "0.20.0"
+description = "Python bindings to Rust's persistent data structures (rpds)"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "rpds_py-0.20.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3ad0fda1635f8439cde85c700f964b23ed5fc2d28016b32b9ee5fe30da5c84e2"},
+ {file = "rpds_py-0.20.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9bb4a0d90fdb03437c109a17eade42dfbf6190408f29b2744114d11586611d6f"},
+ {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6377e647bbfd0a0b159fe557f2c6c602c159fc752fa316572f012fc0bf67150"},
+ {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb851b7df9dda52dc1415ebee12362047ce771fc36914586b2e9fcbd7d293b3e"},
+ {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1e0f80b739e5a8f54837be5d5c924483996b603d5502bfff79bf33da06164ee2"},
+ {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5a8c94dad2e45324fc74dce25e1645d4d14df9a4e54a30fa0ae8bad9a63928e3"},
+ {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8e604fe73ba048c06085beaf51147eaec7df856824bfe7b98657cf436623daf"},
+ {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:df3de6b7726b52966edf29663e57306b23ef775faf0ac01a3e9f4012a24a4140"},
+ {file = "rpds_py-0.20.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:cf258ede5bc22a45c8e726b29835b9303c285ab46fc7c3a4cc770736b5304c9f"},
+ {file = "rpds_py-0.20.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:55fea87029cded5df854ca7e192ec7bdb7ecd1d9a3f63d5c4eb09148acf4a7ce"},
+ {file = "rpds_py-0.20.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ae94bd0b2f02c28e199e9bc51485d0c5601f58780636185660f86bf80c89af94"},
+ {file = "rpds_py-0.20.0-cp310-none-win32.whl", hash = "sha256:28527c685f237c05445efec62426d285e47a58fb05ba0090a4340b73ecda6dee"},
+ {file = "rpds_py-0.20.0-cp310-none-win_amd64.whl", hash = "sha256:238a2d5b1cad28cdc6ed15faf93a998336eb041c4e440dd7f902528b8891b399"},
+ {file = "rpds_py-0.20.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:ac2f4f7a98934c2ed6505aead07b979e6f999389f16b714448fb39bbaa86a489"},
+ {file = "rpds_py-0.20.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:220002c1b846db9afd83371d08d239fdc865e8f8c5795bbaec20916a76db3318"},
+ {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8d7919548df3f25374a1f5d01fbcd38dacab338ef5f33e044744b5c36729c8db"},
+ {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:758406267907b3781beee0f0edfe4a179fbd97c0be2e9b1154d7f0a1279cf8e5"},
+ {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3d61339e9f84a3f0767b1995adfb171a0d00a1185192718a17af6e124728e0f5"},
+ {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1259c7b3705ac0a0bd38197565a5d603218591d3f6cee6e614e380b6ba61c6f6"},
+ {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5c1dc0f53856b9cc9a0ccca0a7cc61d3d20a7088201c0937f3f4048c1718a209"},
+ {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7e60cb630f674a31f0368ed32b2a6b4331b8350d67de53c0359992444b116dd3"},
+ {file = "rpds_py-0.20.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:dbe982f38565bb50cb7fb061ebf762c2f254ca3d8c20d4006878766e84266272"},
+ {file = "rpds_py-0.20.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:514b3293b64187172bc77c8fb0cdae26981618021053b30d8371c3a902d4d5ad"},
+ {file = "rpds_py-0.20.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:d0a26ffe9d4dd35e4dfdd1e71f46401cff0181c75ac174711ccff0459135fa58"},
+ {file = "rpds_py-0.20.0-cp311-none-win32.whl", hash = "sha256:89c19a494bf3ad08c1da49445cc5d13d8fefc265f48ee7e7556839acdacf69d0"},
+ {file = "rpds_py-0.20.0-cp311-none-win_amd64.whl", hash = "sha256:c638144ce971df84650d3ed0096e2ae7af8e62ecbbb7b201c8935c370df00a2c"},
+ {file = "rpds_py-0.20.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a84ab91cbe7aab97f7446652d0ed37d35b68a465aeef8fc41932a9d7eee2c1a6"},
+ {file = "rpds_py-0.20.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:56e27147a5a4c2c21633ff8475d185734c0e4befd1c989b5b95a5d0db699b21b"},
+ {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2580b0c34583b85efec8c5c5ec9edf2dfe817330cc882ee972ae650e7b5ef739"},
+ {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b80d4a7900cf6b66bb9cee5c352b2d708e29e5a37fe9bf784fa97fc11504bf6c"},
+ {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:50eccbf054e62a7b2209b28dc7a22d6254860209d6753e6b78cfaeb0075d7bee"},
+ {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:49a8063ea4296b3a7e81a5dfb8f7b2d73f0b1c20c2af401fb0cdf22e14711a96"},
+ {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ea438162a9fcbee3ecf36c23e6c68237479f89f962f82dae83dc15feeceb37e4"},
+ {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:18d7585c463087bddcfa74c2ba267339f14f2515158ac4db30b1f9cbdb62c8ef"},
+ {file = "rpds_py-0.20.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d4c7d1a051eeb39f5c9547e82ea27cbcc28338482242e3e0b7768033cb083821"},
+ {file = "rpds_py-0.20.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:e4df1e3b3bec320790f699890d41c59d250f6beda159ea3c44c3f5bac1976940"},
+ {file = "rpds_py-0.20.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2cf126d33a91ee6eedc7f3197b53e87a2acdac63602c0f03a02dd69e4b138174"},
+ {file = "rpds_py-0.20.0-cp312-none-win32.whl", hash = "sha256:8bc7690f7caee50b04a79bf017a8d020c1f48c2a1077ffe172abec59870f1139"},
+ {file = "rpds_py-0.20.0-cp312-none-win_amd64.whl", hash = "sha256:0e13e6952ef264c40587d510ad676a988df19adea20444c2b295e536457bc585"},
+ {file = "rpds_py-0.20.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:aa9a0521aeca7d4941499a73ad7d4f8ffa3d1affc50b9ea11d992cd7eff18a29"},
+ {file = "rpds_py-0.20.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4a1f1d51eccb7e6c32ae89243cb352389228ea62f89cd80823ea7dd1b98e0b91"},
+ {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8a86a9b96070674fc88b6f9f71a97d2c1d3e5165574615d1f9168ecba4cecb24"},
+ {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6c8ef2ebf76df43f5750b46851ed1cdf8f109d7787ca40035fe19fbdc1acc5a7"},
+ {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b74b25f024b421d5859d156750ea9a65651793d51b76a2e9238c05c9d5f203a9"},
+ {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57eb94a8c16ab08fef6404301c38318e2c5a32216bf5de453e2714c964c125c8"},
+ {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1940dae14e715e2e02dfd5b0f64a52e8374a517a1e531ad9412319dc3ac7879"},
+ {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d20277fd62e1b992a50c43f13fbe13277a31f8c9f70d59759c88f644d66c619f"},
+ {file = "rpds_py-0.20.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:06db23d43f26478303e954c34c75182356ca9aa7797d22c5345b16871ab9c45c"},
+ {file = "rpds_py-0.20.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b2a5db5397d82fa847e4c624b0c98fe59d2d9b7cf0ce6de09e4d2e80f8f5b3f2"},
+ {file = "rpds_py-0.20.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5a35df9f5548fd79cb2f52d27182108c3e6641a4feb0f39067911bf2adaa3e57"},
+ {file = "rpds_py-0.20.0-cp313-none-win32.whl", hash = "sha256:fd2d84f40633bc475ef2d5490b9c19543fbf18596dcb1b291e3a12ea5d722f7a"},
+ {file = "rpds_py-0.20.0-cp313-none-win_amd64.whl", hash = "sha256:9bc2d153989e3216b0559251b0c260cfd168ec78b1fac33dd485750a228db5a2"},
+ {file = "rpds_py-0.20.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:f2fbf7db2012d4876fb0d66b5b9ba6591197b0f165db8d99371d976546472a24"},
+ {file = "rpds_py-0.20.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1e5f3cd7397c8f86c8cc72d5a791071431c108edd79872cdd96e00abd8497d29"},
+ {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ce9845054c13696f7af7f2b353e6b4f676dab1b4b215d7fe5e05c6f8bb06f965"},
+ {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c3e130fd0ec56cb76eb49ef52faead8ff09d13f4527e9b0c400307ff72b408e1"},
+ {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4b16aa0107ecb512b568244ef461f27697164d9a68d8b35090e9b0c1c8b27752"},
+ {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aa7f429242aae2947246587d2964fad750b79e8c233a2367f71b554e9447949c"},
+ {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af0fc424a5842a11e28956e69395fbbeab2c97c42253169d87e90aac2886d751"},
+ {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b8c00a3b1e70c1d3891f0db1b05292747f0dbcfb49c43f9244d04c70fbc40eb8"},
+ {file = "rpds_py-0.20.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:40ce74fc86ee4645d0a225498d091d8bc61f39b709ebef8204cb8b5a464d3c0e"},
+ {file = "rpds_py-0.20.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:4fe84294c7019456e56d93e8ababdad5a329cd25975be749c3f5f558abb48253"},
+ {file = "rpds_py-0.20.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:338ca4539aad4ce70a656e5187a3a31c5204f261aef9f6ab50e50bcdffaf050a"},
+ {file = "rpds_py-0.20.0-cp38-none-win32.whl", hash = "sha256:54b43a2b07db18314669092bb2de584524d1ef414588780261e31e85846c26a5"},
+ {file = "rpds_py-0.20.0-cp38-none-win_amd64.whl", hash = "sha256:a1862d2d7ce1674cffa6d186d53ca95c6e17ed2b06b3f4c476173565c862d232"},
+ {file = "rpds_py-0.20.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:3fde368e9140312b6e8b6c09fb9f8c8c2f00999d1823403ae90cc00480221b22"},
+ {file = "rpds_py-0.20.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9824fb430c9cf9af743cf7aaf6707bf14323fb51ee74425c380f4c846ea70789"},
+ {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:11ef6ce74616342888b69878d45e9f779b95d4bd48b382a229fe624a409b72c5"},
+ {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c52d3f2f82b763a24ef52f5d24358553e8403ce05f893b5347098014f2d9eff2"},
+ {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9d35cef91e59ebbeaa45214861874bc6f19eb35de96db73e467a8358d701a96c"},
+ {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d72278a30111e5b5525c1dd96120d9e958464316f55adb030433ea905866f4de"},
+ {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4c29cbbba378759ac5786730d1c3cb4ec6f8ababf5c42a9ce303dc4b3d08cda"},
+ {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6632f2d04f15d1bd6fe0eedd3b86d9061b836ddca4c03d5cf5c7e9e6b7c14580"},
+ {file = "rpds_py-0.20.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:d0b67d87bb45ed1cd020e8fbf2307d449b68abc45402fe1a4ac9e46c3c8b192b"},
+ {file = "rpds_py-0.20.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:ec31a99ca63bf3cd7f1a5ac9fe95c5e2d060d3c768a09bc1d16e235840861420"},
+ {file = "rpds_py-0.20.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:22e6c9976e38f4d8c4a63bd8a8edac5307dffd3ee7e6026d97f3cc3a2dc02a0b"},
+ {file = "rpds_py-0.20.0-cp39-none-win32.whl", hash = "sha256:569b3ea770c2717b730b61998b6c54996adee3cef69fc28d444f3e7920313cf7"},
+ {file = "rpds_py-0.20.0-cp39-none-win_amd64.whl", hash = "sha256:e6900ecdd50ce0facf703f7a00df12374b74bbc8ad9fe0f6559947fb20f82364"},
+ {file = "rpds_py-0.20.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:617c7357272c67696fd052811e352ac54ed1d9b49ab370261a80d3b6ce385045"},
+ {file = "rpds_py-0.20.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:9426133526f69fcaba6e42146b4e12d6bc6c839b8b555097020e2b78ce908dcc"},
+ {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:deb62214c42a261cb3eb04d474f7155279c1a8a8c30ac89b7dcb1721d92c3c02"},
+ {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fcaeb7b57f1a1e071ebd748984359fef83ecb026325b9d4ca847c95bc7311c92"},
+ {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d454b8749b4bd70dd0a79f428731ee263fa6995f83ccb8bada706e8d1d3ff89d"},
+ {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d807dc2051abe041b6649681dce568f8e10668e3c1c6543ebae58f2d7e617855"},
+ {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3c20f0ddeb6e29126d45f89206b8291352b8c5b44384e78a6499d68b52ae511"},
+ {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b7f19250ceef892adf27f0399b9e5afad019288e9be756d6919cb58892129f51"},
+ {file = "rpds_py-0.20.0-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:4f1ed4749a08379555cebf4650453f14452eaa9c43d0a95c49db50c18b7da075"},
+ {file = "rpds_py-0.20.0-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:dcedf0b42bcb4cfff4101d7771a10532415a6106062f005ab97d1d0ab5681c60"},
+ {file = "rpds_py-0.20.0-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:39ed0d010457a78f54090fafb5d108501b5aa5604cc22408fc1c0c77eac14344"},
+ {file = "rpds_py-0.20.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:bb273176be34a746bdac0b0d7e4e2c467323d13640b736c4c477881a3220a989"},
+ {file = "rpds_py-0.20.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f918a1a130a6dfe1d7fe0f105064141342e7dd1611f2e6a21cd2f5c8cb1cfb3e"},
+ {file = "rpds_py-0.20.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:f60012a73aa396be721558caa3a6fd49b3dd0033d1675c6d59c4502e870fcf0c"},
+ {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3d2b1ad682a3dfda2a4e8ad8572f3100f95fad98cb99faf37ff0ddfe9cbf9d03"},
+ {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:614fdafe9f5f19c63ea02817fa4861c606a59a604a77c8cdef5aa01d28b97921"},
+ {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fa518bcd7600c584bf42e6617ee8132869e877db2f76bcdc281ec6a4113a53ab"},
+ {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f0475242f447cc6cb8a9dd486d68b2ef7fbee84427124c232bff5f63b1fe11e5"},
+ {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f90a4cd061914a60bd51c68bcb4357086991bd0bb93d8aa66a6da7701370708f"},
+ {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:def7400461c3a3f26e49078302e1c1b38f6752342c77e3cf72ce91ca69fb1bc1"},
+ {file = "rpds_py-0.20.0-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:65794e4048ee837494aea3c21a28ad5fc080994dfba5b036cf84de37f7ad5074"},
+ {file = "rpds_py-0.20.0-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:faefcc78f53a88f3076b7f8be0a8f8d35133a3ecf7f3770895c25f8813460f08"},
+ {file = "rpds_py-0.20.0-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:5b4f105deeffa28bbcdff6c49b34e74903139afa690e35d2d9e3c2c2fba18cec"},
+ {file = "rpds_py-0.20.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:fdfc3a892927458d98f3d55428ae46b921d1f7543b89382fdb483f5640daaec8"},
+ {file = "rpds_py-0.20.0.tar.gz", hash = "sha256:d72a210824facfdaf8768cf2d7ca25a042c30320b3020de2fa04640920d4e121"},
+]
+
[[package]]
name = "ruff"
version = "0.5.7"
@@ -1680,6 +1861,20 @@ notebook = ["ipywidgets (>=6)"]
slack = ["slack-sdk"]
telegram = ["requests"]
+[[package]]
+name = "types-jsonschema"
+version = "4.23.0.20240813"
+description = "Typing stubs for jsonschema"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "types-jsonschema-4.23.0.20240813.tar.gz", hash = "sha256:c93f48206f209a5bc4608d295ac39f172fb98b9e24159ce577dbd25ddb79a1c0"},
+ {file = "types_jsonschema-4.23.0.20240813-py3-none-any.whl", hash = "sha256:be283e23f0b87547316c2ee6b0fd36d95ea30e921db06478029e10b5b6aa6ac3"},
+]
+
+[package.dependencies]
+referencing = "*"
+
[[package]]
name = "types-python-dateutil"
version = "2.9.0.20241003"
@@ -1834,4 +2029,4 @@ type = ["pytest-mypy"]
[metadata]
lock-version = "2.0"
python-versions = "^3.9"
-content-hash = "815e511ec72db62504a4fd51c5b8349057753aa66bd75cc1fee42819d1083482"
+content-hash = "ea6486f3664117d7ad905484926afaae5e9f260583f3a9b398ac5580f4bfc6a8"
diff --git a/pyproject.toml b/pyproject.toml
index 0a69d240..9405b9c2 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -33,6 +33,8 @@ mistralai = "<1.0.0"
groq = "^0.11.0"
cohere = "^5.11.2"
replicate = "^1.0.3"
+jsonschema = "^4.23.0"
+types-jsonschema = "^4.23.0.20240813"
[project.urls]
Repository = 'https://github.com/humanloop/humanloop-python'
diff --git a/src/humanloop/client.py b/src/humanloop/client.py
index b64448aa..45a4bec8 100644
--- a/src/humanloop/client.py
+++ b/src/humanloop/client.py
@@ -1,14 +1,20 @@
import typing
-from typing import Literal, Optional, List, Sequence, Union
+from typing import Any, Optional, List, Sequence
import os
import httpx
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.trace import TracerProvider
+from humanloop.types.response_format import ResponseFormat
+
from .decorators.flow import flow as flow_decorator
from .decorators.prompt import prompt as prompt_decorator
from .decorators.tool import tool as tool_decorator
from humanloop.core.client_wrapper import SyncClientWrapper
+from humanloop.types.model_endpoints import ModelEndpoints
+from humanloop.types.model_providers import ModelProviders
+from humanloop.types.prompt_kernel_request_template import PromptKernelRequestTemplate
+from humanloop.types.prompt_kernel_request_stop import PromptKernelRequestStop
from .otel.exporter import HumanloopSpanExporter
from .otel.processor import HumanloopSpanProcessor
from .otel import instrument_provider, set_tracer
@@ -116,39 +122,77 @@ def __init__(
def prompt(
self,
+ *,
# TODO: Template can be a list of objects
path: Optional[str] = None,
model: Optional[str] = None,
- endpoint: Optional[Literal["chat", "edit", "complete"]] = None,
- template: Optional[str] = None,
- provider: Optional[
- Literal["openai", "openai_azure", "mock", "anthropic", "bedrock", "cohere", "replicate", "google", "groq"]
- ] = None,
+ endpoint: Optional[ModelEndpoints] = None,
+ template: Optional[PromptKernelRequestTemplate] = None,
+ provider: Optional[ModelProviders] = None,
max_tokens: Optional[int] = None,
- stop: Optional[Union[str, list[str]]] = None,
temperature: Optional[float] = None,
top_p: Optional[float] = None,
+ stop: Optional[PromptKernelRequestStop] = None,
presence_penalty: Optional[float] = None,
frequency_penalty: Optional[float] = None,
+ other: Optional[dict[str, Optional[Any]]] = None,
+ seed: Optional[int] = None,
+ response_format: Optional[ResponseFormat] = None,
):
"""Decorator to mark a function as a Humanloop Prompt.
- The decorator intercepts calls to LLM provider APIs and uses them
- in tandem with the template provided by the user to create a Prompt
- in Humanloop.
-
- Arguments:
- path: Optional. The path where the Prompt is created. If not
- provided, the function name is used as the path and
- the File is created in the root of your Humanloop's
- organization workspace.
- template: The template for the Prompt. This is the text of
- the system message used to set the LLM prompt. The template
- accepts template slots using the format `{slot_name}`.
-
- The text of the system message is matched against the template
- to extract the slot values. The extracted values will be
- available in the Log's inputs
+    The decorator intercepts calls to LLM provider APIs, extracts the
+    hyperparameters used in the call, and upserts a Prompt File on
+    Humanloop based on them. If a hyperparameter is specified in the
+    `@prompt` decorator, it overrides the value inferred from inside
+    the function.
+
+    If the Prompt already exists at the specified path, a new version is
+    created whenever the hyperparameters used in the LLM calls change.
+
+ :param path: The path where the Prompt is created. If not
+ provided, the function name is used as the path and the File
+ is created in the root of your Humanloop's organization workspace.
+
+    :param model: The model instance used, e.g. `gpt-4`. See
+        [supported models](https://humanloop.com/docs/reference/supported-models)
+
+    :param endpoint: The provider API endpoint called by the Prompt, e.g. `chat`.
+
+ :param template: The template for the Prompt. This is the text of
+ the system message used to set the LLM prompt. The template
+ accepts template slots using the format `{slot_name}`.
+
+ :param provider: The company providing the underlying model service.
+
+ :param max_tokens: Maximum number of tokens used in generation.
+
+    :param temperature: What sampling temperature to use
+        when making a generation. Higher values mean the model
+        will be more creative.
+
+ :param top_p: An alternative to sampling with temperature,
+ called nucleus sampling, where the model considers the results
+ of the tokens with top_p probability mass.
+
+    :param stop: A token or list of tokens that stops generation.
+
+ :param presence_penalty: Number between -2.0 and 2.0.
+ Positive values penalize new tokens based on whether they
+ appear in the generation so far.
+
+ :param frequency_penalty: Number between -2.0 and 2.0. Positive
+ values penalize new tokens based on how frequently they
+ appear in the generation so far.
+
+ :param other: Other parameter values to be passed to the provider call.
+
+    :param seed: If specified, the model will make a best effort to
+        sample deterministically, but determinism is not guaranteed.
+
+ :param response_format: The format of the response.
+ Only `{"type": "json_object"}` is currently supported
+ for chat.
"""
return prompt_decorator(
path=path,
@@ -162,12 +206,17 @@ def prompt(
top_p=top_p,
presence_penalty=presence_penalty,
frequency_penalty=frequency_penalty,
+ other=other,
+ seed=seed,
+ response_format=response_format,
)
def tool(
self,
+ *,
path: Optional[str] = None,
- attributes: Optional[dict[str, typing.Any]] = None,
+ setup_values: Optional[dict[str, Optional[Any]]] = None,
+ attributes: Optional[dict[str, Optional[Any]]] = None,
):
"""Decorator to mark a function as a Humanloop Tool.
@@ -178,15 +227,26 @@ def tool(
Every call to the decorated function will create a Log against the Tool.
- Arguments:
- path: Optional. The path to the Tool. If not provided, the function name
- will be used as the path and the File will be created in the root
- of your Humanloop's organization workspace.
+ :param path: The path to the Tool. If not provided, the function name
+ will be used as the path and the File will be created in the root
+ of your Humanloop's organization workspace.
+
+    :param setup_values: Values needed to set up the Tool, defined in
+        JSON Schema format: https://json-schema.org/
+
+ :param attributes: Additional fields to describe the Tool.
+ Helpful to separate Tool versions from each other
+ with details on how they were created or used.
"""
- return tool_decorator(path=path, attributes=attributes)
+ return tool_decorator(
+ path=path,
+ setup_values=setup_values,
+ attributes=attributes,
+ )
def flow(
self,
+ *,
path: Optional[str] = None,
attributes: dict[str, typing.Any] = {},
):
@@ -199,12 +259,11 @@ def flow(
functions called in the context of a function decorated with Flow will create
a Trace in Humanloop.
- Arguments:
- path: Optional. The path to the Flow. If not provided, the function name
- will be used as the path and the File will be created in the root
- of your Humanloop's organization workspace.
- attributes: Optional. The attributes of the Flow. The attributes are used
- to version the Flow.
+ :param path: The path to the Flow. If not provided, the function name
+ will be used as the path and the File will be created in the root
+ of your Humanloop's organization workspace.
+
+ :param attributes: A key-value object identifying the Flow Version.
"""
return flow_decorator(path=path, attributes=attributes)
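
Taken together, the reworked signatures give a keyword-only decorator surface on the client. A hedged usage sketch (the client construction, paths, and values are illustrative, not taken from this patch):

    from humanloop import Humanloop

    client = Humanloop(api_key="...")


    @client.prompt(
        path="qa/assistant",
        template="You are an assistant on the following topics: {topics}.",
        temperature=0.9,
        seed=42,
    )
    def ask(messages):
        # Provider calls made in here are intercepted; explicitly passed
        # decorator arguments override the inferred hyperparameters.
        ...


    @client.flow(path="qa/pipeline", attributes={"version": "draft"})
    def pipeline(question):
        return ask(messages=[{"role": "user", "content": question}])
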
diff --git a/src/humanloop/decorators/prompt.py b/src/humanloop/decorators/prompt.py
index b3f0bf64..7a44fe3f 100644
--- a/src/humanloop/decorators/prompt.py
+++ b/src/humanloop/decorators/prompt.py
@@ -1,27 +1,33 @@
import uuid
from functools import wraps
-from typing import Callable, Literal, Optional, Union
+from typing import Any, Callable, Optional
from humanloop.otel import get_trace_context, get_tracer, pop_trace_context, push_trace_context
from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY, HL_TRACE_METADATA_KEY
from humanloop.otel.helpers import write_to_opentelemetry_span
+from humanloop.types.model_endpoints import ModelEndpoints
+from humanloop.types.model_providers import ModelProviders
+from humanloop.types.prompt_kernel_request_stop import PromptKernelRequestStop
+from humanloop.types.prompt_kernel_request_template import PromptKernelRequestTemplate
+from humanloop.types.response_format import ResponseFormat
def prompt(
path: Optional[str] = None,
- # TODO: Template can be a list of objects
+ # TODO: Template can be a list of objects?
model: Optional[str] = None,
- endpoint: Optional[Literal["chat", "edit", "complete"]] = None,
- template: Optional[str] = None,
- provider: Optional[
- Literal["openai", "openai_azure", "mock", "anthropic", "bedrock", "cohere", "replicate", "google", "groq"]
- ] = None,
+ endpoint: Optional[ModelEndpoints] = None,
+ template: Optional[PromptKernelRequestTemplate] = None,
+ provider: Optional[ModelProviders] = None,
max_tokens: Optional[int] = None,
- stop: Optional[Union[str, list[str]]] = None,
+ stop: Optional[PromptKernelRequestStop] = None,
temperature: Optional[float] = None,
top_p: Optional[float] = None,
presence_penalty: Optional[float] = None,
frequency_penalty: Optional[float] = None,
+ other: Optional[dict[str, Optional[Any]]] = None,
+ seed: Optional[int] = None,
+ response_format: Optional[ResponseFormat] = None,
):
def decorator(func: Callable):
if temperature is not None:
@@ -77,6 +83,9 @@ def wrapper(*args, **kwargs):
"provider": provider,
"max_tokens": max_tokens,
"stop": stop,
+ "other": other,
+ "seed": seed,
+ "response_format": response_format,
},
},
)
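
The override rule described in the client docstring, where decorator arguments take precedence over values inferred from the intercepted provider call, can be sketched independently of the OpenTelemetry plumbing. `merge_prompt_kernel` below is a hypothetical helper, not part of this patch:

    def merge_prompt_kernel(inferred, **overrides):
        # Start from what the instrumentor observed on the provider call,
        # then let explicitly passed decorator arguments take precedence.
        kernel = dict(inferred)
        for key, value in overrides.items():
            if value is not None:
                kernel[key] = value
        return kernel


    observed = {"model": "gpt-4o", "temperature": 0.8}
    print(merge_prompt_kernel(observed, temperature=0.9, top_p=0.1))
    # -> {'model': 'gpt-4o', 'temperature': 0.9, 'top_p': 0.1}
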
diff --git a/src/humanloop/decorators/tool.py b/src/humanloop/decorators/tool.py
index 0bd35b60..9a229e22 100644
--- a/src/humanloop/decorators/tool.py
+++ b/src/humanloop/decorators/tool.py
@@ -4,131 +4,144 @@
import typing
import uuid
from functools import wraps
-from typing import Callable, Literal, Optional, TypedDict, Union
+from typing import Any, Callable, Mapping, Optional, Sequence, TypedDict, Union
from humanloop.otel import get_trace_context, get_tracer, pop_trace_context, push_trace_context
from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY, HL_TRACE_METADATA_KEY
from humanloop.otel.helpers import write_to_opentelemetry_span
+from humanloop.types.tool_function import ToolFunction
+from humanloop.types.tool_kernel_request import ToolKernelRequest
from .helpers import args_to_inputs
-class JSONSchemaProperty(TypedDict):
- type: Literal["number", "boolean", "string", "object"]
-
-
-class JSONSchemaArray(TypedDict):
- type: Literal["array"]
- items: JSONSchemaProperty
-
-
-class JSONSchemaObjectProperty(TypedDict):
- key: JSONSchemaProperty
- value: JSONSchemaProperty
-
+def _extract_annotation_signature(annotation: typing.Type) -> Union[list, tuple]:
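+    """Normalize a type annotation into a nested list/tuple signature.
+
+    Illustrative sketch of the mapping (a list marks a container, a tuple
+    marks a nullable entry):
+        str                  -> [str]
+        list[str]            -> [list, [str]]
+        Optional[int]        -> (int,)
+        dict[str, list[int]] -> [dict, [str], [list, [int]]]
+    """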
+ origin = typing.get_origin(annotation)
+ if origin is None:
+ if annotation is inspect._empty:
+ raise ValueError("Empty type hint annotation")
+ return [annotation]
+ if origin is list:
+ inner_type = _extract_annotation_signature(typing.get_args(annotation)[0])
+ return [origin, inner_type]
+ if origin is dict:
+ key_type = _extract_annotation_signature(typing.get_args(annotation)[0])
+ value_type = _extract_annotation_signature(typing.get_args(annotation)[1])
+ return [origin, key_type, value_type]
+ if origin is tuple:
+ return [
+ origin,
+ *[_extract_annotation_signature(arg) for arg in typing.get_args(annotation)],
+ ]
+ if origin is typing.Union:
+ sub_types = typing.get_args(annotation)
+ if sub_types[-1] is type(None):
+ # Union is an Optional type
+ if len(sub_types) == 2:
+ return tuple(_extract_annotation_signature(sub_types[0]))
+ return (
+ origin,
+ *[_extract_annotation_signature(sub_type) for sub_type in sub_types[:-1]],
+ )
+ # Union type
+ return [
+ origin,
+ *[_extract_annotation_signature(sub_type) for sub_type in sub_types],
+ ]
+
+ raise ValueError(f"Unsupported origin: {origin}")
+
+
+def _build_json_schema_parameter(arg: Union[list, tuple]) -> Mapping[str, Union[str, Mapping, Sequence]]:
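+    """Convert a normalized annotation signature into a JSON Schema fragment.
+
+    Illustrative sketch:
+        [list, [str]] -> {"type": "array", "items": {"type": "string"}}
+        (int,)        -> {"type": ["integer", "null"]}
+    """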
+ is_nullable = isinstance(arg, tuple)
+    arg_type: Optional[Mapping[str, Union[str, Mapping, Sequence]]] = None
+ if arg[0] is typing.Union:
+ arg_type = {
+ "anyOf": [_build_json_schema_parameter(sub_type) for sub_type in arg[1:]],
+ }
+ if arg[0] is tuple:
+ arg_type = {
+ "type": "array",
+ "items": [_build_json_schema_parameter(sub_type) for sub_type in arg[1:]],
+ }
+ if arg[0] is list:
+ arg_type = {
+ "type": "array",
+ "items": _build_json_schema_parameter(arg[1]),
+ }
+ if arg[0] is dict:
+ arg_type = {
+ "type": "object",
+ "properties": {
+ "key": _build_json_schema_parameter(arg[1]),
+ "value": _build_json_schema_parameter(arg[2]),
+ },
+ }
+ if arg[0] is builtins.str:
+ arg_type = {"type": "string"}
+ if arg[0] is builtins.int:
+ arg_type = {"type": "integer"}
+ if arg[0] is builtins.float:
+ arg_type = {"type": "number"}
+    if arg[0] is builtins.bool:
+        arg_type = {"type": "boolean"}
+    if arg_type is None:
+        # Guard: leaf types with no JSON Schema mapping (e.g. bytes) would
+        # otherwise leave arg_type unbound and raise an UnboundLocalError below
+        raise ValueError(f"Unsupported type annotation: {arg[0]}")
+
+ if is_nullable:
+ if arg[0] is typing.Union:
+ arg_type["anyOf"] = [ # type: ignore
+ {**type_option, "type": [type_option["type"], "null"]} # type: ignore
+ for type_option in arg_type["anyOf"] # type: ignore
+ ]
+ else:
+ arg_type = {**arg_type, "type": [arg_type["type"], "null"]}
-class JSONSchemaObject(TypedDict):
- type: Literal["object"]
- properties: JSONSchemaObjectProperty
+ return arg_type
class JSONSchemaFunctionParameters(TypedDict):
- type: Literal["object"]
- required: tuple[str]
- properties: dict[str, Union[JSONSchemaProperty, JSONSchemaArray, JSONSchemaObject]]
-
-
-def _type_to_schema(type_hint):
- if type_hint is int:
- return "number"
- if type_hint is float:
- return "number"
- if type_hint is bool:
- return "boolean"
- if type_hint is str:
- return "string"
- if type_hint is dict:
- return "object"
- raise ValueError(f"Unsupported type hint: {type_hint}")
-
-
-def _handle_dict_annotation(parameter: inspect.Parameter) -> JSONSchemaObject:
- try:
- type_key, type_value = typing.get_args(parameter.annotation)
- except ValueError:
- raise ValueError("Dict annotation must have two type hints")
- if type_key not in (builtins.str, builtins.int, typing.Literal, builtins.float):
- raise ValueError("Dict keys must be strings or integers", parameter.name, type_key)
- if type_value not in (
- builtins.str,
- builtins.int,
- typing.Literal,
- builtins.float,
- dict,
- ):
- raise ValueError("Dict values must be strings or integers", parameter.name, type_value)
- return JSONSchemaObject(
- type="object",
- properties=JSONSchemaObjectProperty(
- key={"type": _type_to_schema(type_key)},
- value={"type": _type_to_schema(type_value)},
- ),
- )
-
-
-def _handle_list_annotation(parameter: inspect.Parameter) -> JSONSchemaArray:
- try:
- list_type = typing.get_args(parameter.annotation)[0]
- except ValueError:
- raise ValueError("List annotation must have one type hint")
- return JSONSchemaArray(
- type="array",
- items={
- "type": _type_to_schema(list_type),
- },
- )
+ type: str
+ properties: dict[str, dict]
+ required: list[str]
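+    # e.g. {"type": "object", "properties": {"a": {"type": "integer"}}, "required": ["a"]}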
-def _handle_optional_annotation(parameter: inspect.Parameter) -> JSONSchemaProperty:
- union_types = [sub_type for sub_type in typing.get_args(parameter.annotation) if sub_type != type(None)]
- if len(union_types) != 1:
- raise ValueError("Union types are not supported. Try passing a string and parsing inside function")
- return {"type": _type_to_schema(union_types[0])}
-
-
-def _handle_simple_type(parameter: inspect.Parameter) -> JSONSchemaProperty:
- if parameter.annotation is None:
- raise ValueError("Parameters must have type hints")
- return {"type": _type_to_schema(parameter.annotation)}
+def _parameter_is_optional(parameter: inspect.Parameter) -> bool:
+    """Check whether the tool parameter is optional, i.e. whether it may be None."""
+ # Check if the parameter can be None, either via Optional[T] or T | None type hint
+ origin = typing.get_origin(parameter.annotation)
+ # sub_types refers to T inside the annotation
+ sub_types = typing.get_args(parameter.annotation)
+ return origin is typing.Union and len(sub_types) > 0 and sub_types[-1] is type(None)
def _parse_tool_parameters_schema(func) -> JSONSchemaFunctionParameters:
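+    """Derive the JSON Schema `parameters` object from the function signature."""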
- # TODO: Add tests for this, 100% it is breakable
- signature = inspect.signature(func)
+ properties: dict[str, Any] = {}
required: list[str] = []
- properties: dict[str, Union[JSONSchemaArray, JSONSchemaProperty, JSONSchemaObject]] = {}
+ signature = inspect.signature(func)
+
for parameter in signature.parameters.values():
if parameter.kind in (
inspect.Parameter.VAR_POSITIONAL,
inspect.Parameter.VAR_KEYWORD,
):
- raise ValueError("Varargs and kwargs are not supported")
- origin = typing.get_origin(parameter.annotation)
- param_schema: Union[JSONSchemaProperty, JSONSchemaArray, JSONSchemaObject]
- if origin is Union:
- param_schema = _handle_optional_annotation(parameter)
- elif origin is None:
- param_schema = _handle_simple_type(parameter)
- required.append(parameter.name)
- elif isinstance(origin, dict):
- param_schema = _handle_dict_annotation(parameter)
- required.append(parameter.name)
- elif isinstance(origin, list):
- param_schema = _handle_list_annotation(parameter)
+ raise ValueError(f"{func.__name__}: Varargs and kwargs are not supported by the @tool decorator")
+
+ for parameter in signature.parameters.values():
+ try:
+ parameter_signature = _extract_annotation_signature(parameter.annotation)
+ except ValueError as e:
+ raise ValueError(f"{func.__name__}: {parameter.name} lacks a type hint annotation") from e
+ param_json_schema = _build_json_schema_parameter(parameter_signature)
+ properties[parameter.name] = param_json_schema
+ if not _parameter_is_optional(parameter):
required.append(parameter.name)
- else:
- raise ValueError("Unsupported type hint ", parameter)
- properties[parameter.name] = param_schema
+
+ if len(properties) == 0 and len(required) == 0:
+ # Edge case: function with no parameters
+ return JSONSchemaFunctionParameters(
+ type="object",
+ properties={},
+ required=[],
+ )
return JSONSchemaFunctionParameters(
type="object",
# False positive, expected tuple[str] but got tuple[str, ...]
@@ -137,48 +150,56 @@ def _parse_tool_parameters_schema(func) -> JSONSchemaFunctionParameters:
)
-class JSONSchemaFunction(TypedDict):
- name: str
- description: str
- parameters: JSONSchemaFunctionParameters
-
-
-def _tool_json_schema(func: Callable) -> JSONSchemaFunction:
+def _tool_json_schema(func: Callable, strict: bool) -> ToolFunction:
tool_name = func.__name__
description = func.__doc__
if description is None:
description = ""
- return JSONSchemaFunction(
+ return ToolFunction(
name=tool_name,
description=description,
parameters=_parse_tool_parameters_schema(func),
+ strict=strict,
)
-class ToolKernel(TypedDict):
- source_code: str
- function: JSONSchemaFunction
- tool_type: Literal["json_schema"]
- strict: Literal[True]
-
-
-def _extract_tool_kernel(func: Callable) -> ToolKernel:
- return ToolKernel(
+def _build_tool_kernel(
+ func: Callable,
+ attributes: Optional[dict[str, Optional[Any]]],
+ setup_values: Optional[dict[str, Optional[Any]]],
+ strict: bool,
+) -> ToolKernelRequest:
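+    """Build the Tool kernel that versions the decorated function.
+
+    Note: the first captured source line (assumed to be the @tool decorator
+    itself) is stripped so the stored source matches the plain definition.
+    """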
+ return ToolKernelRequest(
source_code=textwrap.dedent(
# Remove the tool decorator from source code
inspect.getsource(func).split("\n", maxsplit=1)[1]
),
- function=_tool_json_schema(func=func),
- tool_type="json_schema",
- strict=True,
+ attributes=attributes,
+ setup_values=setup_values,
+ function=_tool_json_schema(
+ func=func,
+ strict=strict,
+ ),
)
-def tool(path: Optional[str] = None, attributes: Optional[dict[str, typing.Any]] = None):
+def tool(
+ path: Optional[str] = None,
+ setup_values: Optional[dict[str, Optional[Any]]] = None,
+ attributes: Optional[dict[str, typing.Any]] = None,
+ strict: bool = True,
+):
def decorator(func: Callable):
# Complains about adding attribute on function
# Nice UX, but mypy doesn't like it
- func.json_schema = _tool_json_schema(func) # type: ignore
+ file_obj = _build_tool_kernel(
+ func=func,
+ attributes=attributes,
+ setup_values=setup_values,
+ strict=strict,
+ )
+
+ func.json_schema = file_obj.function.model_dump() # type: ignore
@wraps(func)
def wrapper(*args, **kwargs):
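
An editorial sketch (not part of the patch) of the extended @tool decorator; the path and setup values are hypothetical:

    from typing import Optional

    from humanloop.decorators.tool import tool

    @tool(path="tools/char_count", setup_values={"api_key": "demo-key"}, strict=False)
    def char_count(text: str, multiplier: Optional[int] = None) -> int:
        """Count characters in a string."""
        return len(text) * (multiplier or 1)

    # The derived JSON Schema is exposed on the function itself:
    assert char_count.json_schema["strict"] is False
    assert "text" in char_count.json_schema["parameters"]["required"]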
@@ -214,10 +235,7 @@ def wrapper(*args, **kwargs):
key=HL_FILE_OT_KEY,
value={
"path": path if path else func.__name__,
- "tool": {
- **_extract_tool_kernel(func),
- "attributes": attributes,
- },
+ "tool": file_obj.model_dump(),
},
)
write_to_opentelemetry_span(
diff --git a/src/humanloop/otel/processor.py b/src/humanloop/otel/processor.py
index 73f08bec..eb935536 100644
--- a/src/humanloop/otel/processor.py
+++ b/src/humanloop/otel/processor.py
@@ -109,39 +109,27 @@ def _enrich_prompt_span_file(prompt_span: ReadableSpan, llm_provider_call_span:
gen_ai_object: dict[str, Any] = read_from_opentelemetry_span(llm_provider_call_span, key="gen_ai")
llm_object: dict[str, Any] = read_from_opentelemetry_span(llm_provider_call_span, key="llm")
- prompt_kernel: dict[str, Any] = hl_file.get("prompt", {})
- if "model" not in prompt_kernel:
- prompt_kernel["model"] = gen_ai_object.get("request", {}).get("model", None)
- if "endpoint" not in prompt_kernel:
- prompt_kernel["endpoint"] = llm_object.get("request", {}).get("type")
- if "template" not in prompt_kernel:
- prompt_kernel["template"] = hl_file.get("prompt", {}).get("template", None)
- if "provider" not in prompt_kernel:
- prompt_kernel["provider"] = gen_ai_object.get("system", None)
- if prompt_kernel["provider"]:
- prompt_kernel["provider"] = prompt_kernel["provider"].lower()
- if prompt_kernel["provider"] not in [
- "openai",
- "openai_azure",
- "mock",
- "anthropic",
- "bedrock",
- "cohere",
- "replicate",
- "google",
- "groq",
- ]:
- raise ValueError("Invalid provider")
- if "temperature" not in prompt_kernel:
- prompt_kernel["temperature"] = gen_ai_object.get("request", {}).get("temperature", None)
- if "top_p" not in prompt_kernel:
- prompt_kernel["top_p"] = gen_ai_object.get("request", {}).get("top_p", None)
- if "max_tokens" not in prompt_kernel:
- prompt_kernel["max_tokens"] = gen_ai_object.get("request", {}).get("max_tokens", None)
- if "presence_penalty" not in prompt_kernel:
- prompt_kernel["presence_penalty"] = llm_object.get("presence_penalty", None)
- if "frequency_penalty" not in prompt_kernel:
- prompt_kernel["frequency_penalty"] = llm_object.get("frequency_penalty", None)
+ prompt = hl_file.get("prompt", {})
+ if not prompt.get("model"):
+ prompt["model"] = gen_ai_object.get("request", {}).get("model", None)
+ if not prompt.get("endpoint"):
+ prompt["endpoint"] = llm_object.get("request", {}).get("type")
+ if not prompt.get("provider"):
+ prompt["provider"] = gen_ai_object.get("system", None)
+ if prompt["provider"]:
+ prompt["provider"] = prompt["provider"].lower()
+ if not prompt.get("temperature"):
+ prompt["temperature"] = gen_ai_object.get("request", {}).get("temperature", None)
+ if not prompt.get("top_p"):
+ prompt["top_p"] = gen_ai_object.get("request", {}).get("top_p", None)
+ if not prompt.get("max_tokens"):
+ prompt["max_tokens"] = gen_ai_object.get("request", {}).get("max_tokens", None)
+ if not prompt.get("presence_penalty"):
+ prompt["presence_penalty"] = llm_object.get("presence_penalty", None)
+ if not prompt.get("frequency_penalty"):
+ prompt["frequency_penalty"] = llm_object.get("frequency_penalty", None)
+
+ hl_file["prompt"] = prompt
write_to_opentelemetry_span(
span=prompt_span,
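
An editorial sketch (not part of the patch) of the backfill rule the processor now applies when merging intercepted provider-call data into the Prompt kernel:

    prompt = {"model": None, "temperature": 0.8}
    intercepted_request = {"model": "gpt-4o", "temperature": 0.5}

    for key in ("model", "temperature"):
        if not prompt.get(key):  # falsy check, replacing the old "key not in" test
            prompt[key] = intercepted_request.get(key)

    assert prompt == {"model": "gpt-4o", "temperature": 0.8}

Note that a deliberately falsy value such as temperature=0 would also be overwritten under this rule; that is the trade-off of the simpler check.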
diff --git a/tests/conftest.py b/tests/conftest.py
index 5c5597f3..d71eed78 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -83,6 +83,7 @@ def opentelemetry_hl_test_configuration(
GroqInstrumentor(),
CohereInstrumentor(),
ReplicateInstrumentor(),
+ AnthropicInstrumentor(),
]
for instrumentor in instrumentors:
instrumentor.instrument(tracer_provider=opentelemetry_test_provider)
diff --git a/tests/decorators/test_flow_decorator.py b/tests/decorators/test_flow_decorator.py
index 5995f5cd..f6b1e57c 100644
--- a/tests/decorators/test_flow_decorator.py
+++ b/tests/decorators/test_flow_decorator.py
@@ -41,6 +41,10 @@ def _call_llm(messages: list[ChatCompletionMessageParam]) -> str:
) + _random_string()
+def _agent_call_no_decorator(messages: list[dict]) -> str:
+ return _call_llm(messages=messages)
+
+
@flow(attributes={"foo": "bar", "baz": 7})
def _agent_call(messages: list[dict]) -> str:
return _call_llm(messages=messages)
@@ -51,7 +55,7 @@ def _flow_over_flow(messages: list[dict]) -> str:
return _agent_call(messages=messages)
-def test_no_flow(
+def test_decorators_without_flow(
opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter],
):
    # GIVEN a call to a @prompt-annotated function that calls a @tool
@@ -80,7 +84,7 @@ def test_no_flow(
read_from_opentelemetry_span(span=span, key=HL_TRACE_METADATA_KEY)
-def test_with_flow(
+def test_decorators_with_flow_decorator(
opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter],
):
# GIVEN a @flow entrypoint to an instrumented application
@@ -118,7 +122,7 @@ def test_with_flow(
assert flow_trace_metadata["trace_id"] == spans[2].context.span_id
-def test_flow_in_flow(
+def test_flow_decorator_flow_in_flow(
opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter],
call_llm_messages: list[dict],
):
@@ -153,7 +157,7 @@ def test_flow_in_flow(
assert flow_trace_metadata["trace_id"] == spans[3].context.span_id
-def test_hl_exporter_with_flow(
+def test_flow_decorator_with_hl_exporter(
call_llm_messages: list[dict],
opentelemetry_hl_with_exporter_test_configuration: tuple[Tracer, HumanloopSpanExporter],
):
@@ -202,7 +206,7 @@ def test_hl_exporter_with_flow(
tool_log_call_args.kwargs["trace_parent_id"] == prompt_log_id
-def test_nested_flow_exporting(
+def test_flow_decorator_hl_exporter_flow_inside_flow(
call_llm_messages: list[dict],
opentelemetry_hl_with_exporter_test_configuration: tuple[Tracer, HumanloopSpanExporter],
):
diff --git a/tests/decorators/test_prompt_decorator.py b/tests/decorators/test_prompt_decorator.py
index debe429b..964c8bf2 100644
--- a/tests/decorators/test_prompt_decorator.py
+++ b/tests/decorators/test_prompt_decorator.py
@@ -1,68 +1,89 @@
import os
-from typing import Any, Literal, Optional
-import typing
+from typing import Optional
import cohere
import pytest
# replicate has no typing stubs
import replicate # type: ignore
+from anthropic import Anthropic
+from anthropic.types.message_param import MessageParam
from dotenv import load_dotenv
from groq import Groq
+from groq import NotFoundError as GroqNotFoundError
from humanloop.decorators.prompt import prompt
from humanloop.otel.constants import HL_FILE_OT_KEY
from humanloop.otel.helpers import is_humanloop_span, read_from_opentelemetry_span
+from humanloop.types.model_providers import ModelProviders
+from humanloop.types.prompt_kernel_request import PromptKernelRequest
from openai import OpenAI
from openai.types.chat.chat_completion_message_param import ChatCompletionMessageParam
from opentelemetry.sdk.trace import Tracer
from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
+_PROVIDER_AND_MODEL = [
+ ("openai", "gpt-4o"),
+ ("groq", "llama3-8b-8192"),
+ ("cohere", "command"),
+ ("replicate", "meta/meta-llama-3-8b-instruct"),
+ ("anthropic", "claude-3-opus-latest"),
+]
-def _call_llm_base(provider: Literal["openai", "anthropic"], messages: list[dict]) -> Optional[str]:
+
+def _call_llm_base(provider: ModelProviders, model: str, messages: list[dict]) -> Optional[str]:
load_dotenv()
if provider == "openai":
# NOTE: These tests check if instrumentors are capable of intercepting OpenAI
# provider calls. Could not find a way to intercept them coming from a Mock.
- client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
+ client = OpenAI(api_key=os.getenv("OPENAI_API_KEY")) # type: ignore
return (
client.chat.completions.create(
- model="gpt-4o",
+ model=model,
messages=messages, # type: ignore
temperature=0.8,
)
.choices[0]
.message.content
)
- # if provider == "anthropic":
- # client = Anthropic(api_key=os.getenv("ANTHROPIC_API_KEY"))
- # return client.messages.create(
- # model="claude-3-opus",
- # messages=messages,
- # max_tokens=200,
- # ).content
- # if provider == "mistralai":
- # client = MistralClient(api_key=os.getenv("MISTRAL_API_KEY"))
- # response = client.chat(model="mistral-small-latest", messages=messages, temperature=0.8)
- # return response.choices[0].message.content
- if provider == "groq":
- # Note GROQ might be unavailable, leading to
- # test failure. Returns groq.NotFoundError: Not Found
- client = Groq(
- # This is the default and can be omitted
- api_key=os.environ.get("GROQ_API_KEY"),
- )
+ if provider == "anthropic":
+ client = Anthropic(api_key=os.getenv("ANTHROPIC_API_KEY")) # type: ignore
+ messages_anthropic_format = [
+ MessageParam(
+ content=message["content"],
+ role="user" if message["role"] in ("user", "system") else "assistant",
+ )
+ for message in messages
+ ]
return (
- client.chat.completions.create(
- messages=messages,
- model="llama3-8b-8192",
+ client.messages.create( # type: ignore
+ model=model,
+ messages=messages_anthropic_format,
+ max_tokens=200,
temperature=0.8,
)
- .choices[0]
- .message.content
+ .content[0]
+ .text
)
+ if provider == "groq":
+ try:
+ client = Groq( # type: ignore
+ # This is the default and can be omitted
+ api_key=os.environ.get("GROQ_API_KEY"),
+ )
+ return (
+ client.chat.completions.create(
+ messages=messages, # type: ignore
+ model=model,
+ temperature=0.8,
+ )
+ .choices[0]
+ .message.content
+ )
+ except GroqNotFoundError:
+            pytest.skip("Groq API not available")
if provider == "cohere":
- client = cohere.Client(api_key=os.getenv("COHERE_API_KEY"))
- messages_cohere_format = []
+ client = cohere.Client(api_key=os.getenv("COHERE_API_KEY")) # type: ignore
+ messages_cohere_format: list[cohere.Message] = []
for message in messages:
if message["role"] == "system":
messages_cohere_format.append(cohere.SystemMessage(message=message["content"]))
@@ -70,9 +91,9 @@ def _call_llm_base(provider: Literal["openai", "anthropic"], messages: list[dict
messages_cohere_format.append(cohere.UserMessage(message=message["content"]))
elif message["role"] == "assistant":
messages_cohere_format.append(cohere.ChatbotMessage(message=message["content"]))
- return client.chat(
+ return client.chat( # type: ignore
chat_history=messages_cohere_format,
- model="command",
+ model=model,
max_tokens=200,
message=messages[-1]["content"],
temperature=0.8,
@@ -82,7 +103,7 @@ def _call_llm_base(provider: Literal["openai", "anthropic"], messages: list[dict
replicate.default_client._api_token = os.getenv("REPLICATE_API_KEY")
output = ""
for event in replicate.run(
- "meta/meta-llama-3-8b-instruct",
+ model,
input={
"prompt": messages[0]["content"] + " " + messages[-1]["content"],
"temperature": 0.8,
@@ -106,24 +127,21 @@ def _call_llm_base(provider: Literal["openai", "anthropic"], messages: list[dict
)(_call_llm_base)
-@pytest.mark.parametrize(
- "provider",
- (
- "openai",
- "groq",
- "cohere",
- "replicate",
- ),
-)
-def test_prompt(
- provider: str,
+@pytest.mark.parametrize("provider_model", _PROVIDER_AND_MODEL)
+def test_prompt_decorator(
+ provider_model: tuple[str, str],
opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter],
call_llm_messages: list[ChatCompletionMessageParam],
):
+ provider, model = provider_model
# GIVEN a default OpenTelemetry configuration
_, exporter = opentelemetry_test_configuration
# WHEN using the Prompt decorator
- _call_llm(provider=provider, messages=call_llm_messages)
+ _call_llm(
+ provider=provider,
+ model=model,
+ messages=call_llm_messages,
+ )
# THEN two spans are created: one for the OpenAI LLM provider call and one for the Prompt
spans = exporter.get_finished_spans()
assert len(spans) == 2
@@ -132,64 +150,62 @@ def test_prompt(
assert spans[1].attributes.get("prompt") is None # type: ignore
-@pytest.mark.parametrize(
- "provider",
- (
- "openai",
- "groq",
- "cohere",
- "replicate",
- ),
-)
-def test_prompt_hl_processor(
- provider: str,
+@pytest.mark.parametrize("provider_model", _PROVIDER_AND_MODEL)
+def test_prompt_decorator_with_hl_processor(
+ provider_model: tuple[str, str],
opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter],
call_llm_messages: list[ChatCompletionMessageParam],
):
+ provider, model = provider_model
# GIVEN an OpenTelemetry configuration with a Humanloop Span processor
_, exporter = opentelemetry_hl_test_configuration
# WHEN using the Prompt decorator
- _call_llm(provider=provider, messages=call_llm_messages)
+ _call_llm(
+ provider=provider,
+ model=model,
+ messages=call_llm_messages,
+ )
# THEN a single span is created since the LLM provider call span is merged in the Prompt span
spans = exporter.get_finished_spans()
assert len(spans) == 1
assert is_humanloop_span(span=spans[0])
- prompt: dict[str, Any] = read_from_opentelemetry_span(span=spans[0], key=HL_FILE_OT_KEY)["prompt"] # type: ignore
- assert prompt is not None
+ prompt = PromptKernelRequest.model_validate(
+ read_from_opentelemetry_span(span=spans[0], key=HL_FILE_OT_KEY)["prompt"] # type: ignore
+ )
# THEN temperature is taken from LLM provider call, but top_p is not since it is not specified
- assert prompt["temperature"] == 0.8
- assert prompt["provider"] == provider
- assert prompt.get("top_p") is None
+ assert prompt.temperature == 0.8
+ assert prompt.provider == provider
+ assert prompt.top_p is None
+ assert prompt.model == model
-@pytest.mark.parametrize(
- "provider",
- (
- "openai",
- "groq",
- "cohere",
- "replicate",
- ),
-)
-def test_prompt_with_defaults(
- provider: str,
+@pytest.mark.parametrize("provider_model", _PROVIDER_AND_MODEL)
+def test_prompt_decorator_with_defaults(
+ provider_model: tuple[str, str],
opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter],
call_llm_messages: list[ChatCompletionMessageParam],
):
+ provider, model = provider_model
# GIVEN an OpenTelemetry configuration with a Humanloop Span processor
_, exporter = opentelemetry_hl_test_configuration
# WHEN using the Prompt decorator with default values
- _call_llm_with_defaults(provider=provider, messages=call_llm_messages)
+ _call_llm_with_defaults(
+ provider=provider,
+ model=model,
+ messages=call_llm_messages,
+ )
# THEN a single span is created since the LLM provider call span is merged in the Prompt span
spans = exporter.get_finished_spans()
assert len(spans) == 1
assert is_humanloop_span(spans[0])
- prompt: dict[str, Any] = read_from_opentelemetry_span(span=spans[0], key=HL_FILE_OT_KEY)["prompt"] # type: ignore
- assert prompt is not None
+ prompt = PromptKernelRequest.model_validate(
+ read_from_opentelemetry_span(span=spans[0], key=HL_FILE_OT_KEY)["prompt"] # type: ignore
+ )
# THEN temperature is taken from decorator rather than intercepted LLM provider call
- assert prompt["temperature"] == 0.9
+ assert prompt.temperature == 0.9
# THEN top_p is present
- assert prompt["top_p"] == 0.1
+ assert prompt.top_p == 0.1
+ assert prompt.model == model
@pytest.mark.parametrize(
diff --git a/tests/decorators/test_tool_decorator.py b/tests/decorators/test_tool_decorator.py
index d3958f17..d19f3df9 100644
--- a/tests/decorators/test_tool_decorator.py
+++ b/tests/decorators/test_tool_decorator.py
@@ -1,30 +1,32 @@
-from typing import Any
+from typing import Any, Optional, Union
+import pytest
from humanloop.decorators.tool import tool
from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY
from humanloop.otel.helpers import read_from_opentelemetry_span
+from humanloop.types.tool_kernel_request import ToolKernelRequest
+from jsonschema.protocols import Validator
from opentelemetry.sdk.trace import Tracer
from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
-@tool()
-def calculator(operation: str, num1: float, num2: float) -> float:
- """Do arithmetic operations on two numbers."""
- if operation == "add":
- return num1 + num2
- elif operation == "subtract":
- return num1 - num2
- elif operation == "multiply":
- return num1 * num2
- elif operation == "divide":
- return num1 / num2
- else:
- raise ValueError(f"Invalid operation: {operation}")
-
-
def test_calculator_decorator(
opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter],
):
+ @tool()
+ def calculator(operation: str, num1: float, num2: float) -> float:
+ """Do arithmetic operations on two numbers."""
+ if operation == "add":
+ return num1 + num2
+ elif operation == "subtract":
+ return num1 - num2
+ elif operation == "multiply":
+ return num1 * num2
+ elif operation == "divide":
+ return num1 / num2
+ else:
+ raise ValueError(f"Invalid operation: {operation}")
+
# GIVEN a test OpenTelemetry configuration
_, exporter = opentelemetry_hl_test_configuration
# WHEN calling the @tool decorated function
@@ -41,4 +43,169 @@ def test_calculator_decorator(
"num2": 2,
}
    assert hl_file["tool"]["function"]["description"] == "Do arithmetic operations on two numbers."
+    # TODO: pydantic inconsistently dumps this as either a tuple or a list
assert calculator.json_schema == hl_file["tool"]["function"]
+
+ Validator.check_schema(calculator.json_schema)
+
+
+def test_union_type():
+ @tool()
+ def foo(a: Union[int, float], b: float) -> float:
+ return a + b
+
+ assert foo.json_schema["parameters"]["properties"]["a"] == {
+ "anyOf": [
+ {"type": "integer"},
+ {"type": "number"},
+ ]
+ }
+ assert foo.json_schema["parameters"]["properties"]["b"] == {"type": "number"}
+ assert foo.json_schema["parameters"]["required"] == ("a", "b")
+
+ Validator.check_schema(foo.json_schema)
+
+
+def test_not_required_parameter(
+ opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter],
+):
+ _, exporter = opentelemetry_hl_test_configuration
+
+ @tool()
+ def test_calculator(a: Optional[float], b: float) -> float:
+ if a is None:
+ a = 0
+ return a + b
+
+ assert test_calculator(3, 4) == 7
+ assert len(spans := exporter.get_finished_spans()) == 1
+ tool_kernel = ToolKernelRequest.model_validate(read_from_opentelemetry_span(spans[0], HL_FILE_OT_KEY)["tool"])
+ assert test_calculator.json_schema["parameters"]["properties"]["a"] == {"type": ["number", "null"]}
+ assert tool_kernel.function.parameters["required"] == ("b",) # type: ignore
+
+ Validator.check_schema(test_calculator.json_schema)
+
+
+def test_no_annotation_on_parameter_fails():
+ with pytest.raises(ValueError) as exc:
+
+ @tool()
+ def bad_tool(a: Optional[float], b) -> float:
+ if a is None:
+ a = 0
+ return a + b
+
+ assert exc.value.args[0] == "bad_tool: b lacks a type hint annotation"
+
+
+def test_no_annotation_function_returns_does_not_fail():
+ @tool()
+    def foo(a: Optional[float], b: float):
+ """Add two numbers."""
+ if a is None:
+ a = 0
+ return a + b
+
+ Validator.check_schema(foo.json_schema)
+
+
+def test_list_annotation_parameter(
+ opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter],
+):
+ _, exporter = opentelemetry_hl_test_configuration
+
+ @tool()
+ def foo(to_join: list[str]) -> str:
+ return " ".join(to_join)
+
+ assert "a b c" == foo(to_join=["a", "b", "c"])
+
+ assert len(spans := exporter.get_finished_spans()) == 1
+
+ tool_kernel = ToolKernelRequest.model_validate(read_from_opentelemetry_span(spans[0], HL_FILE_OT_KEY)["tool"])
+
+ assert "to_join" in tool_kernel.function.parameters["required"] # type: ignore
+ assert tool_kernel.function.parameters["properties"]["to_join"] == { # type: ignore
+ "type": "array",
+ "items": {"type": "string"},
+ }
+
+ Validator.check_schema(foo.json_schema)
+
+
+def test_list_list_parameter_annotation():
+ @tool()
+ def nested_plain_join(to_join: list[list[str]]):
+ return " ".join([val for sub_list in to_join for val in sub_list])
+
+ assert nested_plain_join.json_schema["parameters"]["properties"]["to_join"] == {
+ "type": "array",
+ "items": {
+ "type": "array",
+ "items": {"type": "string"},
+ },
+ }
+
+ Validator.check_schema(nested_plain_join.json_schema)
+
+
+def test_complex_dict_annotation():
+ @tool()
+ def foo(a: dict[Union[int, str], list[str]]):
+ return a
+
+ assert foo.json_schema["parameters"]["properties"]["a"] == {
+ "type": "object",
+ "properties": {
+ "key": {"anyOf": [{"type": "integer"}, {"type": "string"}]},
+ "value": {"type": "array", "items": {"type": "string"}},
+ },
+ }
+
+ Validator.check_schema(foo.json_schema)
+
+
+def test_tuple_annotation():
+ @tool()
+ def foo(a: Optional[tuple[int, Optional[str], float]]):
+ return a
+
+ assert foo.json_schema["parameters"]["properties"]["a"] == {
+ "type": ["array", "null"],
+ "items": [
+ {"type": "integer"},
+ {"type": ["string", "null"]},
+ {"type": "number"},
+ ],
+ }
+
+ Validator.check_schema(foo.json_schema)
+
+
+def test_strict_false():
+ @tool(strict=False)
+ def foo(a: int, b: int) -> int:
+ return a + b
+
+ assert foo.json_schema["strict"] is False
+
+ Validator.check_schema(foo.json_schema)
+
+
+def test_tool_no_args():
+ @tool()
+ def foo():
+ return 42
+
+ assert foo.json_schema == {
+ "description": "",
+ "name": "foo",
+ "parameters": {
+ "properties": {},
+ "required": [],
+ "type": "object",
+ },
+ "strict": True,
+ }
+
+ Validator.check_schema(foo.json_schema)
From 7449b5ced4573a50b50d5f7ed13fe83b21d058dd Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Wed, 30 Oct 2024 15:41:57 +0000
Subject: [PATCH 16/70] Revisited docstrings
---
poetry.lock | 178 ++------
pyproject.toml | 51 ++-
src/humanloop/client.py | 234 +++++++---
src/humanloop/decorators/flow.py | 48 ++-
src/humanloop/decorators/helpers.py | 12 +
src/humanloop/decorators/prompt.py | 40 +-
src/humanloop/decorators/tool.py | 502 +++++++++++++++-------
src/humanloop/otel/__init__.py | 81 ++--
src/humanloop/otel/exporter.py | 111 +++--
src/humanloop/otel/helpers.py | 99 ++++-
src/humanloop/otel/processor.py | 23 +-
tests/conftest.py | 22 +
tests/decorators/test_flow_decorator.py | 191 +++++---
tests/decorators/test_prompt_decorator.py | 51 ++-
tests/decorators/test_tool_decorator.py | 198 ++++++++-
tests/otel/test_helpers.py | 4 +-
16 files changed, 1255 insertions(+), 590 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index fc30b989..54edca5a 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -471,13 +471,13 @@ files = [
[[package]]
name = "huggingface-hub"
-version = "0.26.1"
+version = "0.26.2"
description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub"
optional = false
python-versions = ">=3.8.0"
files = [
- {file = "huggingface_hub-0.26.1-py3-none-any.whl", hash = "sha256:5927a8fc64ae68859cd954b7cc29d1c8390a5e15caba6d3d349c973be8fdacf3"},
- {file = "huggingface_hub-0.26.1.tar.gz", hash = "sha256:414c0d9b769eecc86c70f9d939d0f48bb28e8461dd1130021542eff0212db890"},
+ {file = "huggingface_hub-0.26.2-py3-none-any.whl", hash = "sha256:98c2a5a8e786c7b2cb6fdeb2740893cba4d53e312572ed3d8afafda65b128c46"},
+ {file = "huggingface_hub-0.26.2.tar.gz", hash = "sha256:b100d853465d965733964d123939ba287da60a547087783ddff8a323f340332b"},
]
[package.dependencies]
@@ -664,22 +664,6 @@ files = [
[package.dependencies]
referencing = ">=0.31.0"
-[[package]]
-name = "mistralai"
-version = "0.4.2"
-description = ""
-optional = false
-python-versions = "<4.0,>=3.9"
-files = [
- {file = "mistralai-0.4.2-py3-none-any.whl", hash = "sha256:63c98eea139585f0a3b2c4c6c09c453738bac3958055e6f2362d3866e96b0168"},
- {file = "mistralai-0.4.2.tar.gz", hash = "sha256:5eb656710517168ae053f9847b0bb7f617eda07f1f93f946ad6c91a4d407fd93"},
-]
-
-[package.dependencies]
-httpx = ">=0.25,<1"
-orjson = ">=3.9.10,<3.11"
-pydantic = ">=2.5.2,<3"
-
[[package]]
name = "mypy"
version = "1.0.1"
@@ -739,13 +723,13 @@ files = [
[[package]]
name = "openai"
-version = "1.52.2"
+version = "1.53.0"
description = "The official Python library for the openai API"
optional = false
python-versions = ">=3.7.1"
files = [
- {file = "openai-1.52.2-py3-none-any.whl", hash = "sha256:57e9e37bc407f39bb6ec3a27d7e8fb9728b2779936daa1fcf95df17d3edfaccc"},
- {file = "openai-1.52.2.tar.gz", hash = "sha256:87b7d0f69d85f5641678d414b7ee3082363647a5c66a462ed7f3ccb59582da0d"},
+ {file = "openai-1.53.0-py3-none-any.whl", hash = "sha256:20f408c32fc5cb66e60c6882c994cdca580a5648e10045cd840734194f033418"},
+ {file = "openai-1.53.0.tar.gz", hash = "sha256:be2c4e77721b166cce8130e544178b7d579f751b4b074ffbaade3854b6f85ec5"},
]
[package.dependencies]
@@ -794,106 +778,89 @@ wrapt = ">=1.0.0,<2.0.0"
[[package]]
name = "opentelemetry-instrumentation-anthropic"
-version = "0.33.3"
+version = "0.33.5"
description = "OpenTelemetry Anthropic instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_anthropic-0.33.3-py3-none-any.whl", hash = "sha256:dc4110c6400708d600f79fd78e8e8fe04b90a82b44949817cc91c961cd4db6e7"},
- {file = "opentelemetry_instrumentation_anthropic-0.33.3.tar.gz", hash = "sha256:d245f1c732caebe4706a4900084758296d1d46d37e042bbd8542d0aa0e691899"},
+ {file = "opentelemetry_instrumentation_anthropic-0.33.5-py3-none-any.whl", hash = "sha256:3e94d6293c28e805957a5c665bfabb7b4a5165b50dee7e6940b1b3606598cc09"},
+ {file = "opentelemetry_instrumentation_anthropic-0.33.5.tar.gz", hash = "sha256:a7856cd47926b61b2fa722e1a5f30612fed23863a5211c156feda483c294a5eb"},
]
[package.dependencies]
opentelemetry-api = ">=1.27.0,<2.0.0"
opentelemetry-instrumentation = ">=0.48b0,<0.49"
opentelemetry-semantic-conventions = ">=0.48b0,<0.49"
-opentelemetry-semantic-conventions-ai = "0.4.1"
+opentelemetry-semantic-conventions-ai = "0.4.2"
[[package]]
name = "opentelemetry-instrumentation-cohere"
-version = "0.33.3"
+version = "0.33.5"
description = "OpenTelemetry Cohere instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_cohere-0.33.3-py3-none-any.whl", hash = "sha256:b0a614a321f332e31eb74980a603303123b58a3627a11e7db5f13a8b3c660311"},
- {file = "opentelemetry_instrumentation_cohere-0.33.3.tar.gz", hash = "sha256:9d940cb30b7e4be94f063f5afadeb2572f4cfe69a731d7c45faaa9f034991a5e"},
+ {file = "opentelemetry_instrumentation_cohere-0.33.5-py3-none-any.whl", hash = "sha256:bd69ae1f87d531ca6cf91eb5e5b4cd1eefeaa9da43a39725d7b90e3dd3d27158"},
+ {file = "opentelemetry_instrumentation_cohere-0.33.5.tar.gz", hash = "sha256:3bab99113f1cbd3d592f9e0f217e275375bd7c0ed9ab62931d8a31e317033f84"},
]
[package.dependencies]
opentelemetry-api = ">=1.27.0,<2.0.0"
opentelemetry-instrumentation = ">=0.48b0,<0.49"
opentelemetry-semantic-conventions = ">=0.48b0,<0.49"
-opentelemetry-semantic-conventions-ai = "0.4.1"
+opentelemetry-semantic-conventions-ai = "0.4.2"
[[package]]
name = "opentelemetry-instrumentation-groq"
-version = "0.33.3"
+version = "0.33.5"
description = "OpenTelemetry Groq instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_groq-0.33.3-py3-none-any.whl", hash = "sha256:53d75f8ec2dbcf5e0f06ed53a7a4cb875823749cb96bbc07dbb7a1d5ee374e32"},
- {file = "opentelemetry_instrumentation_groq-0.33.3.tar.gz", hash = "sha256:98408aaf91e2d55ad348deb12666339fbcb972b18ec511c4f394d3fac37041eb"},
+ {file = "opentelemetry_instrumentation_groq-0.33.5-py3-none-any.whl", hash = "sha256:c772545fbffa68f508457eaf2f7a7ae55edf4c5e66ed6ec7692a234c43f69c81"},
+ {file = "opentelemetry_instrumentation_groq-0.33.5.tar.gz", hash = "sha256:1c123a93a5582407911a33f3e8bfcb8abedfcac9f279aa5739179c5482d73d42"},
]
[package.dependencies]
opentelemetry-api = ">=1.27.0,<2.0.0"
opentelemetry-instrumentation = ">=0.48b0,<0.49"
opentelemetry-semantic-conventions = ">=0.48b0,<0.49"
-opentelemetry-semantic-conventions-ai = "0.4.1"
-
-[[package]]
-name = "opentelemetry-instrumentation-mistralai"
-version = "0.33.3"
-description = "OpenTelemetry Mistral AI instrumentation"
-optional = false
-python-versions = "<4,>=3.9"
-files = [
- {file = "opentelemetry_instrumentation_mistralai-0.33.3-py3-none-any.whl", hash = "sha256:5e2eb745bf6e35ff6dbb24abe6b88a62978b07214f1a8b17e0629321bd385e0f"},
- {file = "opentelemetry_instrumentation_mistralai-0.33.3.tar.gz", hash = "sha256:3b37aac02fe16acc3aa781140135a6c46cf0acaade73aad2820d38037927e788"},
-]
-
-[package.dependencies]
-opentelemetry-api = ">=1.27.0,<2.0.0"
-opentelemetry-instrumentation = ">=0.48b0,<0.49"
-opentelemetry-semantic-conventions = ">=0.48b0,<0.49"
-opentelemetry-semantic-conventions-ai = "0.4.1"
+opentelemetry-semantic-conventions-ai = "0.4.2"
[[package]]
name = "opentelemetry-instrumentation-openai"
-version = "0.33.3"
+version = "0.33.5"
description = "OpenTelemetry OpenAI instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_openai-0.33.3-py3-none-any.whl", hash = "sha256:f5ef4452b269bb409cc260fd611834c33296495e39700fd6e6f83a1cef07b9fd"},
- {file = "opentelemetry_instrumentation_openai-0.33.3.tar.gz", hash = "sha256:06ad92d5d852f93ee7c0d9b545a412df5265044dae4d6be7056a10fa8afb2fdc"},
+ {file = "opentelemetry_instrumentation_openai-0.33.5-py3-none-any.whl", hash = "sha256:bf9d238af2e37ad15f6e7e199f5f0886d38f5ccea05e9ad08375513b8247b4a3"},
+ {file = "opentelemetry_instrumentation_openai-0.33.5.tar.gz", hash = "sha256:0e7f5eb8e67ef5c14e80f040484f42cfcc04a9ebb0fc53947663a27ee3470e4f"},
]
[package.dependencies]
opentelemetry-api = ">=1.27.0,<2.0.0"
opentelemetry-instrumentation = ">=0.48b0,<0.49"
opentelemetry-semantic-conventions = ">=0.48b0,<0.49"
-opentelemetry-semantic-conventions-ai = "0.4.1"
+opentelemetry-semantic-conventions-ai = "0.4.2"
tiktoken = ">=0.6.0,<1"
[[package]]
name = "opentelemetry-instrumentation-replicate"
-version = "0.33.3"
+version = "0.33.5"
description = "OpenTelemetry Replicate instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_replicate-0.33.3-py3-none-any.whl", hash = "sha256:c2870c1939b69ff3c57a508404cec75329e07c907eb9600f47ec64be2c0b8310"},
- {file = "opentelemetry_instrumentation_replicate-0.33.3.tar.gz", hash = "sha256:06c9f63f7c235392567b10efe20f8cb2379f322d0a72e4c52ab4912f1ebb943a"},
+ {file = "opentelemetry_instrumentation_replicate-0.33.5-py3-none-any.whl", hash = "sha256:213c47dad5bebd6f26b2e7ab50529ce082782ee56f0e2752c86d86532474e6eb"},
+ {file = "opentelemetry_instrumentation_replicate-0.33.5.tar.gz", hash = "sha256:d039cba78589dbefdf2e3c76eb4461b940db644260e88fba71a24ff413ce83cd"},
]
[package.dependencies]
opentelemetry-api = ">=1.27.0,<2.0.0"
opentelemetry-instrumentation = ">=0.48b0,<0.49"
opentelemetry-semantic-conventions = ">=0.48b0,<0.49"
-opentelemetry-semantic-conventions-ai = "0.4.1"
+opentelemetry-semantic-conventions-ai = "0.4.2"
[[package]]
name = "opentelemetry-sdk"
@@ -928,80 +895,13 @@ opentelemetry-api = "1.27.0"
[[package]]
name = "opentelemetry-semantic-conventions-ai"
-version = "0.4.1"
+version = "0.4.2"
description = "OpenTelemetry Semantic Conventions Extension for Large Language Models"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_semantic_conventions_ai-0.4.1-py3-none-any.whl", hash = "sha256:b6c6e3976a5ea31058faeaf0450a6a56d4576a9734c94c1a4cb82332ee635fe3"},
- {file = "opentelemetry_semantic_conventions_ai-0.4.1.tar.gz", hash = "sha256:aaf59b2f24d745692170b96d86d7c5560f42443dcf88ced49ae9d4542db1902f"},
-]
-
-[[package]]
-name = "orjson"
-version = "3.10.10"
-description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy"
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "orjson-3.10.10-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:b788a579b113acf1c57e0a68e558be71d5d09aa67f62ca1f68e01117e550a998"},
- {file = "orjson-3.10.10-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:804b18e2b88022c8905bb79bd2cbe59c0cd014b9328f43da8d3b28441995cda4"},
- {file = "orjson-3.10.10-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9972572a1d042ec9ee421b6da69f7cc823da5962237563fa548ab17f152f0b9b"},
- {file = "orjson-3.10.10-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc6993ab1c2ae7dd0711161e303f1db69062955ac2668181bfdf2dd410e65258"},
- {file = "orjson-3.10.10-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d78e4cacced5781b01d9bc0f0cd8b70b906a0e109825cb41c1b03f9c41e4ce86"},
- {file = "orjson-3.10.10-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e6eb2598df518281ba0cbc30d24c5b06124ccf7e19169e883c14e0831217a0bc"},
- {file = "orjson-3.10.10-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:23776265c5215ec532de6238a52707048401a568f0fa0d938008e92a147fe2c7"},
- {file = "orjson-3.10.10-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8cc2a654c08755cef90b468ff17c102e2def0edd62898b2486767204a7f5cc9c"},
- {file = "orjson-3.10.10-cp310-none-win32.whl", hash = "sha256:081b3fc6a86d72efeb67c13d0ea7c030017bd95f9868b1e329a376edc456153b"},
- {file = "orjson-3.10.10-cp310-none-win_amd64.whl", hash = "sha256:ff38c5fb749347768a603be1fb8a31856458af839f31f064c5aa74aca5be9efe"},
- {file = "orjson-3.10.10-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:879e99486c0fbb256266c7c6a67ff84f46035e4f8749ac6317cc83dacd7f993a"},
- {file = "orjson-3.10.10-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:019481fa9ea5ff13b5d5d95e6fd5ab25ded0810c80b150c2c7b1cc8660b662a7"},
- {file = "orjson-3.10.10-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0dd57eff09894938b4c86d4b871a479260f9e156fa7f12f8cad4b39ea8028bb5"},
- {file = "orjson-3.10.10-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dbde6d70cd95ab4d11ea8ac5e738e30764e510fc54d777336eec09bb93b8576c"},
- {file = "orjson-3.10.10-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b2625cb37b8fb42e2147404e5ff7ef08712099197a9cd38895006d7053e69d6"},
- {file = "orjson-3.10.10-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dbf3c20c6a7db69df58672a0d5815647ecf78c8e62a4d9bd284e8621c1fe5ccb"},
- {file = "orjson-3.10.10-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:75c38f5647e02d423807d252ce4528bf6a95bd776af999cb1fb48867ed01d1f6"},
- {file = "orjson-3.10.10-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:23458d31fa50ec18e0ec4b0b4343730928296b11111df5f547c75913714116b2"},
- {file = "orjson-3.10.10-cp311-none-win32.whl", hash = "sha256:2787cd9dedc591c989f3facd7e3e86508eafdc9536a26ec277699c0aa63c685b"},
- {file = "orjson-3.10.10-cp311-none-win_amd64.whl", hash = "sha256:6514449d2c202a75183f807bc755167713297c69f1db57a89a1ef4a0170ee269"},
- {file = "orjson-3.10.10-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:8564f48f3620861f5ef1e080ce7cd122ee89d7d6dacf25fcae675ff63b4d6e05"},
- {file = "orjson-3.10.10-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5bf161a32b479034098c5b81f2608f09167ad2fa1c06abd4e527ea6bf4837a9"},
- {file = "orjson-3.10.10-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:68b65c93617bcafa7f04b74ae8bc2cc214bd5cb45168a953256ff83015c6747d"},
- {file = "orjson-3.10.10-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e8e28406f97fc2ea0c6150f4c1b6e8261453318930b334abc419214c82314f85"},
- {file = "orjson-3.10.10-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e4d0d9fe174cc7a5bdce2e6c378bcdb4c49b2bf522a8f996aa586020e1b96cee"},
- {file = "orjson-3.10.10-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3be81c42f1242cbed03cbb3973501fcaa2675a0af638f8be494eaf37143d999"},
- {file = "orjson-3.10.10-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:65f9886d3bae65be026219c0a5f32dbbe91a9e6272f56d092ab22561ad0ea33b"},
- {file = "orjson-3.10.10-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:730ed5350147db7beb23ddaf072f490329e90a1d059711d364b49fe352ec987b"},
- {file = "orjson-3.10.10-cp312-none-win32.whl", hash = "sha256:a8f4bf5f1c85bea2170800020d53a8877812892697f9c2de73d576c9307a8a5f"},
- {file = "orjson-3.10.10-cp312-none-win_amd64.whl", hash = "sha256:384cd13579a1b4cd689d218e329f459eb9ddc504fa48c5a83ef4889db7fd7a4f"},
- {file = "orjson-3.10.10-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:44bffae68c291f94ff5a9b4149fe9d1bdd4cd0ff0fb575bcea8351d48db629a1"},
- {file = "orjson-3.10.10-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e27b4c6437315df3024f0835887127dac2a0a3ff643500ec27088d2588fa5ae1"},
- {file = "orjson-3.10.10-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bca84df16d6b49325a4084fd8b2fe2229cb415e15c46c529f868c3387bb1339d"},
- {file = "orjson-3.10.10-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c14ce70e8f39bd71f9f80423801b5d10bf93d1dceffdecd04df0f64d2c69bc01"},
- {file = "orjson-3.10.10-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:24ac62336da9bda1bd93c0491eff0613003b48d3cb5d01470842e7b52a40d5b4"},
- {file = "orjson-3.10.10-cp313-none-win32.whl", hash = "sha256:eb0a42831372ec2b05acc9ee45af77bcaccbd91257345f93780a8e654efc75db"},
- {file = "orjson-3.10.10-cp313-none-win_amd64.whl", hash = "sha256:f0c4f37f8bf3f1075c6cc8dd8a9f843689a4b618628f8812d0a71e6968b95ffd"},
- {file = "orjson-3.10.10-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:829700cc18503efc0cf502d630f612884258020d98a317679cd2054af0259568"},
- {file = "orjson-3.10.10-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e0ceb5e0e8c4f010ac787d29ae6299846935044686509e2f0f06ed441c1ca949"},
- {file = "orjson-3.10.10-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0c25908eb86968613216f3db4d3003f1c45d78eb9046b71056ca327ff92bdbd4"},
- {file = "orjson-3.10.10-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:218cb0bc03340144b6328a9ff78f0932e642199ac184dd74b01ad691f42f93ff"},
- {file = "orjson-3.10.10-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e2277ec2cea3775640dc81ab5195bb5b2ada2fe0ea6eee4677474edc75ea6785"},
- {file = "orjson-3.10.10-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:848ea3b55ab5ccc9d7bbd420d69432628b691fba3ca8ae3148c35156cbd282aa"},
- {file = "orjson-3.10.10-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:e3e67b537ac0c835b25b5f7d40d83816abd2d3f4c0b0866ee981a045287a54f3"},
- {file = "orjson-3.10.10-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:7948cfb909353fce2135dcdbe4521a5e7e1159484e0bb024c1722f272488f2b8"},
- {file = "orjson-3.10.10-cp38-none-win32.whl", hash = "sha256:78bee66a988f1a333dc0b6257503d63553b1957889c17b2c4ed72385cd1b96ae"},
- {file = "orjson-3.10.10-cp38-none-win_amd64.whl", hash = "sha256:f1d647ca8d62afeb774340a343c7fc023efacfd3a39f70c798991063f0c681dd"},
- {file = "orjson-3.10.10-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:5a059afddbaa6dd733b5a2d76a90dbc8af790b993b1b5cb97a1176ca713b5df8"},
- {file = "orjson-3.10.10-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f9b5c59f7e2a1a410f971c5ebc68f1995822837cd10905ee255f96074537ee6"},
- {file = "orjson-3.10.10-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d5ef198bafdef4aa9d49a4165ba53ffdc0a9e1c7b6f76178572ab33118afea25"},
- {file = "orjson-3.10.10-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aaf29ce0bb5d3320824ec3d1508652421000ba466abd63bdd52c64bcce9eb1fa"},
- {file = "orjson-3.10.10-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dddd5516bcc93e723d029c1633ae79c4417477b4f57dad9bfeeb6bc0315e654a"},
- {file = "orjson-3.10.10-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a12f2003695b10817f0fa8b8fca982ed7f5761dcb0d93cff4f2f9f6709903fd7"},
- {file = "orjson-3.10.10-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:672f9874a8a8fb9bb1b771331d31ba27f57702c8106cdbadad8bda5d10bc1019"},
- {file = "orjson-3.10.10-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1dcbb0ca5fafb2b378b2c74419480ab2486326974826bbf6588f4dc62137570a"},
- {file = "orjson-3.10.10-cp39-none-win32.whl", hash = "sha256:d9bbd3a4b92256875cb058c3381b782649b9a3c68a4aa9a2fff020c2f9cfc1be"},
- {file = "orjson-3.10.10-cp39-none-win_amd64.whl", hash = "sha256:766f21487a53aee8524b97ca9582d5c6541b03ab6210fbaf10142ae2f3ced2aa"},
- {file = "orjson-3.10.10.tar.gz", hash = "sha256:37949383c4df7b4337ce82ee35b6d7471e55195efa7dcb45ab8226ceadb0fe3b"},
+ {file = "opentelemetry_semantic_conventions_ai-0.4.2-py3-none-any.whl", hash = "sha256:0a5432aacd441eb7dbdf62e0de3f3d90ed4f69595b687a6dd2ccc4c5b94c5861"},
+ {file = "opentelemetry_semantic_conventions_ai-0.4.2.tar.gz", hash = "sha256:90b969c7d838e03e30a9150ffe46543d8e58e9d7370c7221fd30d4ce4d7a1b96"},
]
[[package]]
@@ -1626,23 +1526,23 @@ files = [
[[package]]
name = "setuptools"
-version = "75.2.0"
+version = "75.3.0"
description = "Easily download, build, install, upgrade, and uninstall Python packages"
optional = false
python-versions = ">=3.8"
files = [
- {file = "setuptools-75.2.0-py3-none-any.whl", hash = "sha256:a7fcb66f68b4d9e8e66b42f9876150a3371558f98fa32222ffaa5bced76406f8"},
- {file = "setuptools-75.2.0.tar.gz", hash = "sha256:753bb6ebf1f465a1912e19ed1d41f403a79173a9acf66a42e7e6aec45c3c16ec"},
+ {file = "setuptools-75.3.0-py3-none-any.whl", hash = "sha256:f2504966861356aa38616760c0f66568e535562374995367b4e69c7143cf6bcd"},
+ {file = "setuptools-75.3.0.tar.gz", hash = "sha256:fba5dd4d766e97be1b1681d98712680ae8f2f26d7881245f2ce9e40714f1a686"},
]
[package.extras]
check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)", "ruff (>=0.5.2)"]
-core = ["importlib-metadata (>=6)", "importlib-resources (>=5.10.2)", "jaraco.collections", "jaraco.functools", "jaraco.text (>=3.7)", "more-itertools", "more-itertools (>=8.8)", "packaging", "packaging (>=24)", "platformdirs (>=2.6.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"]
+core = ["importlib-metadata (>=6)", "importlib-resources (>=5.10.2)", "jaraco.collections", "jaraco.functools", "jaraco.text (>=3.7)", "more-itertools", "more-itertools (>=8.8)", "packaging", "packaging (>=24)", "platformdirs (>=4.2.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"]
cover = ["pytest-cov"]
doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"]
enabler = ["pytest-enabler (>=2.2)"]
-test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"]
-type = ["importlib-metadata (>=7.0.2)", "jaraco.develop (>=7.21)", "mypy (==1.11.*)", "pytest-mypy"]
+test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test (>=5.5)", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"]
+type = ["importlib-metadata (>=7.0.2)", "jaraco.develop (>=7.21)", "mypy (==1.12.*)", "pytest-mypy"]
[[package]]
name = "six"
@@ -1843,13 +1743,13 @@ files = [
[[package]]
name = "tqdm"
-version = "4.66.5"
+version = "4.66.6"
description = "Fast, Extensible Progress Meter"
optional = false
python-versions = ">=3.7"
files = [
- {file = "tqdm-4.66.5-py3-none-any.whl", hash = "sha256:90279a3770753eafc9194a0364852159802111925aa30eb3f9d85b0e805ac7cd"},
- {file = "tqdm-4.66.5.tar.gz", hash = "sha256:e1020aef2e5096702d8a025ac7d16b1577279c9d63f8375b63083e9a5f0fcbad"},
+ {file = "tqdm-4.66.6-py3-none-any.whl", hash = "sha256:223e8b5359c2efc4b30555531f09e9f2f3589bcd7fdd389271191031b49b7a63"},
+ {file = "tqdm-4.66.6.tar.gz", hash = "sha256:4bdd694238bef1485ce839d67967ab50af8f9272aab687c0d7702a01da0be090"},
]
[package.dependencies]
@@ -2029,4 +1929,4 @@ type = ["pytest-mypy"]
[metadata]
lock-version = "2.0"
python-versions = "^3.9"
-content-hash = "ea6486f3664117d7ad905484926afaae5e9f260583f3a9b398ac5580f4bfc6a8"
+content-hash = "7fcc781f96cb6cefe8ec50a22f6410b4b20f02651e787d06dbad4cd46416cc8b"
diff --git a/pyproject.toml b/pyproject.toml
index 9405b9c2..da6c24c3 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -26,15 +26,6 @@ packages = [
{ include = "humanloop", from = "src"}
]
-[tool.poetry.group.dev.dependencies]
-parse-type = "^0.6.4"
-anthropic = "^0.37.1"
-mistralai = "<1.0.0"
-groq = "^0.11.0"
-cohere = "^5.11.2"
-replicate = "^1.0.3"
-jsonschema = "^4.23.0"
-types-jsonschema = "^4.23.0.20240813"
[project.urls]
Repository = 'https://github.com/humanloop/humanloop-python'
@@ -46,18 +37,23 @@ httpx-sse = "0.4.0"
pydantic = ">= 1.9.2"
pydantic-core = "^2.18.2"
typing_extensions = ">= 4.0.0"
-
+parse = "^1.20.2"
opentelemetry-sdk = "^1.27.0"
opentelemetry-api = "^1.27.0"
opentelemetry-instrumentation-openai = "^0.33.3"
opentelemetry-instrumentation-cohere = "^0.33.3"
opentelemetry-instrumentation-anthropic = "^0.33.3"
-opentelemetry-instrumentation-mistralai = "^0.33.3"
-
-parse = "^1.20.2"
opentelemetry-instrumentation-groq = "^0.33.3"
opentelemetry-instrumentation-replicate = "^0.33.3"
-[tool.poetry.dev-dependencies]
+
+[tool.poetry.group.dev.dependencies]
+parse-type = "^0.6.4"
+anthropic = "^0.37.1"
+groq = "^0.11.0"
+cohere = "^5.11.2"
+replicate = "^1.0.3"
+jsonschema = "^4.23.0"
+types-jsonschema = "^4.23.0.20240813"
mypy = "1.0.1"
pytest = "^7.4.0"
pytest-asyncio = "^0.23.5"
@@ -77,6 +73,33 @@ plugins = ["pydantic.mypy"]
[tool.ruff]
line-length = 120
+[tool.ruff.lint]
+select = [
+ "E", # pycodestyle errors
+ "W", # pycodestyle warnings
+ "F", # pyflakes
+ "I", # isort
+ "C", # flake8-comprehensions
+ "B", # flake8-bugbear
+ "UP", # pyupgrade
+ "DTZ", # unsafe naive datetime
+ "PL", # pylint
+]
+ignore = [
+ "B904", # raise without from inside except, TODO: turn back on
+ "E501", # line too long, handled by ruff formatter
+ "UP015", # redundant open modes
+ "B008", # do not perform function calls in argument defaults
+ "C901", # too complex
+ "PLR0912", # too many branches
+ "PLR0913", # too many arguments
+ "PLR0911", # too many return statements
+    "PLR0915", # too many statements
+ "PLR2004", # magic value comparison
+ "PLR5501", # 'elif' instead of 'else' then 'if'
+ "PLE1205", # too many arguments for format string
+]
+
[build-system]
requires = ["poetry-core"]
diff --git a/src/humanloop/client.py b/src/humanloop/client.py
index 45a4bec8..3d2e0927 100644
--- a/src/humanloop/client.py
+++ b/src/humanloop/client.py
@@ -4,12 +4,13 @@
import httpx
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.trace import TracerProvider
+from opentelemetry.trace import Tracer
from humanloop.types.response_format import ResponseFormat
-from .decorators.flow import flow as flow_decorator
-from .decorators.prompt import prompt as prompt_decorator
-from .decorators.tool import tool as tool_decorator
+from .decorators.flow import flow as flow_decorator_factory
+from .decorators.prompt import prompt as prompt_decorator_factory
+from .decorators.tool import tool as tool_decorator_factory
from humanloop.core.client_wrapper import SyncClientWrapper
from humanloop.types.model_endpoints import ModelEndpoints
from humanloop.types.model_providers import ModelProviders
@@ -17,7 +18,7 @@
from humanloop.types.prompt_kernel_request_stop import PromptKernelRequestStop
from .otel.exporter import HumanloopSpanExporter
from .otel.processor import HumanloopSpanProcessor
-from .otel import instrument_provider, set_tracer
+from .otel import instrument_provider, set_humanloop_sdk_tracer
from .base_client import BaseHumanloop, AsyncBaseHumanloop
from .environment import HumanloopEnvironment
from .eval_utils import _run_eval, Dataset, File, Evaluator, EvaluatorCheck
@@ -69,9 +70,10 @@ def __init__(self, client_wrapper: SyncClientWrapper):
class Humanloop(BaseHumanloop):
"""
- See docstring of BaseHumanloop.
+ See docstring of :class:`BaseHumanloop`.
- This class extends the base client that contains the auto generated SDK functionality with custom evaluation utilities.
+ This class extends the base client with custom evaluation utilities
+ and decorators for declaring Files in code.
"""
def __init__(
@@ -83,10 +85,19 @@ def __init__(
timeout: typing.Optional[float] = None,
follow_redirects: typing.Optional[bool] = True,
httpx_client: typing.Optional[httpx.Client] = None,
+ opentelemetry_tracer_provider: Optional[TracerProvider] = None,
+ opentelemetry_tracer: Optional[Tracer] = None,
):
- """See docstring of BaseHumanloop.__init__(...)
+ """See docstring of :func:`BaseHumanloop.__init__(...)`
- This method extends the base client with evaluation utilities.
+ This class extends the base client with custom evaluation utilities
+ and decorators for declaring Files in code.
+
+ The Humanloop SDK File decorators use OpenTelemetry internally. You can provide a
+ TracerProvider and a Tracer if you'd like to integrate them with your existing
+ telemetry system. Otherwise, an internal TracerProvider will be used.
+    If you provide only the `TracerProvider`, the SDK will log under a Tracer
+ named `humanloop.sdk`.
"""
super().__init__(
base_url=base_url,
@@ -97,13 +108,16 @@ def __init__(
httpx_client=httpx_client,
)
- self._tracer_provider = TracerProvider(
- resource=Resource(
- attributes={
- "instrumentor": "humanloop.sdk",
- }
- ),
- )
+ if opentelemetry_tracer_provider is not None:
+ self._tracer_provider = opentelemetry_tracer_provider
+ else:
+ self._tracer_provider = TracerProvider(
+ resource=Resource(
+ attributes={
+ "instrumentor": "humanloop.sdk",
+ }
+ ),
+ )
instrument_provider(provider=self._tracer_provider)
self._tracer_provider.add_span_processor(
HumanloopSpanProcessor(
@@ -112,8 +126,12 @@ def __init__(
)
),
)
- tracer = self._tracer_provider.get_tracer("humanloop.sdk")
- set_tracer(tracer)
+
+ if opentelemetry_tracer is not None:
+ set_humanloop_sdk_tracer(opentelemetry_tracer)
+ else:
+ tracer = self._tracer_provider.get_tracer("humanloop.sdk")
+ set_humanloop_sdk_tracer(tracer)
eval_client = ExtendedEvalsClient(client_wrapper=self._client_wrapper)
eval_client.client = self
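
For reference, a minimal usage sketch (not part of the patch) of the new constructor parameters; the package-level import and the `api_key` argument are assumptions based on the base client:

```python
from opentelemetry.sdk.trace import TracerProvider

from humanloop import Humanloop  # import path assumed

# Reuse an existing telemetry setup instead of the SDK-internal one
provider = TracerProvider()
tracer = provider.get_tracer("my-app")

client = Humanloop(
    api_key="hl-...",  # hypothetical key
    opentelemetry_tracer_provider=provider,
    # Optional: if omitted, the SDK logs under a Tracer named "humanloop.sdk"
    opentelemetry_tracer=tracer,
)
```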
@@ -123,7 +141,6 @@ def __init__(
def prompt(
self,
*,
- # TODO: Template can be a list of objects
path: Optional[str] = None,
model: Optional[str] = None,
endpoint: Optional[ModelEndpoints] = None,
@@ -139,16 +156,66 @@ def prompt(
seed: Optional[int] = None,
response_format: Optional[ResponseFormat] = None,
):
- """Decorator to mark a function as a Humanloop Prompt.
-
- The decorator intercepts calls to LLM provider APIs, extracts
- hyperparameters used in the call, and upsert a new Prompt File
- on Humanloop based on them. If a hyperparameter is specified in the
- `@prompt` decorator, then it overrides any inference made from inside
- the function.
-
- If the Prompt already exists on the specified path, a new version will
- be inserted when the hyperparameters used in making LLM calls change.
+ """Decorator for declaring a (Prompt)[https://humanloop.com/docs/explanation/prompts] in code.
+
+ The decorator intercepts calls to LLM provider APIs and creates
+ a new Prompt file based on the hyperparameters used in the call.
+        If a hyperparameter is specified in the `@prompt` decorator, then
+        it overrides any value intercepted from the LLM provider call.
+
+        If the [Prompt](https://humanloop.com/docs/explanation/prompts) already exists
+        on the specified path, a new version will be upserted when any of the above change.
+
+        Here's an example of declaring a [Prompt](https://humanloop.com/docs/explanation/prompts) in code:
+
+ ```python
+ @prompt(template="You are an assistant on the following topics: {topics}.")
+ def call_llm(messages):
+ client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
+ return client.chat.completions.create(
+ model="gpt-4o",
+ temperature=0.8,
+ frequency_penalty=0.5,
+ max_tokens=200,
+ messages=messages,
+ ).choices[0].message.content
+ ```
+
+        This will create a [Prompt](https://humanloop.com/docs/explanation/prompts) with the following attributes:
+
+ ```python
+ {
+ model: "gpt-4o",
+ endpoint: "chat",
+ template: "You are an assistant on the following topics: {topics}.",
+ provider: "openai",
+ max_tokens: 200,
+ temperature: 0.8,
+ frequency_penalty: 0.5,
+ }
+
+ Every call to the decorated function will create a Log against the Prompt. For example:
+
+ ```python
+ call_llm(messages=[
+ {"role": "system", "content": "You are an assistant on the following topics: finance."}
+ {"role": "user", "content": "What can you do?"}
+ ])
+ ```
+
+ The Prompt Log will be created with the following inputs:
+ ```python
+ {
+ "inputs": {
+ "topics": "finance"
+ },
+            "messages": [
+                {"role": "system", "content": "You are an assistant on the following topics: finance."},
+                {"role": "user", "content": "What can you do?"}
+            ],
+            "output": "Hello, I'm an assistant that can help you with anything related to finance."
+ }
+ ```
:param path: The path where the Prompt is created. If not
provided, the function name is used as the path and the File
@@ -194,7 +261,7 @@ def prompt(
Only `{"type": "json_object"}` is currently supported
for chat.
"""
- return prompt_decorator(
+ return prompt_decorator_factory(
path=path,
model=model,
endpoint=endpoint,
@@ -218,14 +285,60 @@ def tool(
setup_values: Optional[dict[str, Optional[Any]]] = None,
attributes: Optional[dict[str, Optional[Any]]] = None,
):
- """Decorator to mark a function as a Humanloop Tool.
-
- The decorator inspect the wrapped function signature and code to infer
- the File kernel and JSON schema for the Tool. Any change to the decorated
- function will create a new version of the Tool, provided that the path
- remains the same.
-
- Every call to the decorated function will create a Log against the Tool.
+ """Decorator for declaring a [Tool](https://humanloop.com/docs/explanation/tools) in code.
+
+ The decorator inspects the wrapped function's source code, name,
+ argument type hints and docstring to infer the values that define
+ the [Tool](https://humanloop.com/docs/explanation/tools).
+
+ If the [Tool](https://humanloop.com/docs/explanation/tools) already exists
+ on the specified path, a new version will be upserted when any of the
+ above change.
+
+ Here's an example of declaring a [Tool](https://humanloop.com/docs/explanation/tools) in code:
+
+ ```python
+        @tool()
+        def calculator(a: int, b: Optional[int] = None) -> int:
+            \"\"\"Add two numbers together.\"\"\"
+            return a + (b or 0)
+ ```
+
+ This will create a [Tool](https://humanloop.com/docs/explanation/tools) with the following attributes:
+ ```python
+ {
+            "strict": True,
+            "function": {
+                "name": "calculator",
+                "description": "Add two numbers together.",
+                "parameters": {
+                    "type": "object",
+                    "properties": {
+                        "a": {"type": "integer"},
+                        "b": {"type": ["integer", "null"]}
+                    },
+                    "required": ["a"],
+ },
+ }
+ }
+ ```
+
+ Every call to the decorated function will create a Log against the Tool. For example:
+
+ ```python
+ calculator(a=1, b=2)
+ ```
+
+ Will create the following Log:
+
+ ```python
+ {
+ "inputs": {
+                "a": 1,
+                "b": 2
+            },
+            "output": 3
+        }
+        ```
:param path: The path to the Tool. If not provided, the function name
will be used as the path and the File will be created in the root
@@ -238,7 +351,7 @@ def tool(
Helpful to separate Tool versions from each other
with details on how they were created or used.
"""
- return tool_decorator(
+ return tool_decorator_factory(
path=path,
setup_values=setup_values,
attributes=attributes,
@@ -248,16 +361,39 @@ def flow(
self,
*,
path: Optional[str] = None,
- attributes: dict[str, typing.Any] = {},
+ attributes: Optional[dict[str, typing.Any]] = None,
):
- """Decorator to log a Flow to the Humanloop API.
-
- The decorator logs the inputs and outputs of the decorated function to
- create a Log against the Flow in Humanloop.
-
- The decorator is an entrypoint to the instrumented AI feature. Decorated
- functions called in the context of function decorated with Flow will create
- a Trace in Humanloop.
+ """Decorator for declaring a [Flow](https://humanloop.com/docs/explanation/flows) in code.
+
+ A [Flow](https://humanloop.com/docs/explanation/flows) decorator should be added
+ at the entrypoint of your LLM feature. Call other functions decorated with
+ Humanloop SDK decorators to create a Trace of Logs on Humanloop.
+
+ Here's an example of declaring a [Flow](https://humanloop.com/docs/explanation/flows) in code:
+ ```python
+ @prompt(template="You are an assistant on the following topics: {topics}.")
+ def call_llm(messages):
+ client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
+ return client.chat.completions.create(
+ model="gpt-4o",
+ temperature=0.8,
+ frequency_penalty=0.5,
+ max_tokens=200,
+ messages=messages,
+ ).choices[0].message.content
+
+ @flow(attributes={"version": "v1"})
+ def entrypoint():
+            messages = []
+            while True:
+ user_input = input("You: ")
+ if user_input == "exit":
+ break
+ messages.append({"role": "user", "content": user_input})
+ response = call_llm(messages)
+ messages.append({"role": "assistant", "content": response})
+ print(f"Assistant: {response}")
+ ```
:param path: The path to the Flow. If not provided, the function name
will be used as the path and the File will be created in the root
@@ -265,7 +401,9 @@ def flow(
:param attributes: A key-value object identifying the Flow Version.
"""
- return flow_decorator(path=path, attributes=attributes)
+ if attributes is None:
+ attributes = {}
+ return flow_decorator_factory(path=path, attributes=attributes)
class AsyncHumanloop(AsyncBaseHumanloop):
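
The `attributes` default changed above (and again in `decorators/flow.py` below) sidesteps Python's shared-mutable-default pitfall; a minimal illustration with hypothetical functions:

```python
from typing import Optional

def bad(attributes: dict = {}):
    # The same dict object is reused across every call
    attributes["calls"] = attributes.get("calls", 0) + 1
    return attributes

def good(attributes: Optional[dict] = None):
    # A fresh dict is created per call when the caller passes nothing
    if attributes is None:
        attributes = {}
    attributes["calls"] = attributes.get("calls", 0) + 1
    return attributes

bad(), bad()    # second call returns {'calls': 2} -- state leaked
good(), good()  # each call returns {'calls': 1}
```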
diff --git a/src/humanloop/decorators/flow.py b/src/humanloop/decorators/flow.py
index ca4d8d73..89d1c8f2 100644
--- a/src/humanloop/decorators/flow.py
+++ b/src/humanloop/decorators/flow.py
@@ -3,32 +3,41 @@
from typing import Any, Callable, Optional
from humanloop.decorators.helpers import args_to_inputs
-from humanloop.otel import get_trace_context, get_tracer, pop_trace_context, push_trace_context
+from humanloop.otel import get_humanloop_sdk_tracer, get_trace_parent_metadata, pop_trace_context, push_trace_context
from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY, HL_TRACE_METADATA_KEY, OT_EMPTY_ATTRIBUTE
from humanloop.otel.helpers import write_to_opentelemetry_span
def flow(
path: Optional[str] = None,
- attributes: dict[str, Any] = {},
+ attributes: Optional[dict[str, Any]] = None,
):
+ if attributes is None:
+ attributes = {}
+
def decorator(func: Callable):
@wraps(func)
def wrapper(*args, **kwargs):
- with get_tracer().start_as_current_span(str(uuid.uuid4())) as span:
- trace_metadata = get_trace_context()
+ with get_humanloop_sdk_tracer().start_as_current_span(str(uuid.uuid4())) as span:
+ trace_metadata = get_trace_parent_metadata()
if trace_metadata:
+ # Add Trace metadata to the Span so it can be correctly
+ # linked to the parent Span. trace_metadata will be
+ # non-null if the function is called by a @flow
+ # decorated function.
write_to_opentelemetry_span(
span=span,
key=HL_TRACE_METADATA_KEY,
value={
- **trace_metadata,
+ "trace_parent_id": trace_metadata["trace_parent_id"],
"trace_id": span.get_span_context().span_id,
"is_flow_log": True,
},
)
else:
+ # The Flow Log is not nested under another Flow Log
+ # Set the trace_id to the current span_id
write_to_opentelemetry_span(
span=span,
key=HL_TRACE_METADATA_KEY,
@@ -37,8 +46,13 @@ def wrapper(*args, **kwargs):
"is_flow_log": True,
},
)
- # Set this as the Flow to which Logs are appended
- # Important: Flows might be nested under each other
+
+ # Add Trace metadata to the context for the children
+ # Spans to be able to link to the parent Span
+ # Unlike other decorators, which push to context stack
+ # only if trace_metadata is present, this decorator
+ # always pushes to context stack since it is responsible
+ # for creating the context stack
push_trace_context(
{
"trace_id": span.get_span_context().span_id,
@@ -47,10 +61,7 @@ def wrapper(*args, **kwargs):
},
)
- result = func(*args, **kwargs)
-
- pop_trace_context()
-
+ # Write the Flow Kernel to the Span on HL_FILE_OT_KEY
write_to_opentelemetry_span(
span=span,
key=HL_FILE_OT_KEY,
@@ -61,16 +72,27 @@ def wrapper(*args, **kwargs):
"flow": {"attributes": attributes} if attributes else OT_EMPTY_ATTRIBUTE,
},
)
+
+ # Call the decorated function
+ output = func(*args, **kwargs)
+
+ # All children Spans have been created when the decorated function returns
+ # Remove the Trace metadata from the context so the siblings can have
+ # their children linked properly
+ pop_trace_context()
+
+ # Write the Flow Log to the Span on HL_LOG_OT_KEY
write_to_opentelemetry_span(
span=span,
key=HL_LOG_OT_KEY,
value={
"inputs": args_to_inputs(func, args, kwargs),
- "output": result,
+ "output": output,
},
)
- return result
+ # Return the output of the decorated function
+ return output
return wrapper
diff --git a/src/humanloop/decorators/helpers.py b/src/humanloop/decorators/helpers.py
index 2c4e7195..d501f800 100644
--- a/src/humanloop/decorators/helpers.py
+++ b/src/humanloop/decorators/helpers.py
@@ -3,6 +3,18 @@
def args_to_inputs(func: Callable, args: tuple, kwargs: dict) -> dict[str, Any]:
+ """Maps arguments to their corresponding parameter names in the function signature.
+
+ For example:
+ ```python
+ def foo(a, b=2, c=3):
+ pass
+
+ assert args_to_inputs(foo, (1, 2), {}) == {'a': 1, 'b': 2, 'c': 3}
+ assert args_to_inputs(foo, (1,), {'b': 8}) == {'a': 1, 'b': 8, 'c': 3}
+ assert args_to_inputs(foo, (1,), {}) == {'a': 1, 'b': 2, 'c': 3}
+ ```
+ """
signature = inspect.signature(func)
bound_args = signature.bind(*args, **kwargs)
bound_args.apply_defaults()
diff --git a/src/humanloop/decorators/prompt.py b/src/humanloop/decorators/prompt.py
index 7a44fe3f..04940aa8 100644
--- a/src/humanloop/decorators/prompt.py
+++ b/src/humanloop/decorators/prompt.py
@@ -2,7 +2,7 @@
from functools import wraps
from typing import Any, Callable, Optional
-from humanloop.otel import get_trace_context, get_tracer, pop_trace_context, push_trace_context
+from humanloop.otel import get_humanloop_sdk_tracer, get_trace_parent_metadata, pop_trace_context, push_trace_context
from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY, HL_TRACE_METADATA_KEY
from humanloop.otel.helpers import write_to_opentelemetry_span
from humanloop.types.model_endpoints import ModelEndpoints
@@ -48,17 +48,21 @@ def decorator(func: Callable):
@wraps(func)
def wrapper(*args, **kwargs):
- with get_tracer().start_as_current_span(str(uuid.uuid4())) as span:
- trace_metadata = get_trace_context()
+ with get_humanloop_sdk_tracer().start_as_current_span(str(uuid.uuid4())) as span:
+ trace_metadata = get_trace_parent_metadata()
if trace_metadata:
- # We are in a Flow context
+ # Add Trace metadata to the Span so it can be correctly
+ # linked to the parent Span. trace_metadata will be
+ # non-null if the function is called by a @flow
+ # decorated function.
write_to_opentelemetry_span(
span=span,
key=HL_TRACE_METADATA_KEY,
value={**trace_metadata, "is_flow_log": False},
)
- # Set current Prompt to act as parent for Logs nested underneath
+ # Add Trace metadata to the context for the children
+ # Spans to be able to link to the parent Span
push_trace_context(
{
**trace_metadata,
@@ -67,11 +71,15 @@ def wrapper(*args, **kwargs):
},
)
+ # Write the Prompt Kernel to the Span on HL_FILE_OT_KEY
write_to_opentelemetry_span(
span=span,
key=HL_FILE_OT_KEY,
value={
"path": path if path else func.__name__,
+ # Values not specified in the decorator will be
+ # completed with the intercepted values from the
+ # Instrumentors for LLM providers
"prompt": {
"template": template,
"temperature": temperature,
@@ -90,28 +98,28 @@ def wrapper(*args, **kwargs):
},
)
- try:
- output = func(*args, **kwargs)
- except Exception as e:
- # TODO Some fails coming from here, they result in a fast end or duplicate
- # spans outputted to the Humanloop API
- print(e)
- span.record_exception(e)
- output = None
+ # Call the decorated function
+ output = func(*args, **kwargs)
+ # All children Spans have been created when the decorated function returns
+ # Remove the Trace metadata from the context so the siblings can have
+ # their children linked properly
if trace_metadata:
# Go back to previous trace context in Trace context
pop_trace_context()
- hl_log = {}
+ prompt_log = {}
if output:
- hl_log["output"] = output
+ prompt_log["output"] = output
+
+ # Write the Prompt Log to the Span on HL_LOG_OT_KEY
write_to_opentelemetry_span(
span=span,
key=HL_LOG_OT_KEY,
- value=hl_log,
+ value=prompt_log,
)
+ # Return the output of the decorated function
return output
return wrapper
diff --git a/src/humanloop/decorators/tool.py b/src/humanloop/decorators/tool.py
index 9a229e22..e44e6d90 100644
--- a/src/humanloop/decorators/tool.py
+++ b/src/humanloop/decorators/tool.py
@@ -6,7 +6,7 @@
from functools import wraps
from typing import Any, Callable, Mapping, Optional, Sequence, TypedDict, Union
-from humanloop.otel import get_trace_context, get_tracer, pop_trace_context, push_trace_context
+from humanloop.otel import get_humanloop_sdk_tracer, get_trace_parent_metadata, pop_trace_context, push_trace_context
from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY, HL_TRACE_METADATA_KEY
from humanloop.otel.helpers import write_to_opentelemetry_span
from humanloop.types.tool_function import ToolFunction
@@ -15,66 +15,374 @@
from .helpers import args_to_inputs
-def _extract_annotation_signature(annotation: typing.Type) -> Union[list, tuple]:
+def tool(
+ path: Optional[str] = None,
+ setup_values: Optional[dict[str, Optional[Any]]] = None,
+ attributes: Optional[dict[str, typing.Any]] = None,
+ strict: bool = True,
+):
+ def decorator(func: Callable):
+ tool_kernel = _build_tool_kernel(
+ func=func,
+ attributes=attributes,
+ setup_values=setup_values,
+ strict=strict,
+ )
+
+ # Mypy complains about adding attribute on function but it's nice UX
+ func.json_schema = tool_kernel.function.model_dump() # type: ignore
+
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ with get_humanloop_sdk_tracer().start_as_current_span(str(uuid.uuid4())) as span:
+ trace_metadata = get_trace_parent_metadata()
+
+ if trace_metadata:
+ # Add Trace metadata to the Span so it can be correctly
+ # linked to the parent Span. trace_metadata will be
+ # non-null if the function is called by a @flow
+ # decorated function.
+ write_to_opentelemetry_span(
+ span=span,
+ key=HL_TRACE_METADATA_KEY,
+ value={**trace_metadata, "is_flow_log": False},
+ )
+ # Add Trace metadata to the context for the children
+ # Spans to be able to link to the parent Span
+ push_trace_context(
+ {
+ **trace_metadata,
+ "trace_parent_id": span.get_span_context().span_id,
+ "is_flow_log": False,
+ }
+ )
+
+ # Write the Tool Kernel to the Span on HL_FILE_OT_KEY
+ write_to_opentelemetry_span(
+ span=span,
+ key=HL_FILE_OT_KEY,
+ value={
+ "path": path if path else func.__name__,
+ "tool": tool_kernel.model_dump(),
+ },
+ )
+
+ # Call the decorated function
+ output = func(*args, **kwargs)
+
+ # All children Spans have been created when the decorated function returns
+ # Remove the Trace metadata from the context so the siblings can have
+ # their children linked properly
+ if trace_metadata:
+ pop_trace_context()
+
+ # Populate known Tool Log attributes
+ tool_log = {
+ "inputs": args_to_inputs(func, args, kwargs),
+ }
+ if output:
+ tool_log["output"] = output
+
+ # Write the Tool Log to the Span on HL_LOG_OT_KEY
+ write_to_opentelemetry_span(
+ span=span,
+ key=HL_LOG_OT_KEY,
+ value=tool_log,
+ )
+
+ # Return the output of the decorated function
+ return output
+
+ return wrapper
+
+ return decorator
+
+
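
For orientation, a small usage sketch of this factory (assuming a `Humanloop` client has already been constructed, since the wrapper asserts that the SDK tracer is set):

```python
from typing import Optional

from humanloop.decorators.tool import tool

@tool(path="utils/calculator")
def calculator(a: int, b: Optional[int] = None) -> int:
    """Add two numbers together."""
    return a + (b or 0)

calculator(a=1, b=2)  # creates a Span that the exporter turns into a Tool Log

# The JSON Schema inferred from the signature is exposed on the function
print(calculator.json_schema)
```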
+def _build_tool_kernel(
+ func: Callable,
+ attributes: Optional[dict[str, Optional[Any]]],
+ setup_values: Optional[dict[str, Optional[Any]]],
+ strict: bool,
+) -> ToolKernelRequest:
+ """Build ToolKernelRequest object from decorated function."""
+ return ToolKernelRequest(
+ source_code=textwrap.dedent(
+ # Remove the tool decorator from source code
+ inspect.getsource(func).split("\n", maxsplit=1)[1]
+ ),
+ attributes=attributes,
+ setup_values=setup_values,
+ function=_build_function_property(
+ func=func,
+ strict=strict,
+ ),
+ )
+
+
+def _build_function_property(func: Callable, strict: bool) -> ToolFunction:
+ """Build `function` property inside ToolKernelRequest."""
+ tool_name = func.__name__
+ description = func.__doc__
+ if description is None:
+ description = ""
+ return ToolFunction(
+ name=tool_name,
+ description=description,
+ parameters=_build_function_parameters_property(func),
+ strict=strict,
+ )
+
+
+class _JSONSchemaFunctionParameters(TypedDict):
+ type: str
+ properties: dict[str, dict]
+ required: list[str]
+
+
+def _build_function_parameters_property(func) -> _JSONSchemaFunctionParameters:
+ """Build `function.parameters` property inside ToolKernelRequest."""
+ properties: dict[str, Any] = {}
+ required: list[str] = []
+ signature = inspect.signature(func)
+
+ for parameter in signature.parameters.values():
+ if parameter.kind in (
+ inspect.Parameter.VAR_POSITIONAL,
+ inspect.Parameter.VAR_KEYWORD,
+ ):
+ raise ValueError(f"{func.__name__}: Varargs and kwargs are not supported by the @tool decorator")
+
+ for parameter in signature.parameters.values():
+ try:
+ parameter_signature = _parse_annotation(parameter.annotation)
+ except ValueError as e:
+ raise ValueError(f"{func.__name__}: {e.args[0]}") from e
+ param_json_schema = _annotation_parse_to_json_schema(parameter_signature)
+ properties[parameter.name] = param_json_schema
+ if not _parameter_is_optional(parameter):
+ required.append(parameter.name)
+
+ if len(properties) == 0 and len(required) == 0:
+ # Edge case: function with no parameters
+ return _JSONSchemaFunctionParameters(
+ type="object",
+ properties={},
+ required=[],
+ )
+ return _JSONSchemaFunctionParameters(
+ type="object",
+ # False positive, expected tuple[str] but got tuple[str, ...]
+ required=tuple(required), # type: ignore
+ properties=properties,
+ )
+
+
+def _parse_annotation(annotation: typing.Type) -> Union[list, tuple]:
+ """Parse constituent parts of a potentially nested type hint.
+
+ Custom types are not supported, only built-in types and typing module types.
+
+    The method returns potentially nested lists, with each list describing a
+ level of type nesting. For a nested type, the function recursively calls
+ itself to parse the inner type.
+
+ When the annotation is optional, a tuple is returned with the inner type
+ to signify that the parameter is nullable.
+
+ For lists, a list with two elements is returned, where the first element
+ is the list type and the second element is the inner type.
+
+ For dictionaries, a list with three elements is returned, where the first
+ element is the dict type, the second element is the key type, and the
+ third element is the value type.
+
+    For tuples, a list where the first element is the tuple type and the rest
+ describes the inner types.
+
+ For Union types, a list with the first element being the Union type and
+ the rest describing the inner types.
+
+ Note that for nested types that lack inner type, e.g. list instead of
+ list[str], the inner type is set to inspect._empty. This edge case is
+ handled by _annotation_parse_to_json_schema.
+
+ Examples:
+ str -> [str]
+        Optional[str] -> (str,)
+        str | None -> (str,)
+
+        list[str] -> [list, [str]]
+        Optional[list[str]] -> (list, [str])
+
+        dict[str, int] -> [dict, [str], [int]]
+        Optional[dict[str, int]] -> (dict, [str], [int])
+
+        list[list[str]] -> [list, [list, [str]]]
+        list[Optional[list[str]]] -> [list, (list, [str])]
+
+        dict[str, Optional[int]] -> [dict, [str], (int,)]
+
+        Union[str, int] -> [Union, [str], [int]]
+
+        tuple[str, int, list[str]] -> [tuple, [str], [int], [list, [str]]]
+        tuple[Optional[str], int, Optional[list[str]]] -> [tuple, (str,), [int], (list, [str])]
+
+ list -> [list]
+ """
origin = typing.get_origin(annotation)
if origin is None:
- if annotation is inspect._empty:
- raise ValueError("Empty type hint annotation")
+ # Either not a nested type or no type hint
+ # inspect._empty is used for parameters without type hints
+ if annotation not in (str, int, float, bool, inspect._empty, dict, list, tuple):
+ raise ValueError(f"Unsupported type hint: {annotation}")
return [annotation]
if origin is list:
- inner_type = _extract_annotation_signature(typing.get_args(annotation)[0])
+ inner_type = _parse_annotation(typing.get_args(annotation)[0])
return [origin, inner_type]
if origin is dict:
- key_type = _extract_annotation_signature(typing.get_args(annotation)[0])
- value_type = _extract_annotation_signature(typing.get_args(annotation)[1])
+ key_type = _parse_annotation(typing.get_args(annotation)[0])
+ value_type = _parse_annotation(typing.get_args(annotation)[1])
return [origin, key_type, value_type]
if origin is tuple:
return [
origin,
- *[_extract_annotation_signature(arg) for arg in typing.get_args(annotation)],
+ *[_parse_annotation(arg) for arg in typing.get_args(annotation)],
]
if origin is typing.Union:
sub_types = typing.get_args(annotation)
if sub_types[-1] is type(None):
# Union is an Optional type
if len(sub_types) == 2:
- return tuple(_extract_annotation_signature(sub_types[0]))
+ return tuple(_parse_annotation(sub_types[0]))
return (
origin,
- *[_extract_annotation_signature(sub_type) for sub_type in sub_types[:-1]],
+ *[_parse_annotation(sub_type) for sub_type in sub_types[:-1]],
)
# Union type
return [
origin,
- *[_extract_annotation_signature(sub_type) for sub_type in sub_types],
+ *[_parse_annotation(sub_type) for sub_type in sub_types],
]
raise ValueError(f"Unsupported origin: {origin}")
-def _build_json_schema_parameter(arg: Union[list, tuple]) -> Mapping[str, Union[str, Mapping, Sequence]]:
+def _annotation_parse_to_json_schema(arg: Union[list, tuple]) -> Mapping[str, Union[str, Mapping, Sequence]]:
+ """
+ Convert parse result from _parse_annotation to JSON Schema for a parameter.
+
+ The function recursively converts the nested type hints to a JSON Schema.
+
+ Note that 'any' is not supported by JSON Schema, so we allow any type as a workaround.
+
+ Examples:
+ [str] -> {"type": "string"}
+        (str,) -> {"type": ["string", "null"]}
+
+ [list, [str]] -> {"type": "array", "items": {"type": "string"}}
+ (list, [str]) -> {"type": ["array", "null"], "items": {"type": "string"}}
+
+ [dict, [str], [int]] ->
+ {
+ "type": "object",
+ "properties": {
+ "key": {"type": "string"},
+ "value": {"type": "integer"}
+ }
+ }
+
+        [list, [list, [str]]] ->
+        {
+            "type": "array",
+            "items": {
+                "type": "array",
+                "items": {"type": "string"}
+            }
+        }
+
+        [tuple, [str], [int], [list, [str]]] ->
+        {
+            "type": "array",
+            "items": [
+ {"type": "string"},
+ {"type": "integer"},
+ {
+ "type": "array",
+ "items": {"type": "string"}
+ }
+ ]
+ }
+
+        [Union, [str], [int]] ->
+ {
+ "anyOf": [
+ {"type": "string"},
+ {"type": "integer"}
+ ]
+ }
+
+        [dict, [int], [list]] ->
+        {
+            "type": "object",
+            "properties": {
+                "key": {"type": "integer"},
+                "value": {
+                    "type": "array",
+                    "items": {"type": ["string", "integer", "number", "boolean", "object", "array", "null"]}
+                }
+            }
+        }
+
+        (list,) ->
+ {
+ "type": ["array", "null"],
+ "items": {"type": ["string", "integer", "number", "boolean", "object", "array", "null"]},
+ }
+ """
is_nullable = isinstance(arg, tuple)
arg_type: Mapping[str, Union[str, Mapping, Sequence]]
if arg[0] is typing.Union:
arg_type = {
- "anyOf": [_build_json_schema_parameter(sub_type) for sub_type in arg[1:]],
+ "anyOf": [_annotation_parse_to_json_schema(sub_type) for sub_type in arg[1:]],
}
if arg[0] is tuple:
- arg_type = {
- "type": "array",
- "items": [_build_json_schema_parameter(sub_type) for sub_type in arg[1:]],
- }
+ if len(arg) == 1:
+ # tuple annotation with no type hints
+                # This is equivalent to a list, since the
+                # number of elements is not specified
+ arg_type = {
+ "type": "array",
+ "items": {"type": ["string", "integer", "number", "boolean", "object", "array", "null"]},
+ }
+ else:
+ arg_type = {
+ "type": "array",
+ "items": [_annotation_parse_to_json_schema(sub_type) for sub_type in arg[1:]],
+ }
if arg[0] is list:
+ if len(arg) == 1:
+ # list annotation with no type hints
+ if isinstance(arg, tuple):
+ # Support Optional annotation
+ arg = (list, [inspect._empty])
+ else:
+ # Support non-Optional list annotation
+ arg = [list, [inspect._empty]]
arg_type = {
"type": "array",
- "items": _build_json_schema_parameter(arg[1]),
+ "items": _annotation_parse_to_json_schema(arg[1]),
}
if arg[0] is dict:
+ if len(arg) == 1:
+ # dict annotation with no type hints
+ if isinstance(arg, tuple):
+ arg = (dict, [inspect._empty], [inspect._empty])
+ else:
+ arg = [dict, [inspect._empty], [inspect._empty]]
arg_type = {
"type": "object",
"properties": {
- "key": _build_json_schema_parameter(arg[1]),
- "value": _build_json_schema_parameter(arg[2]),
+ "key": _annotation_parse_to_json_schema(arg[1]),
+ "value": _annotation_parse_to_json_schema(arg[2]),
},
}
if arg[0] is builtins.str:
@@ -85,6 +393,9 @@ def _build_json_schema_parameter(arg: Union[list, tuple]) -> Mapping[str, Union[
arg_type = {"type": "number"}
if arg[0] is builtins.bool:
arg_type = {"type": "boolean"}
+ if arg[0] is inspect._empty:
+ # JSON Schema dropped support for 'any' type, we allow any type as a workaround
+ arg_type = {"type": ["string", "integer", "number", "boolean", "object", "array", "null"]}
if is_nullable:
if arg[0] is typing.Union:
@@ -98,153 +409,16 @@ def _build_json_schema_parameter(arg: Union[list, tuple]) -> Mapping[str, Union[
return arg_type
-class JSONSchemaFunctionParameters(TypedDict):
- type: str
- properties: dict[str, dict]
- required: list[str]
-
-
def _parameter_is_optional(parameter: inspect.Parameter) -> bool:
- """Check if tool parameter is mandatory."""
+ """Check if tool parameter is mandatory.
+
+ Examples:
+ Optional[T] -> True
+ T | None -> True
+ T -> False
+ """
# Check if the parameter can be None, either via Optional[T] or T | None type hint
origin = typing.get_origin(parameter.annotation)
# sub_types refers to T inside the annotation
sub_types = typing.get_args(parameter.annotation)
return origin is typing.Union and len(sub_types) > 0 and sub_types[-1] is type(None)
-
-
-def _parse_tool_parameters_schema(func) -> JSONSchemaFunctionParameters:
- properties: dict[str, Any] = {}
- required: list[str] = []
- signature = inspect.signature(func)
-
- for parameter in signature.parameters.values():
- if parameter.kind in (
- inspect.Parameter.VAR_POSITIONAL,
- inspect.Parameter.VAR_KEYWORD,
- ):
- raise ValueError(f"{func.__name__}: Varargs and kwargs are not supported by the @tool decorator")
-
- for parameter in signature.parameters.values():
- try:
- parameter_signature = _extract_annotation_signature(parameter.annotation)
- except ValueError as e:
- raise ValueError(f"{func.__name__}: {parameter.name} lacks a type hint annotation") from e
- param_json_schema = _build_json_schema_parameter(parameter_signature)
- properties[parameter.name] = param_json_schema
- if not _parameter_is_optional(parameter):
- required.append(parameter.name)
-
- if len(properties) == 0 and len(required) == 0:
- # Edge case: function with no parameters
- return JSONSchemaFunctionParameters(
- type="object",
- properties={},
- required=[],
- )
- return JSONSchemaFunctionParameters(
- type="object",
- # False positive, expected tuple[str] but got tuple[str, ...]
- required=tuple(required), # type: ignore
- properties=properties,
- )
-
-
-def _tool_json_schema(func: Callable, strict: bool) -> ToolFunction:
- tool_name = func.__name__
- description = func.__doc__
- if description is None:
- description = ""
- return ToolFunction(
- name=tool_name,
- description=description,
- parameters=_parse_tool_parameters_schema(func),
- strict=strict,
- )
-
-
-def _build_tool_kernel(
- func: Callable,
- attributes: Optional[dict[str, Optional[Any]]],
- setup_values: Optional[dict[str, Optional[Any]]],
- strict: bool,
-) -> ToolKernelRequest:
- return ToolKernelRequest(
- source_code=textwrap.dedent(
- # Remove the tool decorator from source code
- inspect.getsource(func).split("\n", maxsplit=1)[1]
- ),
- attributes=attributes,
- setup_values=setup_values,
- function=_tool_json_schema(
- func=func,
- strict=strict,
- ),
- )
-
-
-def tool(
- path: Optional[str] = None,
- setup_values: Optional[dict[str, Optional[Any]]] = None,
- attributes: Optional[dict[str, typing.Any]] = None,
- strict: bool = True,
-):
- def decorator(func: Callable):
- # Complains about adding attribute on function
- # Nice UX, but mypy doesn't like it
- file_obj = _build_tool_kernel(
- func=func,
- attributes=attributes,
- setup_values=setup_values,
- strict=strict,
- )
-
- func.json_schema = file_obj.function.model_dump() # type: ignore
-
- @wraps(func)
- def wrapper(*args, **kwargs):
- with get_tracer().start_as_current_span(str(uuid.uuid4())) as span:
- trace_metadata = get_trace_context()
-
- if trace_metadata:
- write_to_opentelemetry_span(
- span=span,
- key=HL_TRACE_METADATA_KEY,
- value={**trace_metadata, "is_flow_log": False},
- )
- push_trace_context(
- {
- **trace_metadata,
- "trace_parent_id": span.get_span_context().span_id,
- "is_flow_log": False,
- }
- )
-
- output = func(*args, **kwargs)
- if trace_metadata:
- pop_trace_context()
-
- tool_log = {
- "inputs": args_to_inputs(func, args, kwargs),
- }
- if output:
- tool_log["output"] = output
-
- write_to_opentelemetry_span(
- span=span,
- key=HL_FILE_OT_KEY,
- value={
- "path": path if path else func.__name__,
- "tool": file_obj.model_dump(),
- },
- )
- write_to_opentelemetry_span(
- span=span,
- key=HL_LOG_OT_KEY,
- value=tool_log,
- )
- return output
-
- return wrapper
-
- return decorator
diff --git a/src/humanloop/otel/__init__.py b/src/humanloop/otel/__init__.py
index 169b7ded..ee5073f4 100644
--- a/src/humanloop/otel/__init__.py
+++ b/src/humanloop/otel/__init__.py
@@ -8,17 +8,41 @@
from humanloop.otel.constants import HL_TRACE_METADATA_KEY
from humanloop.otel.helpers import module_is_installed
+"""
+Tracer to which Humanloop decorators will write Spans.
+Humanloop SDK will instantiate one for the decorators
+if the user does not provide a Tracer in the Humanloop
+client.
+"""
_TRACER = None
-_BAGGAGE_CONTEXT: list[Context] = [Context()]
+"""
+Humanloop SDK uses the Baggage concept from OTel
+to store the Trace metadata. Read more here:
+https://opentelemetry.io/docs/concepts/signals/baggage/
-def set_tracer(tracer: Tracer):
- global _TRACER
+The top of the stack contains the Trace information of
+the parent Span.
+
+When a Span is created by a decorator, the metadata of
+that Span is pushed to the stack so the children can
+peek at it and determine its parent in a Flow Trace.
+
+When the parent Span is completed, the context is popped
+off the stack.
+"""
+_BAGGAGE_CONTEXT_STACK: list[Context] = [Context()]
+
+
+def set_humanloop_sdk_tracer(tracer: Tracer):
+ """Set Tracer used by Humanloop SDK to instrument the decorators."""
+ global _TRACER # noqa: PLW0603
_TRACER = tracer
-def get_tracer() -> Tracer:
+def get_humanloop_sdk_tracer() -> Tracer:
+ """Get Tracer used by Humanloop SDK to instrument the decorators."""
assert _TRACER is not None, "Internal error: OTT Tracer should have been set in the client"
return _TRACER
@@ -26,7 +50,8 @@ def get_tracer() -> Tracer:
def instrument_provider(provider: TracerProvider):
"""Add Instrumentors to the TracerProvider.
- Instrumentors add extra spans which are merged in Humanloop Span logs.
+    Instrumentors intercept calls to libraries such as the OpenAI client
+    and add metadata to the Spans created by the decorators.
"""
if module_is_installed("openai"):
from opentelemetry.instrumentation.openai import OpenAIInstrumentor
@@ -43,51 +68,47 @@ def instrument_provider(provider: TracerProvider):
AnthropicInstrumentor().instrument(tracer_provider=provider)
- if module_is_installed("mistralai"):
- from opentelemetry.instrumentation.mistralai import MistralAiInstrumentor
-
- # TODO: Need to to a PR to the instrumentor to support > 1.0.0 Mistral clients
- MistralAiInstrumentor().instrument(tracer_provider=provider)
-
if module_is_installed("groq"):
from opentelemetry.instrumentation.groq import GroqInstrumentor
GroqInstrumentor().instrument(tracer_provider=provider)
- if module_is_installed("replicate"):
- from opentelemetry.instrumentation.replicate import ReplicateInstrumentor
+ # NOTE: ReplicateInstrumentor would require us to bump minimum Python version from 3.8 to 3.9
+ # TODO: Do a PR against the open-source ReplicateInstrumentor to support lower Python versions
+ # if module_is_installed("replicate"):
+ # from opentelemetry.instrumentation.replicate import ReplicateInstrumentor
- ReplicateInstrumentor().instrument(tracer_provider=provider)
+ # ReplicateInstrumentor().instrument(tracer_provider=provider)
def push_trace_context(trace_metadata: dict):
- """Set metadata for Trace parent.
+ """Push Trace metadata for a parent Span.
- Used before the wrapped function is executed. All decorated functions
- called from the decorated function will use this metadata to determine
- the Log it should be associated to in Flow Trace.
+ Expected to be called when the Span is created
+ and before the wrapped function is executed.
+ Calling a wrapped function may create children
+ Spans, which will need to peek at the parent's
+ metadata.
"""
- global _BAGGAGE_CONTEXT
new_context = baggage.set_baggage(
HL_TRACE_METADATA_KEY,
trace_metadata,
- _BAGGAGE_CONTEXT[-1],
+ _BAGGAGE_CONTEXT_STACK[-1],
)
- _BAGGAGE_CONTEXT.append(new_context)
+ _BAGGAGE_CONTEXT_STACK.append(new_context)
def pop_trace_context():
- """Clear Trace parent metadata.
+ """Clear Trace metadata for a parent Span.
- Used after the wrapped function has been executed.
+ Expected to be called after the wrapped function
+ is executed. This allows Spans on the same level
+ to peek at their parent Trace metadata.
"""
- global _BAGGAGE_CONTEXT
- _BAGGAGE_CONTEXT.pop()
-
+ _BAGGAGE_CONTEXT_STACK.pop()
-def get_trace_context() -> Optional[object]:
- """Get Trace parent metadata for Flows."""
- global _BAGGAGE_CONTEXT
+def get_trace_parent_metadata() -> Optional[object]:
+ """Peek at Trace metadata stack."""
- return baggage.get_baggage(HL_TRACE_METADATA_KEY, _BAGGAGE_CONTEXT[-1])
+ return baggage.get_baggage(HL_TRACE_METADATA_KEY, _BAGGAGE_CONTEXT_STACK[-1])
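
To make the stack discipline concrete, a minimal sketch using the renamed helpers (the metadata payload mirrors what the decorators write; the span IDs are stand-in integers):

```python
from humanloop.otel import (
    get_trace_parent_metadata,
    pop_trace_context,
    push_trace_context,
)

# A parent decorator pushes its metadata before calling the wrapped function...
push_trace_context({"trace_id": 1, "trace_parent_id": 1, "is_flow_log": True})

# ...so Spans created inside that call can peek at their parent...
assert get_trace_parent_metadata()["trace_parent_id"] == 1

# ...and the parent pops once the wrapped function returns, letting
# sibling Spans resolve their own parent correctly.
pop_trace_context()
```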
diff --git a/src/humanloop/otel/exporter.py b/src/humanloop/otel/exporter.py
index e826ab20..0cee9015 100644
--- a/src/humanloop/otel/exporter.py
+++ b/src/humanloop/otel/exporter.py
@@ -1,3 +1,4 @@
+import logging
import typing
from queue import Queue
from threading import Thread
@@ -8,7 +9,7 @@
from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult
from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY, HL_TRACE_METADATA_KEY, OT_EMPTY_ATTRIBUTE
-from humanloop.otel.helpers import read_from_opentelemetry_span
+from humanloop.otel.helpers import is_humanloop_span, read_from_opentelemetry_span
from humanloop.requests.flow_kernel_request import FlowKernelRequestParams
from humanloop.requests.prompt_kernel_request import PromptKernelRequestParams
@@ -16,25 +17,46 @@
from humanloop.base_client import BaseHumanloop
+logger = logging.getLogger("humanloop.sdk")
+
+
class HumanloopSpanExporter(SpanExporter):
- """SpanExporter that uploads OpenTelemetry spans to Humanloop Humanloop spans."""
+ """Upload Spans created by SDK decorators to Humanloop.
+
+ Spans not created by Humanloop SDK decorators will be ignored.
+ """
- WORK_THREADS = 8
+ DEFAULT_NUMBER_THREADS = 4
- def __init__(self, client: "BaseHumanloop") -> None:
+ def __init__(
+ self,
+ client: "BaseHumanloop",
+ worker_threads: Optional[int] = None,
+ ) -> None:
super().__init__()
- self._client: "BaseHumanloop" = client
- self._uploaded_log_ids: dict[str, str] = {}
- self._upload_queue: Queue = Queue()
- self._threads: list[Thread] = [Thread(target=self._do_work, daemon=True) for _ in range(self.WORK_THREADS)]
- self._shutdown: bool = False
+ self._client = client
+ self._uploaded_log_ids: dict[
+ str, str
+ ] = {} # Uploaded spans translate to a Log on Humanloop. The IDs are required to link Logs in a Flow Trace
+ self._upload_queue: Queue = Queue() # Work queue for the threads uploading the spans
+ self._threads: list[Thread] = [
+ Thread(target=self._do_work, daemon=True) for _ in range(worker_threads or self.DEFAULT_NUMBER_THREADS)
+ ]
+ self._shutdown: bool = (
+ False # Signals threads no more work will arrive and they should wind down if the queue is empty
+ )
for thread in self._threads:
thread.start()
def export(self, spans: trace.Sequence[ReadableSpan]) -> SpanExportResult:
- for span in spans:
- self._upload_queue.put(span)
- return SpanExportResult.SUCCESS
+ if not self._shutdown:
+ for span in spans:
+ if is_humanloop_span(span):
+ self._upload_queue.put(span)
+ return SpanExportResult.SUCCESS
+ else:
+ logger.warning("HumanloopSpanExporter is shutting down, not accepting new spans")
+ return SpanExportResult.FAILURE
def shutdown(self) -> None:
self._shutdown = True
@@ -50,6 +72,19 @@ def force_flush(self, timeout_millis: int = 3000) -> bool:
return True
def _do_work(self):
+ """Upload spans to Humanloop.
+
+        Run by worker threads. The threads use the self._shutdown flag to wait
+        for Spans to arrive. Setting a timeout on self._upload_queue.get() risks
+        shutting down a thread early when no Spans are produced, e.g. while
+        waiting for user input into the instrumented feature or application.
+
+        Each thread will upload a Span to Humanloop, provided the Span has all its
+        dependencies uploaded. Dependencies arise in a Flow Trace context, where
+        the Trace parent must be uploaded first. The Span Processor sends in Spans
+        bottom-up, while the upload of a Trace happens top-down. If a Span's parent
+        has not been uploaded yet, the Span is requeued to be uploaded later.
+ """
# Do work while the Exporter was not instructed to
# wind down or the queue is not empty
while self._upload_queue.qsize() > 0 or not self._shutdown:
@@ -60,32 +95,56 @@ def _do_work(self):
except Exception:
continue
try:
- trace_metadata = read_from_opentelemetry_span(span_to_export, key=HL_TRACE_METADATA_KEY)
+ trace_metadata = read_from_opentelemetry_span(
+ span_to_export,
+ key=HL_TRACE_METADATA_KEY,
+ )
except KeyError:
trace_metadata = None
if "trace_parent_id" not in trace_metadata or trace_metadata["trace_parent_id"] in self._uploaded_log_ids:
# The Span is outside a Trace context or its parent has been uploaded
# we can safely upload it to Humanloop
- self._export_dispatch(span_to_export)
+ self._export_span_dispatch(span_to_export)
else: # The parent has not been uploaded yet
# Requeue the Span to be uploaded later
self._upload_queue.put(span_to_export)
self._upload_queue.task_done()
+ def _export_span_dispatch(self, span: ReadableSpan) -> None:
+ hl_file = read_from_opentelemetry_span(span, key=HL_FILE_OT_KEY)
+
+ if "prompt" in hl_file:
+ export_func = self._export_prompt
+ elif "tool" in hl_file:
+ export_func = self._export_tool
+ elif "flow" in hl_file:
+ export_func = self._export_flow
+ else:
+ raise NotImplementedError(f"Unknown span type: {hl_file}")
+ export_func(span=span)
+
def _export_prompt(self, span: ReadableSpan) -> None:
- file_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HL_FILE_OT_KEY)
- log_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HL_LOG_OT_KEY)
+ file_object: dict[str, Any] = read_from_opentelemetry_span(
+ span,
+ key=HL_FILE_OT_KEY,
+ )
+ log_object: dict[str, Any] = read_from_opentelemetry_span(
+ span,
+ key=HL_LOG_OT_KEY,
+ )
trace_metadata: Optional[dict[str, str]]
try:
- # HL_TRACE_METADATA_KEY is a dict[str, str], has no nesting
- trace_metadata = read_from_opentelemetry_span(span, key=HL_TRACE_METADATA_KEY) # type: ignore
+ trace_metadata = read_from_opentelemetry_span(
+ span,
+ key=HL_TRACE_METADATA_KEY,
+ ) # type: ignore
except KeyError:
trace_metadata = None
if trace_metadata:
trace_parent_id = self._uploaded_log_ids[trace_metadata["trace_parent_id"]]
else:
trace_parent_id = None
- prompt: Optional[PromptKernelRequestParams] = file_object["prompt"]
+ prompt: PromptKernelRequestParams = file_object["prompt"]
path: str = file_object["path"]
response = self._client.prompts.log(
path=path,
@@ -123,7 +182,6 @@ def _export_flow(self, span: ReadableSpan) -> None:
log_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HL_LOG_OT_KEY)
trace_metadata: Optional[dict[str, str]]
try:
- # HL_TRACE_METADATA_KEY is a dict[str, str], has no nesting
trace_metadata = read_from_opentelemetry_span(span, key=HL_TRACE_METADATA_KEY) # type: ignore
except KeyError:
trace_metadata = None
@@ -144,16 +202,3 @@ def _export_flow(self, span: ReadableSpan) -> None:
trace_parent_id=trace_parent_id,
)
self._uploaded_log_ids[span.context.span_id] = response.id
-
- def _export_dispatch(self, span: ReadableSpan) -> None:
- hl_file = read_from_opentelemetry_span(span, key=HL_FILE_OT_KEY)
-
- if "prompt" in hl_file:
- export_func = self._export_prompt
- elif "tool" in hl_file:
- export_func = self._export_tool
- elif "flow" in hl_file:
- export_func = self._export_flow
- else:
- raise NotImplementedError(f"Unknown span type: {hl_file}")
- export_func(span=span)
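
As a construction sketch for the reworked exporter (mirroring the mocked client used in tests/conftest.py below):

```python
from unittest.mock import MagicMock

from humanloop.otel.exporter import HumanloopSpanExporter

exporter = HumanloopSpanExporter(
    client=MagicMock(),  # stands in for a BaseHumanloop client here
    worker_threads=2,    # falls back to DEFAULT_NUMBER_THREADS (4) when omitted
)
# ...attach it via HumanloopSpanProcessor, then wind down the worker threads:
exporter.shutdown()
```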
diff --git a/src/humanloop/otel/helpers.py b/src/humanloop/otel/helpers.py
index f277028d..ac22d836 100644
--- a/src/humanloop/otel/helpers.py
+++ b/src/humanloop/otel/helpers.py
@@ -1,7 +1,7 @@
-import builtins
-from typing import Any, Union
+from typing import Union
from opentelemetry.sdk.trace import ReadableSpan
+from opentelemetry.trace import SpanKind
from opentelemetry.util.types import AttributeValue
from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY
@@ -11,6 +11,35 @@
def _list_to_ott(lst: NestedList) -> NestedDict:
+ """Transforms list of values to be written into a dictionary with index values as keys.
+
+ When writing to Otel span attributes, only primitive values or lists are allowed.
+ Nested dictionaries must be linearised. For example, writing to span attribute `foo`
+ the dictionary value {'a': 7, 'b': 'hello'} would translated in the span attributes
+ dictionary to look like:
+ ```python
+ {
+ 'foo.a': 7,
+ 'foo.b': 'hello'
+ }
+ ```
+
+    Calling :func:`write_to_opentelemetry_span` with a list as the value will have
+    the list transformed into a pseudo-dictionary with index values as keys.
+
+ Examples:
+ ```python
+ _list_to_ott([1, 2, 'a']) == {'0': 1, '1': 2, '2': 'a'}
+ _list_to_ott([
+ "baz",
+ {'a': 6, 'b': 'hello'}
+ ]) == {
+ '0': 'baz',
+ '1.a': 6,
+ '1.b': 'hello'
+ }
+ ```
+ """
return {str(idx): val if not isinstance(val, list) else _list_to_ott(val) for idx, val in enumerate(lst)}
@@ -19,15 +48,13 @@ def write_to_opentelemetry_span(
value: Union[NestedDict, NestedList],
key: str = "",
) -> None:
- """Reverse of read_from_opentelemetry_span. Writes a Python object to the OpenTelemetry Span's attributes.
+ """Write a Python object to the OpenTelemetry Span's attributes. Reverse of :func:`read_from_opentelemetry_span`.
+
+ :param span: OpenTelemetry span to write values to
- See `read_from_opentelemetry_span` for more information.
+ :param value: Python object to write to the span attributes. Can also be a primitive value.
- Arguments:
- span: OpenTelemetry span to write values to
- value: Python object to write to the span attributes. Can also be a primitive value.
- key: Key prefix to write to the span attributes. The path to the values does not
- need to exist in the span attributes.
+ :param key: Key prefix to write to the span attributes. The path to the values does not need to exist in the span attributes.
"""
to_write_copy: Union[dict, AttributeValue]
if isinstance(value, list):
@@ -36,6 +63,31 @@ def write_to_opentelemetry_span(
to_write_copy = dict(value)
linearised_attributes: dict[str, AttributeValue] = {}
work_stack: list[tuple[str, Union[AttributeValue, NestedDict]]] = [(key, to_write_copy)]
+ """
+ Recurse through the dictionary value, building the OTel format keys in a DFS manner.
+
+ Example:
+ ```python
+ {
+ 'foo': {
+ 'a': 7,
+ 'b': 'hello'
+ },
+ "baz": [42, 43]
+ }
+
+ 1. Visit foo, push ('foo.a', 7), ('foo.b', 'hello') to stack
+ 2. Visit baz, push ('baz.0', 42), ('baz.1', 43) to stack
+ 3. Take each primitive key-value from the stack and write to the span attributes,
+ resulting in:
+ {
+ 'foo.a': 7,
+ 'foo.b': 'hello',
+ 'baz.0': 42,
+ 'baz.1': 43
+ }
+ ```
+ """
while len(work_stack) > 0:
key, value = work_stack.pop() # type: ignore
if isinstance(value, dict):
@@ -54,12 +106,9 @@ def read_from_opentelemetry_span(span: ReadableSpan, key: str = "") -> NestedDic
in the span attributes. This function reconstructs the original structure from
a key prefix.
- Arguments:
- span: OpenTelemetry span to read values from
- key: Key prefix to read from the span attributes
+ :param span: OpenTelemetry span to read values from
+ :param key: Key prefix to read from the span attributes
- Returns:
- Python object stored in the span attributes under the key prefix.
Examples:
`span.attributes` contains the following attributes:
@@ -129,7 +178,7 @@ def read_from_opentelemetry_span(span: ReadableSpan, key: str = "") -> NestedDic
sub_result[part] = span_value
else:
if part not in sub_result:
- sub_result[part] = dict()
+ sub_result[part] = {}
sub_result = sub_result[part] # type: ignore
for part in key.split("."):
@@ -140,12 +189,28 @@ def read_from_opentelemetry_span(span: ReadableSpan, key: str = "") -> NestedDic
def is_llm_provider_call(span: ReadableSpan) -> bool:
"""Determines if the span was created by an Instrumentor for LLM provider clients."""
- return "llm.request.type" in span.attributes # type: ignore
+ if not span.instrumentation_scope:
+ return False
+ span_instrumentor_name = span.instrumentation_scope.name
+ # Match against the prefix of the Instrumentor name since
+ # the name might be version dependent e.g.
+ # "opentelemetry.instrumentation.openai.v1"
+ return span.kind == SpanKind.CLIENT and any(
+ span_instrumentor_name.startswith(instrumentor)
+ for instrumentor in [
+ "opentelemetry.instrumentation.openai",
+ "opentelemetry.instrumentation.groq",
+ "opentelemetry.instrumentation.anthropic",
+ "opentelemetry.instrumentation.cohere",
+ "opentelemetry.instrumentation.replicate",
+ ]
+ )
def is_humanloop_span(span: ReadableSpan) -> bool:
- """Determines if the span was created by the Humanloop SDK."""
+ """Check if the Span was created by the Humanloop SDK."""
try:
+ # Valid spans will have keys with the HL_FILE_OT_KEY and HL_LOG_OT_KEY prefixes present
read_from_opentelemetry_span(span, key=HL_FILE_OT_KEY)
read_from_opentelemetry_span(span, key=HL_LOG_OT_KEY)
except KeyError:
diff --git a/src/humanloop/otel/processor.py b/src/humanloop/otel/processor.py
index eb935536..c470d0d7 100644
--- a/src/humanloop/otel/processor.py
+++ b/src/humanloop/otel/processor.py
@@ -18,8 +18,10 @@
class HumanloopSpanProcessor(SimpleSpanProcessor):
- """Merge information from Instrumentors used by Humanloop SDK into the
- Spans that will be exported to Humanloop.
+ """Enrich Humanloop spans with data from their children spans.
+
+ Spans that are not created by Humanloop decorators will be passed
+ to the Exporter as they are.
"""
def __init__(self, exporter: SpanExporter) -> None:
@@ -33,21 +35,28 @@ def __init__(self, exporter: SpanExporter) -> None:
def on_end(self, span: ReadableSpan) -> None:
if is_humanloop_span(span=span):
- _process_humanloop_span(span, self._children[span.context.span_id])
+ _process_span_dispatch(span, self._children[span.context.span_id])
+ # Release the reference to the Spans as they've already
+ # been sent to the Exporter
del self._children[span.context.span_id]
- self.span_exporter.export([span])
else:
if span.parent is not None and _is_instrumentor_span(span):
+ # Copy the Span and keep it until the Humanloop Span
+ # arrives in order to enrich it
self._children[span.parent.span_id].append(span)
+ # Pass the Span to the Exporter
+ self.span_exporter.export([span])
def _is_instrumentor_span(span: ReadableSpan) -> bool:
- # TODO: Extend in the future as needed. Spans not coming from
- # Instrumentors of interest should be dropped
+ """Determine if the Span contains information of interest for Spans created by Humanloop decorators."""
+ # At the moment we only enrich Spans created by the Prompt decorators
+ # As we add Instrumentors for other libraries, this function must
+ # be expanded
return is_llm_provider_call(span=span)
-def _process_humanloop_span(span: ReadableSpan, children_spans: list[ReadableSpan]):
+def _process_span_dispatch(span: ReadableSpan, children_spans: list[ReadableSpan]):
hl_file = read_from_opentelemetry_span(span, key=HL_FILE_OT_KEY)
if "prompt" in hl_file:
diff --git a/tests/conftest.py b/tests/conftest.py
index d71eed78..4a4f5137 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -23,6 +23,11 @@
@pytest.fixture(scope="function")
def opentelemetry_test_provider() -> TracerProvider:
+ """Create a test TracerProvider with a resource.
+
+    This is similar to the TracerProvider created in the
+    Humanloop class.
+ """
provider = TracerProvider(
resource=Resource.create(
{
@@ -47,6 +52,11 @@ def test_span(opentelemetry_test_provider: TracerProvider):
def opentelemetry_test_configuration(
opentelemetry_test_provider: TracerProvider,
) -> Generator[tuple[Tracer, InMemorySpanExporter], None, None]:
+ """Configure OTel backend without HumanloopSpanProcessor.
+
+ Spans created by Instrumentors will not be used to enrich
+ Humanloop Spans.
+ """
exporter = InMemorySpanExporter()
processor = SimpleSpanProcessor(exporter)
opentelemetry_test_provider.add_span_processor(processor)
@@ -74,6 +84,11 @@ def opentelemetry_test_configuration(
def opentelemetry_hl_test_configuration(
opentelemetry_test_provider: TracerProvider,
) -> Generator[tuple[Tracer, InMemorySpanExporter], None, None]:
+ """Configure OTel backend with HumanloopSpanProcessor.
+
+ Spans created by Instrumentors will be used to enrich
+ Humanloop Spans.
+ """
exporter = InMemorySpanExporter()
processor = HumanloopSpanProcessor(exporter=exporter)
opentelemetry_test_provider.add_span_processor(processor)
@@ -99,6 +114,10 @@ def opentelemetry_hl_test_configuration(
@pytest.fixture(scope="function")
def hl_test_exporter() -> HumanloopSpanExporter:
+ """
+    Test Exporter where HTTP calls to the Humanloop API
+    are mocked.
+ """
client = MagicMock()
exporter = HumanloopSpanExporter(client=client)
return exporter
@@ -109,6 +128,9 @@ def opentelemetry_hl_with_exporter_test_configuration(
hl_test_exporter: HumanloopSpanExporter,
opentelemetry_test_provider: TracerProvider,
) -> Generator[tuple[Tracer, HumanloopSpanExporter], None, None]:
+ """Configure OTel backend with HumanloopSpanProcessor and
+ a HumanloopSpanExporter where HTTP calls are mocked.
+ """
processor = HumanloopSpanProcessor(exporter=hl_test_exporter)
opentelemetry_test_provider.add_span_processor(processor)
instrumentor = OpenAIInstrumentor()
diff --git a/tests/decorators/test_flow_decorator.py b/tests/decorators/test_flow_decorator.py
index f6b1e57c..18e56208 100644
--- a/tests/decorators/test_flow_decorator.py
+++ b/tests/decorators/test_flow_decorator.py
@@ -20,15 +20,16 @@
@tool()
def _random_string() -> str:
"""Return a random string."""
- # NOTE: This is very basic; scope is to check if it's
- # picked up and included in the Flow Trace
- return "".join(random.choices(string.ascii_letters + string.digits, k=10))
+ return "".join(
+ random.choices(
+ string.ascii_letters + string.digits,
+ k=10,
+ )
+ )
@prompt(path=None, template="You are an assistant on the following topics: {topics}.")
def _call_llm(messages: list[ChatCompletionMessageParam]) -> str:
- # NOTE: These tests check if instrumentors are capable of intercepting OpenAI
- # provider calls. Could not find a way to intercept them coming from a Mock.
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
return (
client.chat.completions.create(
@@ -41,10 +42,6 @@ def _call_llm(messages: list[ChatCompletionMessageParam]) -> str:
) + _random_string()
-def _agent_call_no_decorator(messages: list[dict]) -> str:
- return _call_llm(messages=messages)
-
-
@flow(attributes={"foo": "bar", "baz": 7})
def _agent_call(messages: list[dict]) -> str:
return _call_llm(messages=messages)
@@ -74,10 +71,19 @@ def test_decorators_without_flow(
)
# WHEN exporting the spans
spans = exporter.get_finished_spans()
- # THEN 2 independent spans are exported with no relation to each other
- assert len(spans) == 2
- assert read_from_opentelemetry_span(span=spans[0], key=HL_FILE_OT_KEY)["tool"]
- assert read_from_opentelemetry_span(span=spans[1], key=HL_FILE_OT_KEY)["prompt"]
+ # THEN 3 spans arrive at the exporter in the following order:
+ # 0. Intercepted OpenAI call, which is ignored by the exporter
+ # 1. Tool Span (called after the OpenAI call but before the Prompt Span finishes)
+ # 2. Prompt Span
+ assert len(spans) == 3
+ assert read_from_opentelemetry_span(
+ span=spans[1],
+ key=HL_FILE_OT_KEY,
+ )["tool"]
+ assert read_from_opentelemetry_span(
+ span=spans[2],
+ key=HL_FILE_OT_KEY,
+ )["prompt"]
for span in spans:
# THEN no metadata related to trace is present on either of them
with pytest.raises(KeyError):
@@ -102,24 +108,28 @@ def test_decorators_with_flow_decorator(
},
]
)
- # THEN 3 spans are created
+ # THEN 4 spans arrive at the exporter in the following order:
+ # 0. Intercepted OpenAI call, which is ignored by the exporter
+ # 1. Tool Span (called after the OpenAI call but before the Prompt Span finishes)
+ # 2. Prompt Span
+ # 3. Flow Span
spans = exporter.get_finished_spans()
- assert len(spans) == 3
+ assert len(spans) == 4
# THEN the spans are returned bottom to top
- assert read_from_opentelemetry_span(span=spans[0], key=HL_FILE_OT_KEY)["tool"]
- assert read_from_opentelemetry_span(span=spans[1], key=HL_FILE_OT_KEY)["prompt"]
- assert read_from_opentelemetry_span(span=spans[2], key=HL_FILE_OT_KEY)["flow"]
- tool_trace_metadata = read_from_opentelemetry_span(span=spans[0], key=HL_TRACE_METADATA_KEY)
- prompt_trace_metadata = read_from_opentelemetry_span(span=spans[1], key=HL_TRACE_METADATA_KEY)
- flow_trace_metadata = read_from_opentelemetry_span(span=spans[2], key=HL_TRACE_METADATA_KEY)
+ assert read_from_opentelemetry_span(span=spans[1], key=HL_FILE_OT_KEY)["tool"]
+ assert read_from_opentelemetry_span(span=spans[2], key=HL_FILE_OT_KEY)["prompt"]
+ assert read_from_opentelemetry_span(span=spans[3], key=HL_FILE_OT_KEY)["flow"]
+ tool_trace_metadata = read_from_opentelemetry_span(span=spans[1], key=HL_TRACE_METADATA_KEY)
+ prompt_trace_metadata = read_from_opentelemetry_span(span=spans[2], key=HL_TRACE_METADATA_KEY)
+ flow_trace_metadata = read_from_opentelemetry_span(span=spans[3], key=HL_TRACE_METADATA_KEY)
# THEN Tool span is a child of Prompt span
- assert tool_trace_metadata["trace_parent_id"] == spans[1].context.span_id
+ assert tool_trace_metadata["trace_parent_id"] == spans[2].context.span_id
assert tool_trace_metadata["is_flow_log"] is False
- assert prompt_trace_metadata["trace_parent_id"] == spans[2].context.span_id
+ assert prompt_trace_metadata["trace_parent_id"] == spans[3].context.span_id
# THEN Prompt span is a child of Flow span
assert prompt_trace_metadata["is_flow_log"] is False
assert flow_trace_metadata["is_flow_log"]
- assert flow_trace_metadata["trace_id"] == spans[2].context.span_id
+ assert flow_trace_metadata["trace_id"] == spans[3].context.span_id
def test_flow_decorator_flow_in_flow(
@@ -132,29 +142,37 @@ def test_flow_decorator_flow_in_flow(
# WHEN Calling the _test_flow_in_flow function with specific messages
_flow_over_flow(call_llm_messages)
- # THEN Spans correctly produce a Flow Trace
+ # THEN 5 spans arrive at the exporter in the following order:
+ # 0. Intercepted OpenAI call, which is ignored by the exporter
+ # 1. Tool Span (called after the OpenAI call but before the Prompt Span finishes)
+ # 2. Prompt Span
+ # 3. Nested Flow Span
+ # 4. Flow Span
spans = exporter.get_finished_spans()
- assert len(spans) == 4
- assert read_from_opentelemetry_span(span=spans[0], key=HL_FILE_OT_KEY)["tool"]
- assert read_from_opentelemetry_span(span=spans[1], key=HL_FILE_OT_KEY)["prompt"]
- assert read_from_opentelemetry_span(span=spans[2], key=HL_FILE_OT_KEY)["flow"]
+ assert len(spans) == 5
+ assert read_from_opentelemetry_span(span=spans[1], key=HL_FILE_OT_KEY)["tool"]
+ assert read_from_opentelemetry_span(span=spans[2], key=HL_FILE_OT_KEY)["prompt"]
assert read_from_opentelemetry_span(span=spans[3], key=HL_FILE_OT_KEY)["flow"]
+ assert read_from_opentelemetry_span(span=spans[4], key=HL_FILE_OT_KEY)["flow"]
- tool_trace_metadata = read_from_opentelemetry_span(span=spans[0], key=HL_TRACE_METADATA_KEY)
- prompt_trace_metadata = read_from_opentelemetry_span(span=spans[1], key=HL_TRACE_METADATA_KEY)
- nested_flow_trace_metadata = read_from_opentelemetry_span(span=spans[2], key=HL_TRACE_METADATA_KEY)
- flow_trace_metadata = read_from_opentelemetry_span(span=spans[3], key=HL_TRACE_METADATA_KEY)
- # THEN the nested flow points to the parent flow
- assert tool_trace_metadata["trace_parent_id"] == spans[1].context.span_id
+ tool_trace_metadata = read_from_opentelemetry_span(span=spans[1], key=HL_TRACE_METADATA_KEY)
+ prompt_trace_metadata = read_from_opentelemetry_span(span=spans[2], key=HL_TRACE_METADATA_KEY)
+ nested_flow_trace_metadata = read_from_opentelemetry_span(span=spans[3], key=HL_TRACE_METADATA_KEY)
+ flow_trace_metadata = read_from_opentelemetry_span(span=spans[4], key=HL_TRACE_METADATA_KEY)
+ # THEN the parent of the Tool Log is the Prompt Log
+ assert tool_trace_metadata["trace_parent_id"] == spans[2].context.span_id
assert tool_trace_metadata["is_flow_log"] is False
- assert prompt_trace_metadata["trace_parent_id"] == spans[2].context.span_id
+ # THEN the parent of the Prompt Log is the Flow Log
+ assert prompt_trace_metadata["trace_parent_id"] == spans[3].context.span_id
assert prompt_trace_metadata["is_flow_log"] is False
- assert nested_flow_trace_metadata["trace_id"] == spans[2].context.span_id
- # THEN the parent flow correctly points to itself
+ # THEN the nested Flow Log creates a new trace
+ assert nested_flow_trace_metadata["trace_id"] == spans[3].context.span_id
assert nested_flow_trace_metadata["is_flow_log"]
- assert nested_flow_trace_metadata["trace_parent_id"] == spans[3].context.span_id
+ # THEN the parent of the nested Flow Log is the upper Flow Log
+ assert nested_flow_trace_metadata["trace_parent_id"] == spans[4].context.span_id
+ # THEN the parent Flow Log correctly points to itself
+ assert flow_trace_metadata["trace_id"] == spans[4].context.span_id
assert flow_trace_metadata["is_flow_log"]
- assert flow_trace_metadata["trace_id"] == spans[3].context.span_id
def test_flow_decorator_with_hl_exporter(
@@ -167,43 +185,60 @@ def test_flow_decorator_with_hl_exporter(
with patch.object(exporter, "export", wraps=exporter.export) as mock_export_method:
# WHEN calling the @flow decorated function
_agent_call(call_llm_messages)
- assert len(mock_export_method.call_args_list) == 3
- first_exported_span = mock_export_method.call_args_list[0][0][0][0]
- middle_exported_span = mock_export_method.call_args_list[1][0][0][0]
- last_exported_span = mock_export_method.call_args_list[2][0][0][0]
+
+ # The exporter is threaded; wait for its worker threads to finish
+ time.sleep(3)
+
+ # THEN 4 spans arrive at the exporter in the following order:
+ # 0. Intercepted OpenAI call, which is ignored by the exporter
+ # 1. Tool Span (called after the OpenAI call but before the Prompt Span finishes)
+ # 2. Prompt Span
+ # 3. Flow Span
+ assert len(mock_export_method.call_args_list) == 4
+
+ tool_span = mock_export_method.call_args_list[1][0][0][0]
+ prompt_span = mock_export_method.call_args_list[2][0][0][0]
+ flow_span = mock_export_method.call_args_list[3][0][0][0]
# THEN the last uploaded span is the Flow
- assert read_from_opentelemetry_span(span=last_exported_span, key=HL_FILE_OT_KEY)["flow"]["attributes"] == { # type: ignore[index,call-overload]
+ assert read_from_opentelemetry_span(
+ span=flow_span,
+ key=HL_FILE_OT_KEY,
+ )["flow"]["attributes"] == { # type: ignore[index,call-overload]
"foo": "bar",
"baz": 7,
}
# THEN the second uploaded span is the Prompt
- assert "prompt" in read_from_opentelemetry_span(span=middle_exported_span, key=HL_FILE_OT_KEY)
+ assert "prompt" in read_from_opentelemetry_span(
+ span=prompt_span,
+ key=HL_FILE_OT_KEY,
+ )
# THEN the first uploaded span is the Tool
- assert "tool" in read_from_opentelemetry_span(span=first_exported_span, key=HL_FILE_OT_KEY)
+ assert "tool" in read_from_opentelemetry_span(
+ span=tool_span,
+ key=HL_FILE_OT_KEY,
+ )
- # Potentially flaky: Exporter is threaded, need
- # to wait for them to finish
- time.sleep(3)
+ # NOTE: The type: ignore comments are caused by the MagicMock used to mock the HTTP client
# THEN the first Log uploaded is the Flow
- first_log = exporter._client.flows.log.call_args_list[0][1] # type: ignore[attr-defined]
+ first_log = exporter._client.flows.log.call_args_list[0][1] # type: ignore
assert "flow" in first_log
- exporter._client.flows.log.assert_called_once() # type: ignore[attr-defined]
- flow_log_call_args = exporter._client.flows.log.call_args_list[0] # type: ignore[attr-defined]
- flow_log_call_args.kwargs["flow"]["attributes"] == {"foo": "bar", "baz": 7}
- flow_log_id = exporter._client.flows.log.return_value # type: ignore[attr-defined]
+ exporter._client.flows.log.assert_called_once() # type: ignore
+ flow_log_call_args = exporter._client.flows.log.call_args_list[0] # type: ignore
+ assert flow_log_call_args.kwargs["flow"]["attributes"] == {"foo": "bar", "baz": 7}
+ flow_log_id = exporter._client.flows.log.return_value.id # type: ignore
# THEN the second Log uploaded is the Prompt
- exporter._client.prompts.log.assert_called_once() # type: ignore[attr-defined]
- prompt_log_call_args = exporter._client.prompts.log.call_args_list[0] # type: ignore[attr-defined]
- prompt_log_call_args.kwargs["trace_parent_id"] == flow_log_id
- prompt_log_call_args.kwargs["prompt"]["temperature"] == 0.8
- prompt_log_id = exporter._client.prompts.log.return_value # type: ignore[attr-defined]
+ exporter._client.prompts.log.assert_called_once() # type: ignore
+ prompt_log_call_args = exporter._client.prompts.log.call_args_list[0] # type: ignore
+ assert prompt_log_call_args.kwargs["trace_parent_id"] == flow_log_id
+ assert prompt_log_call_args.kwargs["prompt"]["temperature"] == 0.8
+ prompt_log_id = exporter._client.prompts.log.return_value.id # type: ignore
# THEN the final Log uploaded is the Tool
- exporter._client.tools.log.assert_called_once() # type: ignore[attr-defined]
- tool_log_call_args = exporter._client.tools.log.call_args_list[0] # type: ignore[attr-defined]
- tool_log_call_args.kwargs["trace_parent_id"] == prompt_log_id
+ exporter._client.tools.log.assert_called_once() # type: ignore
+ tool_log_call_args = exporter._client.tools.log.call_args_list[0] # type: ignore
+ assert tool_log_call_args.kwargs["trace_parent_id"] == prompt_log_id
def test_flow_decorator_hl_exporter_flow_inside_flow(
@@ -215,15 +250,29 @@ def test_flow_decorator_hl_exporter_flow_inside_flow(
with patch.object(exporter, "export", wraps=exporter.export) as mock_export_method:
# WHEN calling the @flow decorated function
_flow_over_flow(call_llm_messages)
- assert len(mock_export_method.call_args_list) == 4
+
+ # The exporter is threaded; wait for its worker threads to finish
+ time.sleep(3)
+
+ # THEN 5 spans arrive at the exporter in the following order:
+ # 0. Intercepted OpenAI call, which is ignored by the exporter
+ # 1. Tool Span (called after the OpenAI call but before the Prompt Span finishes)
+ # 2. Prompt Span
+ # 3. Nested Flow Span
+ # 4. Flow Span
+ assert len(mock_export_method.call_args_list) == 5
# THEN the last uploaded span is the larger Flow
# THEN the second to last uploaded span is the nested Flow
- last_exported_span = mock_export_method.call_args_list[3][0][0][0]
- previous_exported_span = mock_export_method.call_args_list[2][0][0][0]
- last_span_flow_metadata = read_from_opentelemetry_span(span=last_exported_span, key=HL_TRACE_METADATA_KEY)
- previous_span_flow_metadata = read_from_opentelemetry_span(
- span=previous_exported_span, key=HL_TRACE_METADATA_KEY
+ flow_span = mock_export_method.call_args_list[4][0][0][0]
+ nested_flow_span = mock_export_method.call_args_list[3][0][0][0]
+ last_span_flow_metadata = read_from_opentelemetry_span(
+ span=flow_span,
+ key=HL_TRACE_METADATA_KEY,
+ )
+ nested_flow_span_metadata = read_from_opentelemetry_span(
+ span=nested_flow_span,
+ key=HL_TRACE_METADATA_KEY,
)
- assert previous_span_flow_metadata["trace_parent_id"] == last_exported_span.context.span_id
+ assert flow_span_flow_metadata["trace_parent_id"] == flow_span.context.span_id
assert last_span_flow_metadata["is_flow_log"]
- assert previous_span_flow_metadata["is_flow_log"]
+ assert flow_span_flow_metadata["is_flow_log"]
diff --git a/tests/decorators/test_prompt_decorator.py b/tests/decorators/test_prompt_decorator.py
index 964c8bf2..cbc9c831 100644
--- a/tests/decorators/test_prompt_decorator.py
+++ b/tests/decorators/test_prompt_decorator.py
@@ -114,7 +114,7 @@ def _call_llm_base(provider: ModelProviders, model: str, messages: list[dict]) -
raise ValueError(f"Unknown provider: {provider}")
-# prompt is a decorator, but for sake of brevity, I am using it as a higher-order function
+# NOTE: prompt is a decorator, but for brevity, it's used as a higher-order function in tests
_call_llm = prompt(
path=None,
template="You are an assistant on the following topics: {topics}.",
@@ -134,7 +134,7 @@ def test_prompt_decorator(
call_llm_messages: list[ChatCompletionMessageParam],
):
provider, model = provider_model
- # GIVEN a default OpenTelemetry configuration
+ # GIVEN an OpenTelemetry configuration without HumanloopSpanProcessor
_, exporter = opentelemetry_test_configuration
# WHEN using the Prompt decorator
_call_llm(
@@ -145,8 +145,11 @@ def test_prompt_decorator(
# THEN two spans are created: one for the OpenAI LLM provider call and one for the Prompt
spans = exporter.get_finished_spans()
assert len(spans) == 2
+ assert not is_humanloop_span(span=spans[0])
# THEN the Prompt span is not enhanced with information from the LLM provider
assert is_humanloop_span(spans[1])
+ # THEN no information is added to the Prompt span without the HumanloopSpanProcessor
assert spans[1].attributes.get("prompt") is None # type: ignore
@@ -157,7 +160,7 @@ def test_prompt_decorator_with_hl_processor(
call_llm_messages: list[ChatCompletionMessageParam],
):
provider, model = provider_model
- # GIVEN an OpenTelemetry configuration with a Humanloop Span processor
+ # GIVEN an OpenTelemetry configuration with HumanloopSpanProcessor
_, exporter = opentelemetry_hl_test_configuration
# WHEN using the Prompt decorator
_call_llm(
@@ -165,18 +168,26 @@ def test_prompt_decorator_with_hl_processor(
model=model,
messages=call_llm_messages,
)
- # THEN a single span is created since the LLM provider call span is merged in the Prompt span
+ # THEN two spans are created: one for the OpenAI LLM provider call and one for the Prompt
spans = exporter.get_finished_spans()
- assert len(spans) == 1
- assert is_humanloop_span(span=spans[0])
+ assert len(spans) == 2
+ assert not is_humanloop_span(span=spans[0])
+ assert is_humanloop_span(span=spans[1])
+ # THEN the Prompt span is enriched with intercepted information and forms a valid PromptKernel
prompt = PromptKernelRequest.model_validate(
- read_from_opentelemetry_span(span=spans[0], key=HL_FILE_OT_KEY)["prompt"] # type: ignore
+ read_from_opentelemetry_span(
+ span=spans[1],
+ key=HL_FILE_OT_KEY,
+ )["prompt"] # type: ignore
)
- # THEN temperature is taken from LLM provider call, but top_p is not since it is not specified
+ # THEN temperature is intercepted from the LLM provider call
assert prompt.temperature == 0.8
+ # THEN the provider is intercepted from the LLM provider call
assert prompt.provider == provider
- assert prompt.top_p is None
+ # THEN the model is intercepted from the LLM provider call
assert prompt.model == model
+ # THEN top_p is absent since it is not set in the LLM provider call
+ assert prompt.top_p is None
@pytest.mark.parametrize("provider_model", _PROVIDER_AND_MODEL)
@@ -186,7 +197,7 @@ def test_prompt_decorator_with_defaults(
call_llm_messages: list[ChatCompletionMessageParam],
):
provider, model = provider_model
- # GIVEN an OpenTelemetry configuration with a Humanloop Span processor
+ # GIVEN an OpenTelemetry configuration with HumanloopSpanProcessor
_, exporter = opentelemetry_hl_test_configuration
# WHEN using the Prompt decorator with default values
_call_llm_with_defaults(
@@ -194,17 +205,16 @@ def test_prompt_decorator_with_defaults(
model=model,
messages=call_llm_messages,
)
- # THEN a single span is created since the LLM provider call span is merged in the Prompt span
spans = exporter.get_finished_spans()
- assert len(spans) == 1
- assert is_humanloop_span(spans[0])
+ # THEN the Prompt span is enriched with intercepted information and forms a valid PromptKernel
prompt = PromptKernelRequest.model_validate(
- read_from_opentelemetry_span(span=spans[0], key=HL_FILE_OT_KEY)["prompt"] # type: ignore
+ read_from_opentelemetry_span(span=spans[1], key=HL_FILE_OT_KEY)["prompt"] # type: ignore
)
- # THEN temperature is taken from decorator rather than intercepted LLM provider call
+ # THEN the temperature intercepted from the LLM provider call is overridden by the decorator default
assert prompt.temperature == 0.9
- # THEN top_p is present
+ # THEN top_p is taken from the decorator default value
assert prompt.top_p == 0.1
+ # THEN the model is intercepted from the LLM provider call
assert prompt.model == model
@@ -217,12 +227,11 @@ def test_prompt_decorator_with_defaults(
{"frequency_penalty": 3},
),
)
-def test_default_values_fails_out_of_domain(hyperparameters: dict[str, float]):
+def test_hyperparameter_values_fail_out_of_domain(hyperparameters: dict[str, float]):
# GIVEN a Prompt decorated function
- # WHEN using default values that are out of domain
- # THEN an exception is raised
- with pytest.raises(ValueError):
+ with pytest.raises(ValueError):
+ # WHEN using default values that are out of domain
@prompt(path=None, template="You are an assistant on the following topics: {topics}.", **hyperparameters) # type: ignore[arg-type]
def _call_llm(messages: list[ChatCompletionMessageParam]) -> Optional[str]:
load_dotenv()
@@ -236,3 +245,5 @@ def _call_llm(messages: list[ChatCompletionMessageParam]) -> Optional[str]:
.choices[0]
.message.content
)
+
+ # THEN an exception is raised
diff --git a/tests/decorators/test_tool_decorator.py b/tests/decorators/test_tool_decorator.py
index d19f3df9..c9faed30 100644
--- a/tests/decorators/test_tool_decorator.py
+++ b/tests/decorators/test_tool_decorator.py
@@ -1,4 +1,4 @@
-from typing import Any, Optional, Union
+from typing import Any, Optional, TypedDict, Union
import pytest
from humanloop.decorators.tool import tool
@@ -42,7 +42,7 @@ def calculator(operation: str, num1: float, num2: float) -> float:
"num1": 1,
"num2": 2,
}
- hl_file["tool"]["function"]["description"] == "Do arithmetic operations on two numbers."
+ assert hl_file["tool"]["function"]["description"] == "Do arithmetic operations on two numbers."
# TODO: pydantic is inconsistent by dumping either tuple or list
assert calculator.json_schema == hl_file["tool"]["function"]
@@ -86,19 +86,138 @@ def test_calculator(a: Optional[float], b: float) -> float:
Validator.check_schema(test_calculator.json_schema)
-def test_no_annotation_on_parameter_fails():
- with pytest.raises(ValueError) as exc:
+def test_no_annotation_on_parameter():
+ # GIVEN a function annotated with @tool and without a type hint on a parameter
+ @tool()
+ def calculator(a: Optional[float], b) -> float:
+ if a is None:
+ a = 0
+ return a + b
+
+ # WHEN building the Tool kernel
+ # THEN the JSON schema is correctly built and `b` is of `any` type
+ # NOTE: JSONSchema dropped support for the 'any' type; we include all
+ # types as a workaround
+ assert calculator.json_schema == {
+ "description": "",
+ "name": "calculator",
+ "parameters": {
+ "properties": {
+ "a": {"type": ["number", "null"]},
+ "b": {"type": ["string", "integer", "number", "boolean", "object", "array", "null"]},
+ },
+ "required": ("b",),
+ "type": "object",
+ },
+ "strict": True,
+ }
+
+ Validator.check_schema(calculator.json_schema)
+
+
+def test_dict_annotation_no_sub_types():
+ # GIVEN a function annotated with @tool and a bare `dict` type hint on a parameter
+ @tool()
+ def calculator(a: Optional[float], b: dict) -> float:
+ if a is None:
+ a = 0
+ return a + b["c"]
+
+ # WHEN building the Tool kernel
+ # THEN the JSON schema is correctly built and `b` accepts any type
+ # for both keys and values
+ assert calculator.json_schema == {
+ "description": "",
+ "name": "calculator",
+ "parameters": {
+ "properties": {
+ "a": {"type": ["number", "null"]},
+ "b": {
+ "type": "object",
+ "properties": {
+ "key": {"type": ["string", "integer", "number", "boolean", "object", "array", "null"]},
+ "value": {"type": ["string", "integer", "number", "boolean", "object", "array", "null"]},
+ },
+ },
+ },
+ "required": ("b",),
+ "type": "object",
+ },
+ "strict": True,
+ }
+
+ Validator.check_schema(calculator.json_schema)
+
+
+def test_list_annotation_no_sub_types():
+ # GIVEN a function annotated with @tool and a bare `Optional[list]` type hint on a parameter
+ @tool()
+ def calculator(a: Optional[float], b: Optional[list]) -> float:
+ if a is None:
+ a = 0
+ sum = a
+ if b is None:
+ return sum
+ for val in b:
+ sum += val
+ return sum
+
+ # WHEN building the Tool kernel
+ # THEN the JSON schema is correctly built and `b` accepts any type
+ assert calculator.json_schema == {
+ "description": "",
+ "name": "calculator",
+ "parameters": {
+ "properties": {
+ "a": {"type": ["number", "null"]},
+ "b": {
+ "type": ["array", "null"],
+ "items": {"type": ["string", "integer", "number", "boolean", "object", "array", "null"]},
+ },
+ },
+ "required": (),
+ "type": "object",
+ },
+ "strict": True,
+ }
- @tool()
- def bad_tool(a: Optional[float], b) -> float:
- if a is None:
- a = 0
- return a + b
- assert exc.value.args[0] == "bad_tool: b lacks a type hint annotation"
+def test_tuple_annotation_no_sub_types():
+ # GIVEN a function annotated with @tool and a bare `Optional[tuple]` type hint on a parameter
+ @tool()
+ def calculator(a: Optional[float], b: Optional[tuple]) -> float:
+ if a is None:
+ a = 0
+ sum = a
+ if b is None:
+ return sum
+ for val in b:
+ sum += val
+ return sum
+
+ # WHEN building the Tool kernel
+ # THEN the JSON schema is correctly built and `b` accepts any type
+ assert calculator.json_schema == {
+ "description": "",
+ "name": "calculator",
+ "parameters": {
+ "properties": {
+ "a": {"type": ["number", "null"]},
+ "b": {
+ "type": ["array", "null"],
+ "items": {"type": ["string", "integer", "number", "boolean", "object", "array", "null"]},
+ },
+ },
+ "required": (),
+ "type": "object",
+ },
+ "strict": True,
+ }
-def test_no_annotation_function_returns_does_not_fail():
+def test_function_without_return_annotation():
+ # GIVEN a function annotated with @tool and without type hint on the return value
+ # WHEN building the Tool kernel
@tool()
def foo(a: Optional[float], b: float) -> float:
"""Add two numbers."""
@@ -106,38 +225,51 @@ def foo(a: Optional[float], b: float) -> float:
a = 0
return a + b
+ # THEN the JSONSchema is valid
Validator.check_schema(foo.json_schema)
def test_list_annotation_parameter(
opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter],
):
+ # GIVEN an OTel configuration with HL Processor
_, exporter = opentelemetry_hl_test_configuration
+ # WHEN defining a tool with a list parameter
@tool()
def foo(to_join: list[str]) -> str:
return " ".join(to_join)
assert "a b c" == foo(to_join=["a", "b", "c"])
+ # THEN the function call results in a Span
assert len(spans := exporter.get_finished_spans()) == 1
-
- tool_kernel = ToolKernelRequest.model_validate(read_from_opentelemetry_span(spans[0], HL_FILE_OT_KEY)["tool"])
-
+ # THEN a valid Tool Kernel can be parsed from the Span
+ tool_kernel = ToolKernelRequest.model_validate(
+ read_from_opentelemetry_span(
+ spans[0],
+ HL_FILE_OT_KEY,
+ )["tool"]
+ )
+ # THEN the argument is present in the Tool Kernel
assert "to_join" in tool_kernel.function.parameters["required"] # type: ignore
+ # THEN the argument is correctly described in the JSON schema
assert tool_kernel.function.parameters["properties"]["to_join"] == { # type: ignore
"type": "array",
"items": {"type": "string"},
}
-
+ # THEN the JSONSchema is valid
Validator.check_schema(foo.json_schema)
-def test_list_list_parameter_annotation():
+def test_list_in_list_parameter_annotation():
+ # GIVEN a tool definition with a list of lists parameter
+ # WHEN building the Tool Kernel
@tool()
def nested_plain_join(to_join: list[list[str]]):
return " ".join([val for sub_list in to_join for val in sub_list])
+ # THEN the JSON schema is correctly built and parameter is correctly described
assert nested_plain_join.json_schema["parameters"]["properties"]["to_join"] == {
"type": "array",
"items": {
@@ -146,14 +278,18 @@ def nested_plain_join(to_join: list[list[str]]):
},
}
+ # THEN the JSONSchema is valid
Validator.check_schema(nested_plain_join.json_schema)
def test_complex_dict_annotation():
+ # GIVEN a tool definition with a dictionary parameter
+ # WHEN building the Tool Kernel
@tool()
def foo(a: dict[Union[int, str], list[str]]):
return a
+ # THEN the parameter is correctly described
assert foo.json_schema["parameters"]["properties"]["a"] == {
"type": "object",
"properties": {
@@ -162,14 +298,18 @@ def foo(a: dict[Union[int, str], list[str]]):
},
}
+ # THEN the JSONSchema is valid
Validator.check_schema(foo.json_schema)
def test_tuple_annotation():
+ # GIVEN a tool definition with a tuple parameter
+ # WHEN building the Tool Kernel
@tool()
def foo(a: Optional[tuple[int, Optional[str], float]]):
return a
+ # THEN the parameter is correctly described
assert foo.json_schema["parameters"]["properties"]["a"] == {
"type": ["array", "null"],
"items": [
@@ -179,24 +319,32 @@ def foo(a: Optional[tuple[int, Optional[str], float]]):
],
}
+ # THEN the JSONSchema is valid
Validator.check_schema(foo.json_schema)
def test_strict_false():
+ # GIVEN a tool definition with strict=False
+ # WHEN building the Tool Kernel
@tool(strict=False)
def foo(a: int, b: int) -> int:
return a + b
+ # THEN the JSON schema is correctly built
assert foo.json_schema["strict"] is False
+ # THEN the JSONSchema is valid
Validator.check_schema(foo.json_schema)
def test_tool_no_args():
+ # GIVEN a tool definition without arguments
+ # WHEN building the Tool Kernel
@tool()
def foo():
return 42
+ # THEN the JSON schema is correctly built
assert foo.json_schema == {
"description": "",
"name": "foo",
@@ -208,4 +356,22 @@ def foo():
"strict": True,
}
+ # THEN the JSONSchema is valid
Validator.check_schema(foo.json_schema)
+
+
+def test_custom_types_throws():
+ # GIVEN a user-defined type
+ class Foo(TypedDict):
+ a: int # type: ignore
+ b: int # type: ignore
+
+ # WHEN defining a tool with a parameter of that type
+ with pytest.raises(ValueError) as exc:
+
+ @tool()
+ def foo_bar(foo: Foo):
+ return foo.a + foo.b # type: ignore
+
+ # THEN a ValueError is raised
+ assert exc.value.args[0].startswith("foo_bar: Unsupported type hint")
diff --git a/tests/otel/test_helpers.py b/tests/otel/test_helpers.py
index 04d16a65..bbca31a7 100644
--- a/tests/otel/test_helpers.py
+++ b/tests/otel/test_helpers.py
@@ -153,7 +153,7 @@ def test_write_drops_dict_all_null_values(test_span: Span):
# THEN the value is not present in the span attributes
assert "key" not in test_span.attributes # type: ignore
with pytest.raises(KeyError):
- read_from_opentelemetry_span(test_span, "key") == {}
+ assert read_from_opentelemetry_span(test_span, "key") == {}
def test_write_drops_null_value_from_dict(test_span: Span):
@@ -162,4 +162,4 @@ def test_write_drops_null_value_from_dict(test_span: Span):
write_to_opentelemetry_span(test_span, {"x": 2, "y": None}, "key") # type: ignore
# WHEN reading the values from the span
# THEN the value with null value is not present in the span attributes
- read_from_opentelemetry_span(test_span, "key") == {"x": 2}
+ assert read_from_opentelemetry_span(test_span, "key") == {"x": 2}
From 96e91c63524a78efa38cf9490387feb22d76ee79 Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Wed, 30 Oct 2024 18:30:31 +0000
Subject: [PATCH 17/70] QA with live example
---
src/humanloop/decorators/flow.py | 9 ++---
src/humanloop/decorators/prompt.py | 44 +++++++++++++----------
src/humanloop/decorators/tool.py | 30 +++++++++-------
src/humanloop/otel/constants.py | 7 +++-
src/humanloop/otel/exporter.py | 32 +++++++++++++----
src/humanloop/otel/helpers.py | 6 +++-
src/humanloop/otel/processor.py | 10 +++---
tests/decorators/test_prompt_decorator.py | 3 ++
tests/decorators/test_tool_decorator.py | 26 ++++++--------
9 files changed, 104 insertions(+), 63 deletions(-)
diff --git a/src/humanloop/decorators/flow.py b/src/humanloop/decorators/flow.py
index 89d1c8f2..1d17e905 100644
--- a/src/humanloop/decorators/flow.py
+++ b/src/humanloop/decorators/flow.py
@@ -4,7 +4,7 @@
from humanloop.decorators.helpers import args_to_inputs
from humanloop.otel import get_humanloop_sdk_tracer, get_trace_parent_metadata, pop_trace_context, push_trace_context
-from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY, HL_TRACE_METADATA_KEY, OT_EMPTY_ATTRIBUTE
+from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY, HL_OT_EMPTY_VALUE, HL_TRACE_METADATA_KEY
from humanloop.otel.helpers import write_to_opentelemetry_span
@@ -67,9 +67,10 @@ def wrapper(*args, **kwargs):
key=HL_FILE_OT_KEY,
value={
"path": path if path else func.__name__,
- # OT span attributes are dropped if they are empty or null
- # Add 'EMPTY' token value otherwise the 'flow' key will be dropped
- "flow": {"attributes": attributes} if attributes else OT_EMPTY_ATTRIBUTE,
+ # If a None write is attempted then the attribute is removed,
+ # making it impossible to distinguish between a Flow Span and
+ # Spans not created by Humanloop (see humanloop.otel.helpers.is_humanloop_span)
+ "flow": {"attributes": attributes} if attributes else HL_OT_EMPTY_VALUE,
},
)
diff --git a/src/humanloop/decorators/prompt.py b/src/humanloop/decorators/prompt.py
index 04940aa8..a9de046f 100644
--- a/src/humanloop/decorators/prompt.py
+++ b/src/humanloop/decorators/prompt.py
@@ -30,21 +30,44 @@ def prompt(
response_format: Optional[ResponseFormat] = None,
):
def decorator(func: Callable):
+ prompt_kernel = {}
+
if temperature is not None:
if not 0 <= temperature < 1:
raise ValueError(f"{func.__name__}: Temperature parameter must be between 0 and 1")
+ prompt_kernel["temperature"] = temperature
if top_p is not None:
if not 0 <= top_p <= 1:
raise ValueError(f"{func.__name__}: Top-p parameter must be between 0 and 1")
+ prompt_kernel["top_p"] = top_p
if presence_penalty is not None:
if not -2 <= presence_penalty <= 2:
raise ValueError(f"{func.__name__}: Presence penalty parameter must be between -2 and 2")
+ prompt_kernel["presence_penalty"] = presence_penalty
if frequency_penalty is not None:
if not -2 <= frequency_penalty <= 2:
raise ValueError(f"{func.__name__}: Frequency penalty parameter must be between -2 and 2")
+ prompt_kernel["frequency_penalty"] = frequency_penalty
+
+ for attr_name, attr_value in {
+ "model": model,
+ "endpoint": endpoint,
+ "template": template,
+ "provider": provider,
+ "max_tokens": max_tokens,
+ "stop": stop,
+ "other": other,
+ "seed": seed,
+ "response_format": response_format,
+ }.items():
+ if attr_value is not None:
+ prompt_kernel[attr_name] = attr_value # type: ignore
@wraps(func)
def wrapper(*args, **kwargs):
@@ -71,7 +94,6 @@ def wrapper(*args, **kwargs):
},
)
- # Write the Prompt Kernel to the Span on HL_FILE_OT_KEY
write_to_opentelemetry_span(
span=span,
key=HL_FILE_OT_KEY,
@@ -80,21 +102,7 @@ def wrapper(*args, **kwargs):
# Values not specified in the decorator will be
# completed with the intercepted values from the
# Instrumentors for LLM providers
- "prompt": {
- "template": template,
- "temperature": temperature,
- "top_p": top_p,
- "presence_penalty": presence_penalty,
- "frequency_penalty": frequency_penalty,
- "model": model,
- "endpoint": endpoint,
- "provider": provider,
- "max_tokens": max_tokens,
- "stop": stop,
- "other": other,
- "seed": seed,
- "response_format": response_format,
- },
+ "prompt": prompt_kernel or None, # noqa: F821
},
)
@@ -108,9 +116,7 @@ def wrapper(*args, **kwargs):
# Go back to previous trace context in Trace context
pop_trace_context()
- prompt_log = {}
- if output:
- prompt_log["output"] = output
+ prompt_log = {"output": output}
# Write the Prompt Log to the Span on HL_LOG_OT_KEY
write_to_opentelemetry_span(
diff --git a/src/humanloop/decorators/tool.py b/src/humanloop/decorators/tool.py
index e44e6d90..092beac7 100644
--- a/src/humanloop/decorators/tool.py
+++ b/src/humanloop/decorators/tool.py
@@ -4,13 +4,13 @@
import typing
import uuid
from functools import wraps
-from typing import Any, Callable, Mapping, Optional, Sequence, TypedDict, Union
+from typing import Any, Callable, Literal, Mapping, Optional, Sequence, TypedDict, Union
from humanloop.otel import get_humanloop_sdk_tracer, get_trace_parent_metadata, pop_trace_context, push_trace_context
-from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY, HL_TRACE_METADATA_KEY
+from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY, HL_OT_EMPTY_VALUE, HL_TRACE_METADATA_KEY
from humanloop.otel.helpers import write_to_opentelemetry_span
-from humanloop.types.tool_function import ToolFunction
-from humanloop.types.tool_kernel_request import ToolKernelRequest
+from humanloop.requests.tool_function import ToolFunctionParams
+from humanloop.requests.tool_kernel_request import ToolKernelRequestParams
from .helpers import args_to_inputs
@@ -30,7 +30,7 @@ def decorator(func: Callable):
)
# Mypy complains about adding attribute on function but it's nice UX
- func.json_schema = tool_kernel.function.model_dump() # type: ignore
+ func.json_schema = tool_kernel["function"] # type: ignore
@wraps(func)
def wrapper(*args, **kwargs):
@@ -63,7 +63,7 @@ def wrapper(*args, **kwargs):
key=HL_FILE_OT_KEY,
value={
"path": path if path else func.__name__,
- "tool": tool_kernel.model_dump(),
+ "tool": tool_kernel,
},
)
@@ -103,15 +103,16 @@ def _build_tool_kernel(
attributes: Optional[dict[str, Optional[Any]]],
setup_values: Optional[dict[str, Optional[Any]]],
strict: bool,
-) -> ToolKernelRequest:
+) -> ToolKernelRequestParams:
"""Build ToolKernelRequest object from decorated function."""
- return ToolKernelRequest(
+ return ToolKernelRequestParams(
source_code=textwrap.dedent(
# Remove the tool decorator from source code
inspect.getsource(func).split("\n", maxsplit=1)[1]
),
- attributes=attributes,
- setup_values=setup_values,
+ # Note: OTel complains about falsy values in attributes, so we use HL_OT_EMPTY_VALUE
+ attributes=attributes or HL_OT_EMPTY_VALUE, # type: ignore
+ setup_values=setup_values or HL_OT_EMPTY_VALUE, # type: ignore
function=_build_function_property(
func=func,
strict=strict,
@@ -119,16 +120,16 @@ def _build_tool_kernel(
)
-def _build_function_property(func: Callable, strict: bool) -> ToolFunction:
+def _build_function_property(func: Callable, strict: bool) -> ToolFunctionParams:
"""Build `function` property inside ToolKernelRequest."""
tool_name = func.__name__
description = func.__doc__
if description is None:
description = ""
- return ToolFunction(
+ return ToolFunctionParams(
name=tool_name,
description=description,
- parameters=_build_function_parameters_property(func),
+ parameters=_build_function_parameters_property(func), # type: ignore
strict=strict,
)
@@ -137,6 +138,7 @@ class _JSONSchemaFunctionParameters(TypedDict):
type: str
properties: dict[str, dict]
required: list[str]
+ additionalProperties: Literal[False]
def _build_function_parameters_property(func) -> _JSONSchemaFunctionParameters:
@@ -168,12 +170,14 @@ def _build_function_parameters_property(func) -> _JSONSchemaFunctionParameters:
type="object",
properties={},
required=[],
+ additionalProperties=False,
)
return _JSONSchemaFunctionParameters(
type="object",
# False positive, expected tuple[str] but got tuple[str, ...]
required=tuple(required), # type: ignore
properties=properties,
+ additionalProperties=False,
)
diff --git a/src/humanloop/otel/constants.py b/src/humanloop/otel/constants.py
index 879d2eff..c683845c 100644
--- a/src/humanloop/otel/constants.py
+++ b/src/humanloop/otel/constants.py
@@ -1,4 +1,9 @@
+# Attribute name prefix on Humanloop spans for file-related attributes + path
HL_FILE_OT_KEY = "humanloop.file"
+# Attribute name prefix on Humanloop spans for log-related attributes
HL_LOG_OT_KEY = "humanloop.log"
+# Attribute name prefix on Humanloop spans for trace metadata
HL_TRACE_METADATA_KEY = "humanloop.flow.metadata"
-OT_EMPTY_ATTRIBUTE = "EMPTY"
+# OTel does not allow falsy values for top-level attributes (e.g. foo)
+# and allows None only on nested attributes (e.g. foo.bar)
+HL_OT_EMPTY_VALUE = "EMPTY"
diff --git a/src/humanloop/otel/exporter.py b/src/humanloop/otel/exporter.py
index 0cee9015..c141523a 100644
--- a/src/humanloop/otel/exporter.py
+++ b/src/humanloop/otel/exporter.py
@@ -8,7 +8,7 @@
from opentelemetry.sdk.trace import ReadableSpan
from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult
-from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY, HL_TRACE_METADATA_KEY, OT_EMPTY_ATTRIBUTE
+from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY, HL_OT_EMPTY_VALUE, HL_TRACE_METADATA_KEY
from humanloop.otel.helpers import is_humanloop_span, read_from_opentelemetry_span
from humanloop.requests.flow_kernel_request import FlowKernelRequestParams
from humanloop.requests.prompt_kernel_request import PromptKernelRequestParams
@@ -132,6 +132,18 @@ def _export_prompt(self, span: ReadableSpan) -> None:
span,
key=HL_LOG_OT_KEY,
)
+ # NOTE: Due to OTel conventions, attributes with a value of None are removed
+ # If not present, instantiate as empty dictionary
+ if "inputs" not in log_object:
+ log_object["inputs"] = {}
+ # NOTE: Due to OTel conventions, lists are read as dictionaries
+ # E.g. ["a", "b"] -> {"0": "a", "1": "b"}
+ # We must convert the dictionary back to a list
+ # See humanloop.otel.helpers._list_to_ott
+ if "messages" not in log_object:
+ log_object["messages"] = []
+ else:
+ log_object["messages"] = list(log_object["messages"].values())
trace_metadata: Optional[dict[str, str]]
try:
trace_metadata = read_from_opentelemetry_span(
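The messages round-trip noted above can be shown in isolation. A sketch with illustrative data; if ordering ever needs to be guaranteed rather than relying on dict insertion order, sorting on the numeric keys is enough:

```python
# OTel flattens a list attribute into a dict keyed by stringified indices
flattened = {"0": {"role": "user"}, "1": {"role": "assistant"}}

# Restore the list, sorting defensively on the integer value of each key
messages = [flattened[k] for k in sorted(flattened, key=int)]
assert messages[0]["role"] == "user"
```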
@@ -168,6 +180,10 @@ def _export_tool(self, span: ReadableSpan) -> None:
else:
trace_parent_id = None
tool = file_object["tool"]
+ if tool.get("attributes", HL_OT_EMPTY_VALUE) == HL_OT_EMPTY_VALUE:
+ tool["attributes"] = {}
+ if tool.get("setup_values", HL_OT_EMPTY_VALUE) == HL_OT_EMPTY_VALUE:
+ tool["setup_values"] = {}
path: str = file_object["path"]
response = self._client.tools.log(
path=path,
@@ -189,11 +205,15 @@ def _export_flow(self, span: ReadableSpan) -> None:
trace_parent_id = self._uploaded_log_ids[trace_metadata["trace_parent_id"]]
else:
trace_parent_id = None
- flow: Optional[FlowKernelRequestParams] = file_object["flow"]
- if flow == OT_EMPTY_ATTRIBUTE:
- flow = {
- "attributes": {},
- }
+ # Cannot write falsy values except None in OTel Span attributes
+ # If a None write is attempted then the attribute is removed,
+ # making it impossible to distinguish between a Flow Span and
+ # Spans not created by Humanloop (see humanloop.otel.helpers.is_humanloop_span)
+ flow: FlowKernelRequestParams
+ if file_object["flow"] == HL_OT_EMPTY_VALUE:
+ flow = {"attributes": {}}
+ else:
+ flow = file_object["flow"]
path: str = file_object["path"]
response = self._client.flows.log(
path=path,
diff --git a/src/humanloop/otel/helpers.py b/src/humanloop/otel/helpers.py
index ac22d836..9598c3ed 100644
--- a/src/humanloop/otel/helpers.py
+++ b/src/humanloop/otel/helpers.py
@@ -50,6 +50,10 @@ def write_to_opentelemetry_span(
) -> None:
"""Write a Python object to the OpenTelemetry Span's attributes. Reverse of :func:`read_from_opentelemetry_span`.
+ Note: OTel will complain about falsy values other than None, and keys with a value set
+ to None will be silently dropped. Consider adding a placeholder value if the key should
+ be present in the span attributes.
+
:param span: OpenTelemetry span to write values to
:param value: Python object to write to the span attributes. Can also be a primitive value.
@@ -65,7 +69,7 @@ def write_to_opentelemetry_span(
work_stack: list[tuple[str, Union[AttributeValue, NestedDict]]] = [(key, to_write_copy)]
"""
Recurse through the dictionary value, building the OTel format keys in a DFS manner.
-
+
Example:
```python
{
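The docstring example above is truncated by the hunk. As a separate illustration of the flattening convention (hypothetical values, assumed behavior): nested dicts become dot-separated attribute keys built in DFS order:

```python
value = {"file": {"path": "folder/name", "prompt": {"temperature": 0.8}}}
# write_to_opentelemetry_span(span, value, key="humanloop") would store
# the following flat attributes on the span:
#   humanloop.file.path               -> "folder/name"
#   humanloop.file.prompt.temperature -> 0.8
```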
diff --git a/src/humanloop/otel/processor.py b/src/humanloop/otel/processor.py
index c470d0d7..e3542d05 100644
--- a/src/humanloop/otel/processor.py
+++ b/src/humanloop/otel/processor.py
@@ -82,7 +82,6 @@ def _process_prompt(prompt_span: ReadableSpan, children_spans: list[ReadableSpan
def _process_tool(tool_span: ReadableSpan, children_spans: list[ReadableSpan]):
- # TODO: Use children_spans in the future
tool_log = read_from_opentelemetry_span(tool_span, key=HL_LOG_OT_KEY)
if tool_span.start_time:
tool_log["start_time"] = tool_span.start_time / 1e9
@@ -118,7 +117,9 @@ def _enrich_prompt_span_file(prompt_span: ReadableSpan, llm_provider_call_span:
gen_ai_object: dict[str, Any] = read_from_opentelemetry_span(llm_provider_call_span, key="gen_ai")
llm_object: dict[str, Any] = read_from_opentelemetry_span(llm_provider_call_span, key="llm")
- prompt = hl_file.get("prompt", {})
+ prompt = hl_file.get("prompt")
+ if not prompt:
+ prompt = {}
if not prompt.get("model"):
prompt["model"] = gen_ai_object.get("request", {}).get("model", None)
if not prompt.get("endpoint"):
@@ -159,7 +160,8 @@ def _enrich_prompt_span_log(prompt_span: ReadableSpan, llm_provider_call_span: R
hl_log["output_tokens"] = gen_ai_object.get("usage", {}).get("completion_tokens")
if len(gen_ai_object.get("completion", [])) > 0:
hl_log["finish_reason"] = gen_ai_object.get("completion", {}).get("0", {}).get("finish_reason")
- hl_log["messages"] = gen_ai_object.get("prompt", [])
+ # Note: read_from_opentelemetry_span returns the list as a dict due to OTel conventions
+ hl_log["messages"] = gen_ai_object.get("prompt")
if prompt_span.start_time:
hl_log["start_time"] = prompt_span.start_time / 1e9
@@ -180,7 +182,7 @@ def _enrich_prompt_span_log(prompt_span: ReadableSpan, llm_provider_call_span: R
inputs[key] = parsed_value
except Exception as e:
logging.error(e)
- inputs = {}
+ inputs = None
finally:
hl_log["inputs"] = inputs
diff --git a/tests/decorators/test_prompt_decorator.py b/tests/decorators/test_prompt_decorator.py
index cbc9c831..998d355f 100644
--- a/tests/decorators/test_prompt_decorator.py
+++ b/tests/decorators/test_prompt_decorator.py
@@ -80,6 +80,9 @@ def _call_llm_base(provider: ModelProviders, model: str, messages: list[dict]) -
.message.content
)
except GroqNotFoundError:
+ # NOTE: Tests in this file are integration tests that rely on live LLM provider
+ # clients, so failures may simply be flakiness. If this happens, consider adding
+ # a skip mechanism similar to the one used for Groq
pytest.skip("GROQ not available")
if provider == "cohere":
client = cohere.Client(api_key=os.getenv("COHERE_API_KEY")) # type: ignore
diff --git a/tests/decorators/test_tool_decorator.py b/tests/decorators/test_tool_decorator.py
index c9faed30..e66bcf10 100644
--- a/tests/decorators/test_tool_decorator.py
+++ b/tests/decorators/test_tool_decorator.py
@@ -78,10 +78,10 @@ def test_calculator(a: Optional[float], b: float) -> float:
return a + b
assert test_calculator(3, 4) == 7
- assert len(spans := exporter.get_finished_spans()) == 1
- tool_kernel = ToolKernelRequest.model_validate(read_from_opentelemetry_span(spans[0], HL_FILE_OT_KEY)["tool"])
- assert test_calculator.json_schema["parameters"]["properties"]["a"] == {"type": ["number", "null"]}
- assert tool_kernel.function.parameters["required"] == ("b",) # type: ignore
+ assert len(exporter.get_finished_spans()) == 1
+ assert test_calculator.json_schema["parameters"]["properties"]["a"] == {
+ "type": ["number", "null"],
+ }
Validator.check_schema(test_calculator.json_schema)
@@ -108,6 +108,7 @@ def calculator(a: Optional[float], b) -> float:
},
"required": ("b",),
"type": "object",
+ "additionalProperties": False,
},
"strict": True,
}
@@ -142,6 +143,7 @@ def calculator(a: Optional[float], b: dict) -> float:
},
"required": ("b",),
"type": "object",
+ "additionalProperties": False,
},
"strict": True,
}
@@ -177,6 +179,7 @@ def calculator(a: Optional[float], b: Optional[list]) -> float:
},
"required": (),
"type": "object",
+ "additionalProperties": False,
},
"strict": True,
}
@@ -210,6 +213,7 @@ def calculator(a: Optional[float], b: Optional[tuple]) -> float:
},
"required": (),
"type": "object",
+ "additionalProperties": False,
},
"strict": True,
}
@@ -243,18 +247,9 @@ def foo(to_join: list[str]) -> str:
assert "a b c" == foo(to_join=["a", "b", "c"])
# THEN the function call results in a Span
- assert len(spans := exporter.get_finished_spans()) == 1
- # THEN a valid Tool Kernel can be parsed from the Span
- tool_kernel = ToolKernelRequest.model_validate(
- read_from_opentelemetry_span(
- spans[0],
- HL_FILE_OT_KEY,
- )["tool"]
- )
- # THEN the argument is present in the Tool Kernel
- assert "to_join" in tool_kernel.function.parameters["required"] # type: ignore
+ assert len(exporter.get_finished_spans()) == 1
# THEN the argument is correctly described in the JSON schema
- assert tool_kernel.function.parameters["properties"]["to_join"] == { # type: ignore
+ assert foo.json_schema["parameters"]["properties"]["to_join"] == { # type: ignore
"type": "array",
"items": {"type": "string"},
}
@@ -352,6 +347,7 @@ def foo():
"properties": {},
"required": [],
"type": "object",
+ "additionalProperties": False,
},
"strict": True,
}
From 791f6157d789d53d138555622be87c9ed40dac2e Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Wed, 30 Oct 2024 18:43:53 +0000
Subject: [PATCH 18/70] Fix for flaky replicate test
---
tests/decorators/test_prompt_decorator.py | 2 ++
1 file changed, 2 insertions(+)
diff --git a/tests/decorators/test_prompt_decorator.py b/tests/decorators/test_prompt_decorator.py
index 998d355f..ecf49c8a 100644
--- a/tests/decorators/test_prompt_decorator.py
+++ b/tests/decorators/test_prompt_decorator.py
@@ -113,6 +113,8 @@ def _call_llm_base(provider: ModelProviders, model: str, messages: list[dict]) -
},
):
output += str(event)
+ if not output:
+ pytest.skip("Replicate not available")
return output
raise ValueError(f"Unknown provider: {provider}")
From a4db56509909440c2b89fb077114e37f1d0fd727 Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Wed, 30 Oct 2024 20:13:17 +0000
Subject: [PATCH 19/70] Reverted changes to a file modified by Fern
---
src/humanloop/__init__.py | 294 +++++++++++++++++++-------------------
1 file changed, 147 insertions(+), 147 deletions(-)
diff --git a/src/humanloop/__init__.py b/src/humanloop/__init__.py
index a0283dd2..6134f370 100644
--- a/src/humanloop/__init__.py
+++ b/src/humanloop/__init__.py
@@ -1,9 +1,152 @@
# This file was auto-generated by Fern from our API Definition.
+from .types import (
+ AgentConfigResponse,
+ BaseModelsUserResponse,
+ BooleanEvaluatorStatsResponse,
+ ChatMessage,
+ ChatMessageContent,
+ ChatMessageContentItem,
+ ChatRole,
+ ChatToolType,
+ CodeEvaluatorRequest,
+ CommitRequest,
+ ConfigToolResponse,
+ CreateDatapointRequest,
+ CreateDatapointRequestTargetValue,
+ CreateEvaluatorLogResponse,
+ CreateFlowLogResponse,
+ CreatePromptLogResponse,
+ CreateToolLogResponse,
+ DashboardConfiguration,
+ DatapointResponse,
+ DatapointResponseTargetValue,
+ DatasetResponse,
+ DirectoryResponse,
+ DirectoryWithParentsAndChildrenResponse,
+ DirectoryWithParentsAndChildrenResponseFilesItem,
+ EnvironmentResponse,
+ EnvironmentTag,
+ EvaluatedVersionResponse,
+ EvaluateeRequest,
+ EvaluateeResponse,
+ EvaluationEvaluatorResponse,
+ EvaluationReportLogResponse,
+ EvaluationResponse,
+ EvaluationStats,
+ EvaluationStatus,
+ EvaluationsDatasetRequest,
+ EvaluationsRequest,
+ EvaluatorActivationDeactivationRequest,
+ EvaluatorActivationDeactivationRequestActivateItem,
+ EvaluatorActivationDeactivationRequestDeactivateItem,
+ EvaluatorAggregate,
+ EvaluatorArgumentsType,
+ EvaluatorConfigResponse,
+ EvaluatorJudgmentNumberLimit,
+ EvaluatorJudgmentOptionResponse,
+ EvaluatorLogResponse,
+ EvaluatorLogResponseJudgment,
+ EvaluatorResponse,
+ EvaluatorResponseSpec,
+ EvaluatorReturnTypeEnum,
+ ExternalEvaluatorRequest,
+ FeedbackType,
+ FileEnvironmentResponse,
+ FileEnvironmentResponseFile,
+ FileRequest,
+ FileType,
+ FilesToolType,
+ FlowKernelRequest,
+ FlowLogResponse,
+ FlowResponse,
+ FunctionTool,
+ FunctionToolChoice,
+ HttpValidationError,
+ HumanEvaluatorRequest,
+ HumanEvaluatorRequestReturnType,
+ ImageChatContent,
+ ImageUrl,
+ ImageUrlDetail,
+ InputResponse,
+ LinkedToolResponse,
+ ListDatasets,
+ ListEvaluators,
+ ListFlows,
+ ListPrompts,
+ ListTools,
+ LlmEvaluatorRequest,
+ LogResponse,
+ ModelEndpoints,
+ ModelProviders,
+ MonitoringEvaluatorEnvironmentRequest,
+ MonitoringEvaluatorResponse,
+ MonitoringEvaluatorState,
+ MonitoringEvaluatorVersionRequest,
+ NumericEvaluatorStatsResponse,
+ ObservabilityStatus,
+ OverallStats,
+ PaginatedDataEvaluationReportLogResponse,
+ PaginatedDataEvaluatorResponse,
+ PaginatedDataFlowResponse,
+ PaginatedDataLogResponse,
+ PaginatedDataPromptResponse,
+ PaginatedDataToolResponse,
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse,
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem,
+ PaginatedDatapointResponse,
+ PaginatedDatasetResponse,
+ PaginatedEvaluationResponse,
+ PaginatedPromptLogResponse,
+ PaginatedSessionResponse,
+ PlatformAccessEnum,
+ ProjectSortBy,
+ PromptCallLogResponse,
+ PromptCallResponse,
+ PromptCallResponseToolChoice,
+ PromptCallStreamResponse,
+ PromptKernelRequest,
+ PromptKernelRequestStop,
+ PromptKernelRequestTemplate,
+ PromptLogResponse,
+ PromptLogResponseToolChoice,
+ PromptResponse,
+ PromptResponseStop,
+ PromptResponseTemplate,
+ ProviderApiKeys,
+ ResponseFormat,
+ ResponseFormatType,
+ SelectEvaluatorStatsResponse,
+ SortOrder,
+ TextChatContent,
+ TextEvaluatorStatsResponse,
+ TimeUnit,
+ ToolCall,
+ ToolChoice,
+ ToolFunction,
+ ToolKernelRequest,
+ ToolLogResponse,
+ ToolResponse,
+ TraceStatus,
+ UpdateDatesetAction,
+ UpdateEvaluationStatusRequest,
+ UserResponse,
+ Valence,
+ ValidationError,
+ ValidationErrorLocItem,
+ VersionDeploymentResponse,
+ VersionDeploymentResponseFile,
+ VersionIdResponse,
+ VersionIdResponseVersion,
+ VersionReferenceResponse,
+ VersionStatsResponse,
+ VersionStatsResponseEvaluatorVersionStatsItem,
+ VersionStatus,
+)
+from .errors import UnprocessableEntityError
from . import datasets, directories, evaluations, evaluators, files, flows, logs, prompts, tools
from .client import AsyncHumanloop, Humanloop
from .environment import HumanloopEnvironment
-from .errors import UnprocessableEntityError
from .evaluators import (
CreateEvaluatorLogRequestJudgment,
CreateEvaluatorLogRequestJudgmentParams,
@@ -53,9 +196,9 @@
EvaluationEvaluatorResponseParams,
EvaluationLogResponseParams,
EvaluationResponseParams,
+ EvaluationStatsParams,
EvaluationsDatasetRequestParams,
EvaluationsRequestParams,
- EvaluationStatsParams,
EvaluatorActivationDeactivationRequestActivateItemParams,
EvaluatorActivationDeactivationRequestDeactivateItemParams,
EvaluatorActivationDeactivationRequestParams,
@@ -103,12 +246,12 @@
PaginatedDataEvaluatorResponseParams,
PaginatedDataFlowResponseParams,
PaginatedDataLogResponseParams,
- PaginatedDatapointResponseParams,
PaginatedDataPromptResponseParams,
- PaginatedDatasetResponseParams,
PaginatedDataToolResponseParams,
PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseParams,
PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams,
+ PaginatedDatapointResponseParams,
+ PaginatedDatasetResponseParams,
PaginatedEvaluationResponseParams,
PromptCallLogResponseParams,
PromptCallResponseParams,
@@ -147,149 +290,6 @@
VersionStatsResponseEvaluatorVersionStatsItemParams,
VersionStatsResponseParams,
)
-from .types import (
- AgentConfigResponse,
- BaseModelsUserResponse,
- BooleanEvaluatorStatsResponse,
- ChatMessage,
- ChatMessageContent,
- ChatMessageContentItem,
- ChatRole,
- ChatToolType,
- CodeEvaluatorRequest,
- CommitRequest,
- ConfigToolResponse,
- CreateDatapointRequest,
- CreateDatapointRequestTargetValue,
- CreateEvaluatorLogResponse,
- CreateFlowLogResponse,
- CreatePromptLogResponse,
- CreateToolLogResponse,
- DashboardConfiguration,
- DatapointResponse,
- DatapointResponseTargetValue,
- DatasetResponse,
- DirectoryResponse,
- DirectoryWithParentsAndChildrenResponse,
- DirectoryWithParentsAndChildrenResponseFilesItem,
- EnvironmentResponse,
- EnvironmentTag,
- EvaluatedVersionResponse,
- EvaluateeRequest,
- EvaluateeResponse,
- EvaluationEvaluatorResponse,
- EvaluationReportLogResponse,
- EvaluationResponse,
- EvaluationsDatasetRequest,
- EvaluationsRequest,
- EvaluationStats,
- EvaluationStatus,
- EvaluatorActivationDeactivationRequest,
- EvaluatorActivationDeactivationRequestActivateItem,
- EvaluatorActivationDeactivationRequestDeactivateItem,
- EvaluatorAggregate,
- EvaluatorArgumentsType,
- EvaluatorConfigResponse,
- EvaluatorJudgmentNumberLimit,
- EvaluatorJudgmentOptionResponse,
- EvaluatorLogResponse,
- EvaluatorLogResponseJudgment,
- EvaluatorResponse,
- EvaluatorResponseSpec,
- EvaluatorReturnTypeEnum,
- ExternalEvaluatorRequest,
- FeedbackType,
- FileEnvironmentResponse,
- FileEnvironmentResponseFile,
- FileRequest,
- FilesToolType,
- FileType,
- FlowKernelRequest,
- FlowLogResponse,
- FlowResponse,
- FunctionTool,
- FunctionToolChoice,
- HttpValidationError,
- HumanEvaluatorRequest,
- HumanEvaluatorRequestReturnType,
- ImageChatContent,
- ImageUrl,
- ImageUrlDetail,
- InputResponse,
- LinkedToolResponse,
- ListDatasets,
- ListEvaluators,
- ListFlows,
- ListPrompts,
- ListTools,
- LlmEvaluatorRequest,
- LogResponse,
- ModelEndpoints,
- ModelProviders,
- MonitoringEvaluatorEnvironmentRequest,
- MonitoringEvaluatorResponse,
- MonitoringEvaluatorState,
- MonitoringEvaluatorVersionRequest,
- NumericEvaluatorStatsResponse,
- ObservabilityStatus,
- OverallStats,
- PaginatedDataEvaluationReportLogResponse,
- PaginatedDataEvaluatorResponse,
- PaginatedDataFlowResponse,
- PaginatedDataLogResponse,
- PaginatedDatapointResponse,
- PaginatedDataPromptResponse,
- PaginatedDatasetResponse,
- PaginatedDataToolResponse,
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse,
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem,
- PaginatedEvaluationResponse,
- PaginatedPromptLogResponse,
- PaginatedSessionResponse,
- PlatformAccessEnum,
- ProjectSortBy,
- PromptCallLogResponse,
- PromptCallResponse,
- PromptCallResponseToolChoice,
- PromptCallStreamResponse,
- PromptKernelRequest,
- PromptKernelRequestStop,
- PromptKernelRequestTemplate,
- PromptLogResponse,
- PromptLogResponseToolChoice,
- PromptResponse,
- PromptResponseStop,
- PromptResponseTemplate,
- ProviderApiKeys,
- ResponseFormat,
- ResponseFormatType,
- SelectEvaluatorStatsResponse,
- SortOrder,
- TextChatContent,
- TextEvaluatorStatsResponse,
- TimeUnit,
- ToolCall,
- ToolChoice,
- ToolFunction,
- ToolKernelRequest,
- ToolLogResponse,
- ToolResponse,
- TraceStatus,
- UpdateDatesetAction,
- UpdateEvaluationStatusRequest,
- UserResponse,
- Valence,
- ValidationError,
- ValidationErrorLocItem,
- VersionDeploymentResponse,
- VersionDeploymentResponseFile,
- VersionIdResponse,
- VersionIdResponseVersion,
- VersionReferenceResponse,
- VersionStatsResponse,
- VersionStatsResponseEvaluatorVersionStatsItem,
- VersionStatus,
-)
from .version import __version__
__all__ = [
From 618bcf5c537e190d02d78a692b95840fd886bbcb Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Wed, 30 Oct 2024 20:22:25 +0000
Subject: [PATCH 20/70] Code is self-aware and modified the docstring to
account for daylight saving time
---
src/humanloop/flows/client.py | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/src/humanloop/flows/client.py b/src/humanloop/flows/client.py
index 4b4671e7..1884b45c 100644
--- a/src/humanloop/flows/client.py
+++ b/src/humanloop/flows/client.py
@@ -193,10 +193,10 @@ def log(
output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.",
trace_status="incomplete",
start_time=datetime.datetime.fromisoformat(
- "2024-07-08 21:40:35+00:00",
+ "2024-07-08 22:40:35+00:00",
),
end_time=datetime.datetime.fromisoformat(
- "2024-07-08 21:40:39+00:00",
+ "2024-07-08 22:40:39+00:00",
),
)
"""
@@ -1357,10 +1357,10 @@ async def main() -> None:
output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.",
trace_status="incomplete",
start_time=datetime.datetime.fromisoformat(
- "2024-07-08 21:40:35+00:00",
+ "2024-07-08 22:40:35+00:00",
),
end_time=datetime.datetime.fromisoformat(
- "2024-07-08 21:40:39+00:00",
+ "2024-07-08 22:40:39+00:00",
),
)
From 163193a125137477f703bc263b8201ec2bfb9d42 Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Wed, 30 Oct 2024 20:33:07 +0000
Subject: [PATCH 21/70] Replicate integration tests still flaky
---
tests/decorators/test_prompt_decorator.py | 25 +++++++++++++++--------
tests/decorators/test_tool_decorator.py | 1 -
2 files changed, 16 insertions(+), 10 deletions(-)
diff --git a/tests/decorators/test_prompt_decorator.py b/tests/decorators/test_prompt_decorator.py
index ecf49c8a..980d8d29 100644
--- a/tests/decorators/test_prompt_decorator.py
+++ b/tests/decorators/test_prompt_decorator.py
@@ -21,6 +21,9 @@
from opentelemetry.sdk.trace import Tracer
from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
+# replicate has no typing stubs; ruff wants this import placed here
+from replicate.exceptions import ModelError as ReplicateModelError # type: ignore
+
_PROVIDER_AND_MODEL = [
("openai", "gpt-4o"),
("groq", "llama3-8b-8192"),
@@ -103,16 +106,20 @@ def _call_llm_base(provider: ModelProviders, model: str, messages: list[dict]) -
).text
if provider == "replicate":
# TODO: Instrumentor only picks up methods on module-level, not client level
+ # This should be documented somewhere or changed
replicate.default_client._api_token = os.getenv("REPLICATE_API_KEY")
- output = ""
- for event in replicate.run(
- model,
- input={
- "prompt": messages[0]["content"] + " " + messages[-1]["content"],
- "temperature": 0.8,
- },
- ):
- output += str(event)
+ try:
+ output = ""
+ for event in replicate.run(
+ model,
+ input={
+ "prompt": messages[0]["content"] + " " + messages[-1]["content"],
+ "temperature": 0.8,
+ },
+ ):
+ output += str(event)
+ except ReplicateModelError:
+ pytest.skip("Replicate not available")
if not output:
pytest.skip("Replicate not available")
return output
diff --git a/tests/decorators/test_tool_decorator.py b/tests/decorators/test_tool_decorator.py
index e66bcf10..2b3475fd 100644
--- a/tests/decorators/test_tool_decorator.py
+++ b/tests/decorators/test_tool_decorator.py
@@ -4,7 +4,6 @@
from humanloop.decorators.tool import tool
from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY
from humanloop.otel.helpers import read_from_opentelemetry_span
-from humanloop.types.tool_kernel_request import ToolKernelRequest
from jsonschema.protocols import Validator
from opentelemetry.sdk.trace import Tracer
from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
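The guard above treats provider-side failures as an environment problem rather than a test failure. A minimal sketch of the same pattern, with a hypothetical `call_provider` standing in for the Replicate call:

```python
import pytest


def call_provider() -> str:
    # Stand-in for the external call; real code would hit the provider API
    raise RuntimeError("model unavailable")


def test_provider_output() -> None:
    try:
        output = call_provider()
    except RuntimeError:
        # Provider outage or flakiness: skip instead of failing the suite
        pytest.skip("Provider not available")
    if not output:
        pytest.skip("Provider not available")
    assert output
```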
From baee7cd5ce91a5987f157190b282bf6ce4a2e8c8 Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Wed, 30 Oct 2024 20:48:48 +0000
Subject: [PATCH 22/70] false positive from mypy
From abf09e6f1e211be388912aebae52f9010cd28f0e Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Thu, 31 Oct 2024 09:41:19 +0000
Subject: [PATCH 23/70] Expanded some docstrings
---
src/humanloop/client.py | 6 ++++++
src/humanloop/decorators/tool.py | 2 +-
src/humanloop/otel/exporter.py | 4 ++++
src/humanloop/otel/processor.py | 14 ++++++++++++--
4 files changed, 23 insertions(+), 3 deletions(-)
diff --git a/src/humanloop/client.py b/src/humanloop/client.py
index 3d2e0927..ecec331c 100644
--- a/src/humanloop/client.py
+++ b/src/humanloop/client.py
@@ -395,6 +395,12 @@ def entrypoint():
print(f"Assistant: {response}")
```
+ In this example, the Flow instruments a conversational agent where the
+ Prompt defined in `call_llm` is called multiple times in a loop. Calling
+ `entrypoint` will create a Flow Trace under which multiple Prompt Logs
+ will be nested, allowing you to track the whole conversation session
+ between the user and the assistant.
+
:param path: The path to the Flow. If not provided, the function name
will be used as the path and the File will be created in the root
of your Humanloop organization's workspace.
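A hedged sketch of the session pattern this docstring describes, assuming the `client.flow`/`client.prompt` decorator factories shown elsewhere in this series; the paths, API key, and LLM call are illustrative:

```python
from humanloop import Humanloop

client = Humanloop(api_key="hl-...")  # placeholder key


@client.prompt(path="agent/call_llm")  # assumed path, for illustration
def call_llm(messages: list[dict]) -> str:
    # A real implementation would call an instrumented LLM provider here
    return "assistant reply"


@client.flow(path="agent/conversation")
def entrypoint() -> None:
    messages: list[dict] = []
    for user_input in ["Hi!", "Tell me more"]:
        messages.append({"role": "user", "content": user_input})
        reply = call_llm(messages)  # nested as a Prompt Log in the Flow Trace
        messages.append({"role": "assistant", "content": reply})


entrypoint()  # creates one Flow Trace containing two Prompt Logs
```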
diff --git a/src/humanloop/decorators/tool.py b/src/humanloop/decorators/tool.py
index 092beac7..42ade20b 100644
--- a/src/humanloop/decorators/tool.py
+++ b/src/humanloop/decorators/tool.py
@@ -136,7 +136,7 @@ def _build_function_property(func: Callable, strict: bool) -> ToolFunctionParams
class _JSONSchemaFunctionParameters(TypedDict):
type: str
- properties: dict[str, dict]
+ properties: dict[str, dict, list]
required: list[str]
additionalProperties: Literal[False]
diff --git a/src/humanloop/otel/exporter.py b/src/humanloop/otel/exporter.py
index c141523a..93d0e34e 100644
--- a/src/humanloop/otel/exporter.py
+++ b/src/humanloop/otel/exporter.py
@@ -33,6 +33,10 @@ def __init__(
client: "BaseHumanloop",
worker_threads: Optional[int] = None,
) -> None:
+ """Upload Spans created by SDK decorators to Humanloop.
+
+ Spans not created by Humanloop SDK decorators will be ignored.
+ """
super().__init__()
self._client = client
self._uploaded_log_ids: dict[
diff --git a/src/humanloop/otel/processor.py b/src/humanloop/otel/processor.py
index e3542d05..b5ae6b2b 100644
--- a/src/humanloop/otel/processor.py
+++ b/src/humanloop/otel/processor.py
@@ -20,7 +20,17 @@
class HumanloopSpanProcessor(SimpleSpanProcessor):
"""Enrich Humanloop spans with data from their children spans.
- Spans that are not created by Humanloop decorators will be passed
+ The decorators add Instrumentors to the OpenTelemetry TracerProvider
+ that log interactions with common LLM libraries. These Instrumentors
+ produce Spans which contain information that can be used to enrich the
+ Humanloop File Kernels.
+
+ For example, Instrumentors for LLM provider libraries intercept
+ hyperparameters used in the API call to the model to build the
+ Prompt File definition when using the @prompt decorator.
+
+ Spans that are not created by Humanloop decorators, such as
+ those created by the Instrumentors mentioned above, will be passed
to the Exporter as they are.
"""
@@ -29,7 +39,7 @@ def __init__(self, exporter: SpanExporter) -> None:
# Span parent to Span children map
self._children: dict[int, list] = defaultdict(list)
- # TODO: Could override on_start and process Flow spans ahead of time
+ # NOTE: Could override on_start and process Flow spans ahead of time
# and PATCH the created Logs in on_end. A special type of ReadableSpan could be
# used for this
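The enrichment flow the docstring describes can be pictured as a buffering span processor: hold each finished child span keyed by its parent, then, when the parent ends, pull data from the buffered children before export. A minimal sketch of that pattern, not the SDK's actual implementation:

```python
from collections import defaultdict

from opentelemetry.sdk.trace import ReadableSpan
from opentelemetry.sdk.trace.export import SimpleSpanProcessor, SpanExporter


class EnrichingSpanProcessor(SimpleSpanProcessor):
    def __init__(self, exporter: SpanExporter) -> None:
        super().__init__(exporter)
        # Parent span id -> child spans finished so far
        self._children: dict[int, list[ReadableSpan]] = defaultdict(list)

    def on_end(self, span: ReadableSpan) -> None:
        if span.parent is not None:
            # Remember the child; its parent may still need its data
            self._children[span.parent.span_id].append(span)
        for child in self._children.pop(span.context.span_id, []):
            # e.g. copy intercepted LLM hyperparameters from `child` onto
            # the parent's attributes before it is exported
            pass
        # Non-Humanloop spans would be passed through unchanged
        self.span_exporter.export([span])
```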
From f309e360e32500ef5f01729197b0d6318106b93a Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Thu, 31 Oct 2024 10:08:40 +0000
Subject: [PATCH 24/70] Better typing when processing prompt kernel
---
src/humanloop/decorators/tool.py | 2 +-
src/humanloop/otel/processor.py | 69 ++++++++++++++++++++++++--------
2 files changed, 53 insertions(+), 18 deletions(-)
diff --git a/src/humanloop/decorators/tool.py b/src/humanloop/decorators/tool.py
index 42ade20b..30a26c8c 100644
--- a/src/humanloop/decorators/tool.py
+++ b/src/humanloop/decorators/tool.py
@@ -136,7 +136,7 @@ def _build_function_property(func: Callable, strict: bool) -> ToolFunctionParams
class _JSONSchemaFunctionParameters(TypedDict):
type: str
- properties: dict[str, dict, list]
+ properties: dict[str, typing.Union[dict, list]]
required: list[str]
additionalProperties: Literal[False]
diff --git a/src/humanloop/otel/processor.py b/src/humanloop/otel/processor.py
index b5ae6b2b..103cad54 100644
--- a/src/humanloop/otel/processor.py
+++ b/src/humanloop/otel/processor.py
@@ -7,6 +7,7 @@
import parse # type: ignore
from opentelemetry.sdk.trace import ReadableSpan
from opentelemetry.sdk.trace.export import SimpleSpanProcessor, SpanExporter
+from pydantic import ValidationError as PydanticValidationError
from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY
from humanloop.otel.helpers import (
@@ -15,6 +16,10 @@
read_from_opentelemetry_span,
write_to_opentelemetry_span,
)
+from humanloop.requests.prompt_kernel_request import PromptKernelRequestParams
+from humanloop.types.prompt_kernel_request import PromptKernelRequest
+
+logger = logging.getLogger("humanloop.sdk")
class HumanloopSpanProcessor(SimpleSpanProcessor):
@@ -79,16 +84,20 @@ def _process_span_dispatch(span: ReadableSpan, children_spans: list[ReadableSpan
_process_flow(flow_span=span, children_spans=children_spans)
return
else:
- logging.error("Invalid span type")
+ logger.error("Unknown Humanloop File Span %s", span)
def _process_prompt(prompt_span: ReadableSpan, children_spans: list[ReadableSpan]):
if len(children_spans) == 0:
return
- child_span = children_spans[0]
- assert is_llm_provider_call(child_span)
- _enrich_prompt_span_file(prompt_span, child_span)
- _enrich_prompt_span_log(prompt_span, child_span)
+ for child_span in children_spans:
+ if is_llm_provider_call(child_span):
+ _enrich_prompt_span_file(prompt_span, child_span)
+ _enrich_prompt_span_log(prompt_span, child_span)
+ # NOTE: @prompt decorator expects a single LLM provider call
+ # to happen in the function. If there is more than one, we
+ # ignore the rest
+ break
def _process_tool(tool_span: ReadableSpan, children_spans: list[ReadableSpan]):
@@ -127,30 +136,56 @@ def _enrich_prompt_span_file(prompt_span: ReadableSpan, llm_provider_call_span:
gen_ai_object: dict[str, Any] = read_from_opentelemetry_span(llm_provider_call_span, key="gen_ai")
llm_object: dict[str, Any] = read_from_opentelemetry_span(llm_provider_call_span, key="llm")
- prompt = hl_file.get("prompt")
- if not prompt:
- prompt = {}
- if not prompt.get("model"):
+ prompt = PromptKernelRequestParams(
+ model=hl_file.get("prompt", {}).get("model"),
+ endpoint=hl_file.get("prompt", {}).get("endpoint"),
+ template=hl_file.get("prompt", {}).get("template"),
+ provider=hl_file.get("prompt", {}).get("provider"),
+ temperature=hl_file.get("prompt", {}).get("temperature"),
+ max_tokens=hl_file.get("prompt", {}).get("max_tokens"),
+ top_p=hl_file.get("prompt", {}).get("top_p"),
+ stop=hl_file.get("prompt", {}).get("stop"),
+ presence_penalty=hl_file.get("prompt", {}).get("presence_penalty"),
+ frequency_penalty=hl_file.get("prompt", {}).get("frequency_penalty"),
+ other=hl_file.get("prompt", {}).get("other"),
+ seed=hl_file.get("prompt", {}).get("seed"),
+ response_format=hl_file.get("prompt", {}).get("response_format"),
+ tools=[],
+ linked_tools=[],
+ attributes={},
+ )
+ # Check if the keys were set via the @prompt decorator
+ # Otherwise use the information from the intercepted LLM
+ # provider call
+ if not prompt["model"]:
prompt["model"] = gen_ai_object.get("request", {}).get("model", None)
- if not prompt.get("endpoint"):
+ if not prompt["endpoint"]:
prompt["endpoint"] = llm_object.get("request", {}).get("type")
- if not prompt.get("provider"):
+ if not prompt["provider"]:
prompt["provider"] = gen_ai_object.get("system", None)
if prompt["provider"]:
+ # Normalize provider name; Interceptors output the names with
+ # different capitalization e.g. OpenAI instead of openai
prompt["provider"] = prompt["provider"].lower()
- if not prompt.get("temperature"):
+ if not prompt["temperature"]:
prompt["temperature"] = gen_ai_object.get("request", {}).get("temperature", None)
- if not prompt.get("top_p"):
+ if not prompt["top_p"]:
prompt["top_p"] = gen_ai_object.get("request", {}).get("top_p", None)
- if not prompt.get("max_tokens"):
+ if not prompt["max_tokens"]:
prompt["max_tokens"] = gen_ai_object.get("request", {}).get("max_tokens", None)
- if not prompt.get("presence_penalty"):
+ if not prompt["presence_penalty"]:
prompt["presence_penalty"] = llm_object.get("presence_penalty", None)
- if not prompt.get("frequency_penalty"):
+ if not prompt["frequency_penalty"]:
prompt["frequency_penalty"] = llm_object.get("frequency_penalty", None)
- hl_file["prompt"] = prompt
+ try:
+ # Validate the Prompt Kernel
+ PromptKernelRequest.model_validate(obj=prompt)
+ except PydanticValidationError as e:
+ logger.error("Could not validate Prompt Kernel extracted from Span: %s", e)
+ # Write the enriched Prompt Kernel back to the Span
+ hl_file["prompt"] = prompt
write_to_opentelemetry_span(
span=prompt_span,
key=HL_FILE_OT_KEY,
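Stripped of the OpenTelemetry plumbing, the enrichment above is a precedence merge: decorator-supplied kernel values win, intercepted provider-call values fill the gaps, and the result is validated with Pydantic on a log-and-continue basis. A standalone sketch with a stand-in model:

```python
from typing import Any, Optional

from pydantic import BaseModel, ValidationError


class PromptKernel(BaseModel):  # stand-in for PromptKernelRequest
    model: Optional[str] = None
    provider: Optional[str] = None
    temperature: Optional[float] = None


def enrich_kernel(decorated: dict[str, Any], intercepted: dict[str, Any]) -> dict[str, Any]:
    kernel = dict(decorated)
    for field in ("model", "provider", "temperature"):
        # Value set via the decorator wins; otherwise fall back to the
        # value intercepted from the LLM provider call
        kernel[field] = kernel.get(field) or intercepted.get(field)
    if kernel.get("provider"):
        # Instrumentors report provider names with varying capitalization
        kernel["provider"] = kernel["provider"].lower()
    try:
        PromptKernel.model_validate(kernel)
    except ValidationError as error:
        print(f"Could not validate kernel: {error}")  # log, do not raise
    return kernel


print(enrich_kernel({"model": "gpt-4o"}, {"provider": "OpenAI", "temperature": 0.8}))
# {'model': 'gpt-4o', 'provider': 'openai', 'temperature': 0.8}
```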
From 473e8cde1cc83893f0320aa929852e1efd1d4452 Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Thu, 31 Oct 2024 12:26:17 +0000
Subject: [PATCH 25/70] Added attributes on prompt + jsonify log output if not
string
---
src/humanloop/client.py | 2 +
src/humanloop/decorators/prompt.py | 10 ++--
src/humanloop/otel/exporter.py | 15 +++++
src/humanloop/otel/helpers.py | 5 ++
src/humanloop/otel/processor.py | 70 ++++++++---------------
tests/decorators/test_prompt_decorator.py | 52 +++++++++++++++++
6 files changed, 103 insertions(+), 51 deletions(-)
diff --git a/src/humanloop/client.py b/src/humanloop/client.py
index ecec331c..d62c247e 100644
--- a/src/humanloop/client.py
+++ b/src/humanloop/client.py
@@ -143,6 +143,7 @@ def prompt(
*,
path: Optional[str] = None,
model: Optional[str] = None,
+ attributes: Optional[dict[str, Any]] = None,
endpoint: Optional[ModelEndpoints] = None,
template: Optional[PromptKernelRequestTemplate] = None,
provider: Optional[ModelProviders] = None,
@@ -264,6 +265,7 @@ def call_llm(messages):
return prompt_decorator_factory(
path=path,
model=model,
+ attributes=attributes,
endpoint=endpoint,
template=template,
provider=provider,
diff --git a/src/humanloop/decorators/prompt.py b/src/humanloop/decorators/prompt.py
index a9de046f..088b550a 100644
--- a/src/humanloop/decorators/prompt.py
+++ b/src/humanloop/decorators/prompt.py
@@ -1,9 +1,10 @@
+import inspect
import uuid
from functools import wraps
from typing import Any, Callable, Optional
from humanloop.otel import get_humanloop_sdk_tracer, get_trace_parent_metadata, pop_trace_context, push_trace_context
-from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY, HL_TRACE_METADATA_KEY
+from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY, HL_OT_EMPTY_VALUE, HL_TRACE_METADATA_KEY
from humanloop.otel.helpers import write_to_opentelemetry_span
from humanloop.types.model_endpoints import ModelEndpoints
from humanloop.types.model_providers import ModelProviders
@@ -16,6 +17,7 @@ def prompt(
path: Optional[str] = None,
# TODO: Template can be a list of objects?
model: Optional[str] = None,
+ attributes: Optional[dict[str, Any]] = None,
endpoint: Optional[ModelEndpoints] = None,
template: Optional[PromptKernelRequestTemplate] = None,
provider: Optional[ModelProviders] = None,
@@ -52,9 +54,6 @@ def decorator(func: Callable):
raise ValueError(f"{func.__name__}: Frequency penalty parameter must be between -2 and 2")
prompt_kernel["frequency_penalty"] = frequency_penalty
- for attr in [model, endpoint, template, provider, max_tokens, stop, other, seed, response_format]:
- if attr is not None:
- prompt_kernel[attr] = attr # type: ignore
for attr_name, attr_value in {
"model": model,
"endpoint": endpoint,
@@ -65,6 +64,7 @@ def decorator(func: Callable):
"other": other,
"seed": seed,
"response_format": response_format,
+ "attributes": attributes if attributes != {} else None,
}.items():
if attr_value is not None:
prompt_kernel[attr_name] = attr_value # type: ignore
@@ -102,7 +102,7 @@ def wrapper(*args, **kwargs):
# Values not specified in the decorator will be
# completed with the intercepted values from the
# Instrumentors for LLM providers
- "prompt": prompt_kernel or None, # noqa: F821
+ "prompt": prompt_kernel or HL_OT_EMPTY_VALUE, # noqa: F821
},
)
diff --git a/src/humanloop/otel/exporter.py b/src/humanloop/otel/exporter.py
index 93d0e34e..eee8925a 100644
--- a/src/humanloop/otel/exporter.py
+++ b/src/humanloop/otel/exporter.py
@@ -1,3 +1,4 @@
+import json
import logging
import typing
from queue import Queue
@@ -162,6 +163,12 @@ def _export_prompt(self, span: ReadableSpan) -> None:
trace_parent_id = None
prompt: PromptKernelRequestParams = file_object["prompt"]
path: str = file_object["path"]
+ if not isinstance(log_object["output"], str):
+ # Output is expected to be a string; if the decorated
+ # function does not return one, jsonify it
+ log_object["output"] = json.dumps(log_object["output"])
+ if "attributes" not in prompt or not prompt["attributes"]:
+ prompt["attributes"] = {}
response = self._client.prompts.log(
path=path,
prompt=prompt,
@@ -189,6 +196,10 @@ def _export_tool(self, span: ReadableSpan) -> None:
if tool.get("setup_values", HL_OT_EMPTY_VALUE) == HL_OT_EMPTY_VALUE:
tool["setup_values"] = {}
path: str = file_object["path"]
+ if not isinstance(log_object["output"], str):
+ # Output is expected to be a string; if the decorated
+ # function does not return one, jsonify it
+ log_object["output"] = json.dumps(log_object["output"])
response = self._client.tools.log(
path=path,
tool=tool,
@@ -219,6 +230,10 @@ def _export_flow(self, span: ReadableSpan) -> None:
else:
flow = file_object["flow"]
path: str = file_object["path"]
+ if not isinstance(log_object["output"], str):
+ # Output is expected to be a string; if the decorated
+ # function does not return one, jsonify it
+ log_object["output"] = json.dumps(log_object["output"])
response = self._client.flows.log(
path=path,
flow=flow,
diff --git a/src/humanloop/otel/helpers.py b/src/humanloop/otel/helpers.py
index 9598c3ed..5513af93 100644
--- a/src/humanloop/otel/helpers.py
+++ b/src/humanloop/otel/helpers.py
@@ -92,6 +92,11 @@ def write_to_opentelemetry_span(
}
```
"""
+ # Remove all keys with the prefix to avoid duplicates (copy the keys: entries are deleted while iterating)
+ for attribute_key in list(span._attributes.keys()): # type: ignore
+ if attribute_key.startswith(key):
+ del span._attributes[attribute_key] # type: ignore
+
while len(work_stack) > 0:
key, value = work_stack.pop() # type: ignore
if isinstance(value, dict):
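For context on the cleanup above: nested dicts are written to spans as dot-flattened attribute keys, so re-writing a key must first drop stale entries under the same prefix, or old and new values would coexist. A hedged sketch of that flattening scheme (the real helper walks a work stack):

```python
def flatten(prefix: str, value: dict) -> dict[str, object]:
    out: dict[str, object] = {}
    for key, item in value.items():
        full_key = f"{prefix}.{key}"
        if isinstance(item, dict):
            out.update(flatten(full_key, item))  # recurse into nested dicts
        else:
            out[full_key] = item
    return out


attributes = {"prompt.model": "gpt-3.5-turbo", "prompt.temperature": 0.5}
# Drop stale "prompt.*" entries before writing the new value
for key in list(attributes):
    if key.startswith("prompt"):
        del attributes[key]
attributes.update(flatten("prompt", {"model": "gpt-4o"}))
print(attributes)  # {'prompt.model': 'gpt-4o'}
```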
diff --git a/src/humanloop/otel/processor.py b/src/humanloop/otel/processor.py
index 103cad54..168cb4b5 100644
--- a/src/humanloop/otel/processor.py
+++ b/src/humanloop/otel/processor.py
@@ -9,7 +9,7 @@
from opentelemetry.sdk.trace.export import SimpleSpanProcessor, SpanExporter
from pydantic import ValidationError as PydanticValidationError
-from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY
+from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY, HL_OT_EMPTY_VALUE
from humanloop.otel.helpers import (
is_humanloop_span,
is_llm_provider_call,
@@ -92,8 +92,8 @@ def _process_prompt(prompt_span: ReadableSpan, children_spans: list[ReadableSpan
return
for child_span in children_spans:
if is_llm_provider_call(child_span):
- _enrich_prompt_span_file(prompt_span, child_span)
- _enrich_prompt_span_log(prompt_span, child_span)
+ _enrich_prompt_kernel(prompt_span, child_span)
+ _enrich_prompt_log(prompt_span, child_span)
# NOTE: @prompt decorator expects a single LLM provider call
# to happen in the function. If there is more than one, we
# ignore the rest
@@ -131,52 +131,30 @@ def _process_flow(flow_span: ReadableSpan, children_spans: list[ReadableSpan]):
)
-def _enrich_prompt_span_file(prompt_span: ReadableSpan, llm_provider_call_span: ReadableSpan):
+def _enrich_prompt_kernel(prompt_span: ReadableSpan, llm_provider_call_span: ReadableSpan):
hl_file: dict[str, Any] = read_from_opentelemetry_span(prompt_span, key=HL_FILE_OT_KEY)
gen_ai_object: dict[str, Any] = read_from_opentelemetry_span(llm_provider_call_span, key="gen_ai")
llm_object: dict[str, Any] = read_from_opentelemetry_span(llm_provider_call_span, key="llm")
- prompt = PromptKernelRequestParams(
- model=hl_file.get("prompt", {}).get("model"),
- endpoint=hl_file.get("prompt", {}).get("endpoint"),
- template=hl_file.get("prompt", {}).get("template"),
- provider=hl_file.get("prompt", {}).get("provider"),
- temperature=hl_file.get("prompt", {}).get("temperature"),
- max_tokens=hl_file.get("prompt", {}).get("max_tokens"),
- top_p=hl_file.get("prompt", {}).get("top_p"),
- stop=hl_file.get("prompt", {}).get("stop"),
- presence_penalty=hl_file.get("prompt", {}).get("presence_penalty"),
- frequency_penalty=hl_file.get("prompt", {}).get("frequency_penalty"),
- other=hl_file.get("prompt", {}).get("other"),
- seed=hl_file.get("prompt", {}).get("seed"),
- response_format=hl_file.get("prompt", {}).get("response_format"),
- tools=[],
- linked_tools=[],
- attributes={},
- )
- # Check if the keys were set via the @prompt decorator
- # Otherwise use the information from the intercepted LLM
- # provider call
- if not prompt["model"]:
- prompt["model"] = gen_ai_object.get("request", {}).get("model", None)
- if not prompt["endpoint"]:
- prompt["endpoint"] = llm_object.get("request", {}).get("type")
- if not prompt["provider"]:
- prompt["provider"] = gen_ai_object.get("system", None)
- if prompt["provider"]:
- # Normalize provider name; Interceptors output the names with
- # different capitalization e.g. OpenAI instead of openai
- prompt["provider"] = prompt["provider"].lower()
- if not prompt["temperature"]:
- prompt["temperature"] = gen_ai_object.get("request", {}).get("temperature", None)
- if not prompt["top_p"]:
- prompt["top_p"] = gen_ai_object.get("request", {}).get("top_p", None)
- if not prompt["max_tokens"]:
- prompt["max_tokens"] = gen_ai_object.get("request", {}).get("max_tokens", None)
- if not prompt["presence_penalty"]:
- prompt["presence_penalty"] = llm_object.get("presence_penalty", None)
- if not prompt["frequency_penalty"]:
- prompt["frequency_penalty"] = llm_object.get("frequency_penalty", None)
+ prompt: dict[str, Any] = hl_file.get("prompt") # type: ignore
+ if prompt == HL_OT_EMPTY_VALUE:
+ prompt = {}
+
+ # Check if the Prompt Kernel keys were assigned default values
+ # via the @prompt arguments. Otherwise use the information
+ # from the intercepted LLM provider call
+ prompt["model"] = prompt.get("model") or gen_ai_object.get("request", {}).get("model", None)
+ prompt["endpoint"] = prompt.get("endpoint") or llm_object.get("request", {}).get("type")
+ prompt["provider"] = prompt.get("provider") or gen_ai_object.get("system", None)
+ if prompt["provider"]:
+ # Normalize provider name; Interceptors output the names with
+ # different capitalization e.g. OpenAI instead of openai
+ prompt["provider"] = prompt["provider"].lower()
+ prompt["temperature"] = prompt.get("temperature") or gen_ai_object.get("request", {}).get("temperature", None)
+ prompt["top_p"] = prompt.get("top_p") or gen_ai_object.get("request", {}).get("top_p", None)
+ prompt["max_tokens"] = prompt.get("max_tokens") or gen_ai_object.get("request", {}).get("max_tokens", None)
+ prompt["presence_penalty"] = prompt.get("presence_penalty") or llm_object.get("presence_penalty", None)
+ prompt["frequency_penalty"] = prompt.get("frequency_penalty") or llm_object.get("frequency_penalty", None)
try:
# Validate the Prompt Kernel
@@ -194,7 +172,7 @@ def _enrich_prompt_span_file(prompt_span: ReadableSpan, llm_provider_call_span:
)
-def _enrich_prompt_span_log(prompt_span: ReadableSpan, llm_provider_call_span: ReadableSpan):
+def _enrich_prompt_log(prompt_span: ReadableSpan, llm_provider_call_span: ReadableSpan):
hl_file: dict[str, Any] = read_from_opentelemetry_span(prompt_span, key=HL_FILE_OT_KEY)
hl_log: dict[str, Any] = read_from_opentelemetry_span(prompt_span, key=HL_LOG_OT_KEY)
gen_ai_object: dict[str, Any] = read_from_opentelemetry_span(llm_provider_call_span, key="gen_ai")
diff --git a/tests/decorators/test_prompt_decorator.py b/tests/decorators/test_prompt_decorator.py
index 980d8d29..4e4ae7eb 100644
--- a/tests/decorators/test_prompt_decorator.py
+++ b/tests/decorators/test_prompt_decorator.py
@@ -259,3 +259,55 @@ def _call_llm(messages: list[ChatCompletionMessageParam]) -> Optional[str]:
)
# THEN an exception is raised
+
+
+@pytest.mark.parametrize(
+ "attributes_test_expected",
+ [
+ (
+ {"foo": "bar"},
+ {"foo": "bar"},
+ ),
+ (
+ {},
+ None,
+ ),
+ (
+ None,
+ None,
+ ),
+ ],
+)
+def test_prompt_attributes(
+ attributes_test_expected: tuple[dict[str, str], dict[str, str]],
+ call_llm_messages: list[ChatCompletionMessageParam],
+ opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter],
+):
+ test_attributes, expected_attributes = attributes_test_expected
+ _, exporter = opentelemetry_hl_test_configuration
+
+ @prompt(path=None, attributes=test_attributes)
+ def call_llm(messages: list[ChatCompletionMessageParam]) -> Optional[str]:
+ load_dotenv()
+ client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
+ return (
+ client.chat.completions.create(
+ model="gpt-4o",
+ messages=messages,
+ temperature=0.8,
+ )
+ .choices[0]
+ .message.content
+ )
+
+ call_llm(call_llm_messages)
+
+ assert len(exporter.get_finished_spans()) == 2
+
+ prompt_kernel = PromptKernelRequest.model_validate(
+ read_from_opentelemetry_span(
+ span=exporter.get_finished_spans()[1],
+ key=HL_FILE_OT_KEY,
+ )["prompt"] # type: ignore
+ )
+ assert prompt_kernel.attributes == expected_attributes
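The output normalization added in this patch reduces to a small rule: Log outputs must be strings, so any non-string return value from a decorated function is JSON-serialized before upload. As a standalone sketch (`normalize_output` is illustrative, not an SDK helper):

```python
import json
from typing import Any


def normalize_output(output: Any) -> str:
    if isinstance(output, str):
        return output
    # dicts, lists, numbers, booleans, None all become JSON text
    return json.dumps(output)


assert normalize_output("done") == "done"
assert normalize_output({"answer": 42}) == '{"answer": 42}'
```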
From 789706eb6c8896ea5c6e1f21539064705d7df6ef Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Thu, 31 Oct 2024 14:05:37 +0000
Subject: [PATCH 26/70] added retries on log attempts
---
src/humanloop/otel/exporter.py | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/src/humanloop/otel/exporter.py b/src/humanloop/otel/exporter.py
index eee8925a..f6860315 100644
--- a/src/humanloop/otel/exporter.py
+++ b/src/humanloop/otel/exporter.py
@@ -9,6 +9,7 @@
from opentelemetry.sdk.trace import ReadableSpan
from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult
+from humanloop.core.request_options import RequestOptions
from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY, HL_OT_EMPTY_VALUE, HL_TRACE_METADATA_KEY
from humanloop.otel.helpers import is_humanloop_span, read_from_opentelemetry_span
from humanloop.requests.flow_kernel_request import FlowKernelRequestParams
@@ -174,6 +175,7 @@ def _export_prompt(self, span: ReadableSpan) -> None:
prompt=prompt,
**log_object,
trace_parent_id=trace_parent_id,
+ request_options=RequestOptions(max_retries=3),
)
self._uploaded_log_ids[span.context.span_id] = response.id
@@ -205,6 +207,7 @@ def _export_tool(self, span: ReadableSpan) -> None:
tool=tool,
**log_object,
trace_parent_id=trace_parent_id,
+ request_options=RequestOptions(max_retries=3),
)
self._uploaded_log_ids[span.context.span_id] = response.id
@@ -239,5 +242,6 @@ def _export_flow(self, span: ReadableSpan) -> None:
flow=flow,
**log_object,
trace_parent_id=trace_parent_id,
+ request_options=RequestOptions(max_retries=3),
)
self._uploaded_log_ids[span.context.span_id] = response.id
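The retries wired in above use the per-request options that Fern-generated clients accept on every call. A usage sketch, assuming a configured client and an existing Prompt at a placeholder path:

```python
from humanloop import Humanloop
from humanloop.core.request_options import RequestOptions

client = Humanloop(api_key="hl-...")  # placeholder key

client.prompts.log(
    path="folder/my-prompt",  # illustrative path
    output="Hello!",
    # Retry transient failures up to 3 times before surfacing the error
    request_options=RequestOptions(max_retries=3),
)
```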
From e2eda000a9d20a99edbac790af74834c5993856a Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Thu, 31 Oct 2024 14:11:14 +0000
Subject: [PATCH 27/70] Reverted unnecessary diff
---
src/humanloop/client.py | 3 ---
1 file changed, 3 deletions(-)
diff --git a/src/humanloop/client.py b/src/humanloop/client.py
index d62c247e..866205ce 100644
--- a/src/humanloop/client.py
+++ b/src/humanloop/client.py
@@ -62,9 +62,6 @@ def run(
class ExtendedPromptsClient(PromptsClient):
- def __init__(self, client_wrapper: SyncClientWrapper):
- super().__init__(client_wrapper=client_wrapper)
-
populate_template = staticmethod(populate_template)
From a95473dc4c541aa278c276f053b6fa82d13d15b9 Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Thu, 31 Oct 2024 14:13:14 +0000
Subject: [PATCH 28/70] Fixed imports in humanloop client module
---
src/humanloop/client.py | 26 +++++++++++++-------------
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/src/humanloop/client.py b/src/humanloop/client.py
index 866205ce..316084c6 100644
--- a/src/humanloop/client.py
+++ b/src/humanloop/client.py
@@ -1,30 +1,30 @@
-import typing
-from typing import Any, Optional, List, Sequence
import os
+import typing
+from typing import Any, List, Optional, Sequence
+
import httpx
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.trace import Tracer
+from humanloop.types.model_endpoints import ModelEndpoints
+from humanloop.types.model_providers import ModelProviders
+from humanloop.types.prompt_kernel_request_stop import PromptKernelRequestStop
+from humanloop.types.prompt_kernel_request_template import PromptKernelRequestTemplate
from humanloop.types.response_format import ResponseFormat
+from .base_client import AsyncBaseHumanloop, BaseHumanloop
from .decorators.flow import flow as flow_decorator_factory
from .decorators.prompt import prompt as prompt_decorator_factory
from .decorators.tool import tool as tool_decorator_factory
-from humanloop.core.client_wrapper import SyncClientWrapper
-from humanloop.types.model_endpoints import ModelEndpoints
-from humanloop.types.model_providers import ModelProviders
-from humanloop.types.prompt_kernel_request_template import PromptKernelRequestTemplate
-from humanloop.types.prompt_kernel_request_stop import PromptKernelRequestStop
-from .otel.exporter import HumanloopSpanExporter
-from .otel.processor import HumanloopSpanProcessor
-from .otel import instrument_provider, set_humanloop_sdk_tracer
-from .base_client import BaseHumanloop, AsyncBaseHumanloop
from .environment import HumanloopEnvironment
-from .eval_utils import _run_eval, Dataset, File, Evaluator, EvaluatorCheck
-from .prompts.client import PromptsClient
+from .eval_utils import Dataset, Evaluator, EvaluatorCheck, File, _run_eval
from .evaluations.client import EvaluationsClient
+from .otel import instrument_provider, set_humanloop_sdk_tracer
+from .otel.exporter import HumanloopSpanExporter
+from .otel.processor import HumanloopSpanProcessor
from .prompt_utils import populate_template
+from .prompts.client import PromptsClient
class ExtendedEvalsClient(EvaluationsClient):
From 20a70aff4db4369d3cdb3d6ec64f0917c89a9858 Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Thu, 31 Oct 2024 17:03:55 +0000
Subject: [PATCH 29/70] Remove pyproject.toml ruff section
---
poetry.lock | 184 +++++++++++++++++++++----------------------------
pyproject.toml | 27 --------
2 files changed, 80 insertions(+), 131 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index 54edca5a..95a95b1b 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1387,114 +1387,90 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]
[[package]]
name = "rpds-py"
-version = "0.20.0"
+version = "0.20.1"
description = "Python bindings to Rust's persistent data structures (rpds)"
optional = false
python-versions = ">=3.8"
files = [
- {file = "rpds_py-0.20.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3ad0fda1635f8439cde85c700f964b23ed5fc2d28016b32b9ee5fe30da5c84e2"},
- {file = "rpds_py-0.20.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9bb4a0d90fdb03437c109a17eade42dfbf6190408f29b2744114d11586611d6f"},
- {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6377e647bbfd0a0b159fe557f2c6c602c159fc752fa316572f012fc0bf67150"},
- {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb851b7df9dda52dc1415ebee12362047ce771fc36914586b2e9fcbd7d293b3e"},
- {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1e0f80b739e5a8f54837be5d5c924483996b603d5502bfff79bf33da06164ee2"},
- {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5a8c94dad2e45324fc74dce25e1645d4d14df9a4e54a30fa0ae8bad9a63928e3"},
- {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8e604fe73ba048c06085beaf51147eaec7df856824bfe7b98657cf436623daf"},
- {file = "rpds_py-0.20.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:df3de6b7726b52966edf29663e57306b23ef775faf0ac01a3e9f4012a24a4140"},
- {file = "rpds_py-0.20.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:cf258ede5bc22a45c8e726b29835b9303c285ab46fc7c3a4cc770736b5304c9f"},
- {file = "rpds_py-0.20.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:55fea87029cded5df854ca7e192ec7bdb7ecd1d9a3f63d5c4eb09148acf4a7ce"},
- {file = "rpds_py-0.20.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ae94bd0b2f02c28e199e9bc51485d0c5601f58780636185660f86bf80c89af94"},
- {file = "rpds_py-0.20.0-cp310-none-win32.whl", hash = "sha256:28527c685f237c05445efec62426d285e47a58fb05ba0090a4340b73ecda6dee"},
- {file = "rpds_py-0.20.0-cp310-none-win_amd64.whl", hash = "sha256:238a2d5b1cad28cdc6ed15faf93a998336eb041c4e440dd7f902528b8891b399"},
- {file = "rpds_py-0.20.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:ac2f4f7a98934c2ed6505aead07b979e6f999389f16b714448fb39bbaa86a489"},
- {file = "rpds_py-0.20.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:220002c1b846db9afd83371d08d239fdc865e8f8c5795bbaec20916a76db3318"},
- {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8d7919548df3f25374a1f5d01fbcd38dacab338ef5f33e044744b5c36729c8db"},
- {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:758406267907b3781beee0f0edfe4a179fbd97c0be2e9b1154d7f0a1279cf8e5"},
- {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3d61339e9f84a3f0767b1995adfb171a0d00a1185192718a17af6e124728e0f5"},
- {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1259c7b3705ac0a0bd38197565a5d603218591d3f6cee6e614e380b6ba61c6f6"},
- {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5c1dc0f53856b9cc9a0ccca0a7cc61d3d20a7088201c0937f3f4048c1718a209"},
- {file = "rpds_py-0.20.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7e60cb630f674a31f0368ed32b2a6b4331b8350d67de53c0359992444b116dd3"},
- {file = "rpds_py-0.20.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:dbe982f38565bb50cb7fb061ebf762c2f254ca3d8c20d4006878766e84266272"},
- {file = "rpds_py-0.20.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:514b3293b64187172bc77c8fb0cdae26981618021053b30d8371c3a902d4d5ad"},
- {file = "rpds_py-0.20.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:d0a26ffe9d4dd35e4dfdd1e71f46401cff0181c75ac174711ccff0459135fa58"},
- {file = "rpds_py-0.20.0-cp311-none-win32.whl", hash = "sha256:89c19a494bf3ad08c1da49445cc5d13d8fefc265f48ee7e7556839acdacf69d0"},
- {file = "rpds_py-0.20.0-cp311-none-win_amd64.whl", hash = "sha256:c638144ce971df84650d3ed0096e2ae7af8e62ecbbb7b201c8935c370df00a2c"},
- {file = "rpds_py-0.20.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a84ab91cbe7aab97f7446652d0ed37d35b68a465aeef8fc41932a9d7eee2c1a6"},
- {file = "rpds_py-0.20.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:56e27147a5a4c2c21633ff8475d185734c0e4befd1c989b5b95a5d0db699b21b"},
- {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2580b0c34583b85efec8c5c5ec9edf2dfe817330cc882ee972ae650e7b5ef739"},
- {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b80d4a7900cf6b66bb9cee5c352b2d708e29e5a37fe9bf784fa97fc11504bf6c"},
- {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:50eccbf054e62a7b2209b28dc7a22d6254860209d6753e6b78cfaeb0075d7bee"},
- {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:49a8063ea4296b3a7e81a5dfb8f7b2d73f0b1c20c2af401fb0cdf22e14711a96"},
- {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ea438162a9fcbee3ecf36c23e6c68237479f89f962f82dae83dc15feeceb37e4"},
- {file = "rpds_py-0.20.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:18d7585c463087bddcfa74c2ba267339f14f2515158ac4db30b1f9cbdb62c8ef"},
- {file = "rpds_py-0.20.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d4c7d1a051eeb39f5c9547e82ea27cbcc28338482242e3e0b7768033cb083821"},
- {file = "rpds_py-0.20.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:e4df1e3b3bec320790f699890d41c59d250f6beda159ea3c44c3f5bac1976940"},
- {file = "rpds_py-0.20.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2cf126d33a91ee6eedc7f3197b53e87a2acdac63602c0f03a02dd69e4b138174"},
- {file = "rpds_py-0.20.0-cp312-none-win32.whl", hash = "sha256:8bc7690f7caee50b04a79bf017a8d020c1f48c2a1077ffe172abec59870f1139"},
- {file = "rpds_py-0.20.0-cp312-none-win_amd64.whl", hash = "sha256:0e13e6952ef264c40587d510ad676a988df19adea20444c2b295e536457bc585"},
- {file = "rpds_py-0.20.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:aa9a0521aeca7d4941499a73ad7d4f8ffa3d1affc50b9ea11d992cd7eff18a29"},
- {file = "rpds_py-0.20.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4a1f1d51eccb7e6c32ae89243cb352389228ea62f89cd80823ea7dd1b98e0b91"},
- {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8a86a9b96070674fc88b6f9f71a97d2c1d3e5165574615d1f9168ecba4cecb24"},
- {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6c8ef2ebf76df43f5750b46851ed1cdf8f109d7787ca40035fe19fbdc1acc5a7"},
- {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b74b25f024b421d5859d156750ea9a65651793d51b76a2e9238c05c9d5f203a9"},
- {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57eb94a8c16ab08fef6404301c38318e2c5a32216bf5de453e2714c964c125c8"},
- {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1940dae14e715e2e02dfd5b0f64a52e8374a517a1e531ad9412319dc3ac7879"},
- {file = "rpds_py-0.20.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d20277fd62e1b992a50c43f13fbe13277a31f8c9f70d59759c88f644d66c619f"},
- {file = "rpds_py-0.20.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:06db23d43f26478303e954c34c75182356ca9aa7797d22c5345b16871ab9c45c"},
- {file = "rpds_py-0.20.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b2a5db5397d82fa847e4c624b0c98fe59d2d9b7cf0ce6de09e4d2e80f8f5b3f2"},
- {file = "rpds_py-0.20.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5a35df9f5548fd79cb2f52d27182108c3e6641a4feb0f39067911bf2adaa3e57"},
- {file = "rpds_py-0.20.0-cp313-none-win32.whl", hash = "sha256:fd2d84f40633bc475ef2d5490b9c19543fbf18596dcb1b291e3a12ea5d722f7a"},
- {file = "rpds_py-0.20.0-cp313-none-win_amd64.whl", hash = "sha256:9bc2d153989e3216b0559251b0c260cfd168ec78b1fac33dd485750a228db5a2"},
- {file = "rpds_py-0.20.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:f2fbf7db2012d4876fb0d66b5b9ba6591197b0f165db8d99371d976546472a24"},
- {file = "rpds_py-0.20.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1e5f3cd7397c8f86c8cc72d5a791071431c108edd79872cdd96e00abd8497d29"},
- {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ce9845054c13696f7af7f2b353e6b4f676dab1b4b215d7fe5e05c6f8bb06f965"},
- {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c3e130fd0ec56cb76eb49ef52faead8ff09d13f4527e9b0c400307ff72b408e1"},
- {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4b16aa0107ecb512b568244ef461f27697164d9a68d8b35090e9b0c1c8b27752"},
- {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aa7f429242aae2947246587d2964fad750b79e8c233a2367f71b554e9447949c"},
- {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af0fc424a5842a11e28956e69395fbbeab2c97c42253169d87e90aac2886d751"},
- {file = "rpds_py-0.20.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b8c00a3b1e70c1d3891f0db1b05292747f0dbcfb49c43f9244d04c70fbc40eb8"},
- {file = "rpds_py-0.20.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:40ce74fc86ee4645d0a225498d091d8bc61f39b709ebef8204cb8b5a464d3c0e"},
- {file = "rpds_py-0.20.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:4fe84294c7019456e56d93e8ababdad5a329cd25975be749c3f5f558abb48253"},
- {file = "rpds_py-0.20.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:338ca4539aad4ce70a656e5187a3a31c5204f261aef9f6ab50e50bcdffaf050a"},
- {file = "rpds_py-0.20.0-cp38-none-win32.whl", hash = "sha256:54b43a2b07db18314669092bb2de584524d1ef414588780261e31e85846c26a5"},
- {file = "rpds_py-0.20.0-cp38-none-win_amd64.whl", hash = "sha256:a1862d2d7ce1674cffa6d186d53ca95c6e17ed2b06b3f4c476173565c862d232"},
- {file = "rpds_py-0.20.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:3fde368e9140312b6e8b6c09fb9f8c8c2f00999d1823403ae90cc00480221b22"},
- {file = "rpds_py-0.20.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9824fb430c9cf9af743cf7aaf6707bf14323fb51ee74425c380f4c846ea70789"},
- {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:11ef6ce74616342888b69878d45e9f779b95d4bd48b382a229fe624a409b72c5"},
- {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c52d3f2f82b763a24ef52f5d24358553e8403ce05f893b5347098014f2d9eff2"},
- {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9d35cef91e59ebbeaa45214861874bc6f19eb35de96db73e467a8358d701a96c"},
- {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d72278a30111e5b5525c1dd96120d9e958464316f55adb030433ea905866f4de"},
- {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4c29cbbba378759ac5786730d1c3cb4ec6f8ababf5c42a9ce303dc4b3d08cda"},
- {file = "rpds_py-0.20.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6632f2d04f15d1bd6fe0eedd3b86d9061b836ddca4c03d5cf5c7e9e6b7c14580"},
- {file = "rpds_py-0.20.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:d0b67d87bb45ed1cd020e8fbf2307d449b68abc45402fe1a4ac9e46c3c8b192b"},
- {file = "rpds_py-0.20.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:ec31a99ca63bf3cd7f1a5ac9fe95c5e2d060d3c768a09bc1d16e235840861420"},
- {file = "rpds_py-0.20.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:22e6c9976e38f4d8c4a63bd8a8edac5307dffd3ee7e6026d97f3cc3a2dc02a0b"},
- {file = "rpds_py-0.20.0-cp39-none-win32.whl", hash = "sha256:569b3ea770c2717b730b61998b6c54996adee3cef69fc28d444f3e7920313cf7"},
- {file = "rpds_py-0.20.0-cp39-none-win_amd64.whl", hash = "sha256:e6900ecdd50ce0facf703f7a00df12374b74bbc8ad9fe0f6559947fb20f82364"},
- {file = "rpds_py-0.20.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:617c7357272c67696fd052811e352ac54ed1d9b49ab370261a80d3b6ce385045"},
- {file = "rpds_py-0.20.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:9426133526f69fcaba6e42146b4e12d6bc6c839b8b555097020e2b78ce908dcc"},
- {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:deb62214c42a261cb3eb04d474f7155279c1a8a8c30ac89b7dcb1721d92c3c02"},
- {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fcaeb7b57f1a1e071ebd748984359fef83ecb026325b9d4ca847c95bc7311c92"},
- {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d454b8749b4bd70dd0a79f428731ee263fa6995f83ccb8bada706e8d1d3ff89d"},
- {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d807dc2051abe041b6649681dce568f8e10668e3c1c6543ebae58f2d7e617855"},
- {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3c20f0ddeb6e29126d45f89206b8291352b8c5b44384e78a6499d68b52ae511"},
- {file = "rpds_py-0.20.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b7f19250ceef892adf27f0399b9e5afad019288e9be756d6919cb58892129f51"},
- {file = "rpds_py-0.20.0-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:4f1ed4749a08379555cebf4650453f14452eaa9c43d0a95c49db50c18b7da075"},
- {file = "rpds_py-0.20.0-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:dcedf0b42bcb4cfff4101d7771a10532415a6106062f005ab97d1d0ab5681c60"},
- {file = "rpds_py-0.20.0-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:39ed0d010457a78f54090fafb5d108501b5aa5604cc22408fc1c0c77eac14344"},
- {file = "rpds_py-0.20.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:bb273176be34a746bdac0b0d7e4e2c467323d13640b736c4c477881a3220a989"},
- {file = "rpds_py-0.20.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f918a1a130a6dfe1d7fe0f105064141342e7dd1611f2e6a21cd2f5c8cb1cfb3e"},
- {file = "rpds_py-0.20.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:f60012a73aa396be721558caa3a6fd49b3dd0033d1675c6d59c4502e870fcf0c"},
- {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3d2b1ad682a3dfda2a4e8ad8572f3100f95fad98cb99faf37ff0ddfe9cbf9d03"},
- {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:614fdafe9f5f19c63ea02817fa4861c606a59a604a77c8cdef5aa01d28b97921"},
- {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fa518bcd7600c584bf42e6617ee8132869e877db2f76bcdc281ec6a4113a53ab"},
- {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f0475242f447cc6cb8a9dd486d68b2ef7fbee84427124c232bff5f63b1fe11e5"},
- {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f90a4cd061914a60bd51c68bcb4357086991bd0bb93d8aa66a6da7701370708f"},
- {file = "rpds_py-0.20.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:def7400461c3a3f26e49078302e1c1b38f6752342c77e3cf72ce91ca69fb1bc1"},
- {file = "rpds_py-0.20.0-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:65794e4048ee837494aea3c21a28ad5fc080994dfba5b036cf84de37f7ad5074"},
- {file = "rpds_py-0.20.0-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:faefcc78f53a88f3076b7f8be0a8f8d35133a3ecf7f3770895c25f8813460f08"},
- {file = "rpds_py-0.20.0-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:5b4f105deeffa28bbcdff6c49b34e74903139afa690e35d2d9e3c2c2fba18cec"},
- {file = "rpds_py-0.20.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:fdfc3a892927458d98f3d55428ae46b921d1f7543b89382fdb483f5640daaec8"},
- {file = "rpds_py-0.20.0.tar.gz", hash = "sha256:d72a210824facfdaf8768cf2d7ca25a042c30320b3020de2fa04640920d4e121"},
+ {file = "rpds_py-0.20.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:a649dfd735fff086e8a9d0503a9f0c7d01b7912a333c7ae77e1515c08c146dad"},
+ {file = "rpds_py-0.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f16bc1334853e91ddaaa1217045dd7be166170beec337576818461268a3de67f"},
+ {file = "rpds_py-0.20.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:14511a539afee6f9ab492b543060c7491c99924314977a55c98bfa2ee29ce78c"},
+ {file = "rpds_py-0.20.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3ccb8ac2d3c71cda472b75af42818981bdacf48d2e21c36331b50b4f16930163"},
+ {file = "rpds_py-0.20.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c142b88039b92e7e0cb2552e8967077e3179b22359e945574f5e2764c3953dcf"},
+ {file = "rpds_py-0.20.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f19169781dddae7478a32301b499b2858bc52fc45a112955e798ee307e294977"},
+ {file = "rpds_py-0.20.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13c56de6518e14b9bf6edde23c4c39dac5b48dcf04160ea7bce8fca8397cdf86"},
+ {file = "rpds_py-0.20.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:925d176a549f4832c6f69fa6026071294ab5910e82a0fe6c6228fce17b0706bd"},
+ {file = "rpds_py-0.20.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:78f0b6877bfce7a3d1ff150391354a410c55d3cdce386f862926a4958ad5ab7e"},
+ {file = "rpds_py-0.20.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3dd645e2b0dcb0fd05bf58e2e54c13875847687d0b71941ad2e757e5d89d4356"},
+ {file = "rpds_py-0.20.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:4f676e21db2f8c72ff0936f895271e7a700aa1f8d31b40e4e43442ba94973899"},
+ {file = "rpds_py-0.20.1-cp310-none-win32.whl", hash = "sha256:648386ddd1e19b4a6abab69139b002bc49ebf065b596119f8f37c38e9ecee8ff"},
+ {file = "rpds_py-0.20.1-cp310-none-win_amd64.whl", hash = "sha256:d9ecb51120de61e4604650666d1f2b68444d46ae18fd492245a08f53ad2b7711"},
+ {file = "rpds_py-0.20.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:762703bdd2b30983c1d9e62b4c88664df4a8a4d5ec0e9253b0231171f18f6d75"},
+ {file = "rpds_py-0.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0b581f47257a9fce535c4567782a8976002d6b8afa2c39ff616edf87cbeff712"},
+ {file = "rpds_py-0.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:842c19a6ce894493563c3bd00d81d5100e8e57d70209e84d5491940fdb8b9e3a"},
+ {file = "rpds_py-0.20.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42cbde7789f5c0bcd6816cb29808e36c01b960fb5d29f11e052215aa85497c93"},
+ {file = "rpds_py-0.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6c8e9340ce5a52f95fa7d3b552b35c7e8f3874d74a03a8a69279fd5fca5dc751"},
+ {file = "rpds_py-0.20.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ba6f89cac95c0900d932c9efb7f0fb6ca47f6687feec41abcb1bd5e2bd45535"},
+ {file = "rpds_py-0.20.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a916087371afd9648e1962e67403c53f9c49ca47b9680adbeef79da3a7811b0"},
+ {file = "rpds_py-0.20.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:200a23239781f46149e6a415f1e870c5ef1e712939fe8fa63035cd053ac2638e"},
+ {file = "rpds_py-0.20.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:58b1d5dd591973d426cbb2da5e27ba0339209832b2f3315928c9790e13f159e8"},
+ {file = "rpds_py-0.20.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:6b73c67850ca7cae0f6c56f71e356d7e9fa25958d3e18a64927c2d930859b8e4"},
+ {file = "rpds_py-0.20.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:d8761c3c891cc51e90bc9926d6d2f59b27beaf86c74622c8979380a29cc23ac3"},
+ {file = "rpds_py-0.20.1-cp311-none-win32.whl", hash = "sha256:cd945871335a639275eee904caef90041568ce3b42f402c6959b460d25ae8732"},
+ {file = "rpds_py-0.20.1-cp311-none-win_amd64.whl", hash = "sha256:7e21b7031e17c6b0e445f42ccc77f79a97e2687023c5746bfb7a9e45e0921b84"},
+ {file = "rpds_py-0.20.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:36785be22066966a27348444b40389f8444671630063edfb1a2eb04318721e17"},
+ {file = "rpds_py-0.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:142c0a5124d9bd0e2976089484af5c74f47bd3298f2ed651ef54ea728d2ea42c"},
+ {file = "rpds_py-0.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dbddc10776ca7ebf2a299c41a4dde8ea0d8e3547bfd731cb87af2e8f5bf8962d"},
+ {file = "rpds_py-0.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:15a842bb369e00295392e7ce192de9dcbf136954614124a667f9f9f17d6a216f"},
+ {file = "rpds_py-0.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:be5ef2f1fc586a7372bfc355986226484e06d1dc4f9402539872c8bb99e34b01"},
+ {file = "rpds_py-0.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dbcf360c9e3399b056a238523146ea77eeb2a596ce263b8814c900263e46031a"},
+ {file = "rpds_py-0.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ecd27a66740ffd621d20b9a2f2b5ee4129a56e27bfb9458a3bcc2e45794c96cb"},
+ {file = "rpds_py-0.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d0b937b2a1988f184a3e9e577adaa8aede21ec0b38320d6009e02bd026db04fa"},
+ {file = "rpds_py-0.20.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6889469bfdc1eddf489729b471303739bf04555bb151fe8875931f8564309afc"},
+ {file = "rpds_py-0.20.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:19b73643c802f4eaf13d97f7855d0fb527fbc92ab7013c4ad0e13a6ae0ed23bd"},
+ {file = "rpds_py-0.20.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3c6afcf2338e7f374e8edc765c79fbcb4061d02b15dd5f8f314a4af2bdc7feb5"},
+ {file = "rpds_py-0.20.1-cp312-none-win32.whl", hash = "sha256:dc73505153798c6f74854aba69cc75953888cf9866465196889c7cdd351e720c"},
+ {file = "rpds_py-0.20.1-cp312-none-win_amd64.whl", hash = "sha256:8bbe951244a838a51289ee53a6bae3a07f26d4e179b96fc7ddd3301caf0518eb"},
+ {file = "rpds_py-0.20.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:6ca91093a4a8da4afae7fe6a222c3b53ee4eef433ebfee4d54978a103435159e"},
+ {file = "rpds_py-0.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:b9c2fe36d1f758b28121bef29ed1dee9b7a2453e997528e7d1ac99b94892527c"},
+ {file = "rpds_py-0.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f009c69bc8c53db5dfab72ac760895dc1f2bc1b62ab7408b253c8d1ec52459fc"},
+ {file = "rpds_py-0.20.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6740a3e8d43a32629bb9b009017ea5b9e713b7210ba48ac8d4cb6d99d86c8ee8"},
+ {file = "rpds_py-0.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:32b922e13d4c0080d03e7b62991ad7f5007d9cd74e239c4b16bc85ae8b70252d"},
+ {file = "rpds_py-0.20.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fe00a9057d100e69b4ae4a094203a708d65b0f345ed546fdef86498bf5390982"},
+ {file = "rpds_py-0.20.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49fe9b04b6fa685bd39237d45fad89ba19e9163a1ccaa16611a812e682913496"},
+ {file = "rpds_py-0.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:aa7ac11e294304e615b43f8c441fee5d40094275ed7311f3420d805fde9b07b4"},
+ {file = "rpds_py-0.20.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6aa97af1558a9bef4025f8f5d8c60d712e0a3b13a2fe875511defc6ee77a1ab7"},
+ {file = "rpds_py-0.20.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:483b29f6f7ffa6af845107d4efe2e3fa8fb2693de8657bc1849f674296ff6a5a"},
+ {file = "rpds_py-0.20.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:37fe0f12aebb6a0e3e17bb4cd356b1286d2d18d2e93b2d39fe647138458b4bcb"},
+ {file = "rpds_py-0.20.1-cp313-none-win32.whl", hash = "sha256:a624cc00ef2158e04188df5e3016385b9353638139a06fb77057b3498f794782"},
+ {file = "rpds_py-0.20.1-cp313-none-win_amd64.whl", hash = "sha256:b71b8666eeea69d6363248822078c075bac6ed135faa9216aa85f295ff009b1e"},
+ {file = "rpds_py-0.20.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:5b48e790e0355865197ad0aca8cde3d8ede347831e1959e158369eb3493d2191"},
+ {file = "rpds_py-0.20.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3e310838a5801795207c66c73ea903deda321e6146d6f282e85fa7e3e4854804"},
+ {file = "rpds_py-0.20.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2249280b870e6a42c0d972339e9cc22ee98730a99cd7f2f727549af80dd5a963"},
+ {file = "rpds_py-0.20.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e79059d67bea28b53d255c1437b25391653263f0e69cd7dec170d778fdbca95e"},
+ {file = "rpds_py-0.20.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2b431c777c9653e569986ecf69ff4a5dba281cded16043d348bf9ba505486f36"},
+ {file = "rpds_py-0.20.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:da584ff96ec95e97925174eb8237e32f626e7a1a97888cdd27ee2f1f24dd0ad8"},
+ {file = "rpds_py-0.20.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02a0629ec053fc013808a85178524e3cb63a61dbc35b22499870194a63578fb9"},
+ {file = "rpds_py-0.20.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fbf15aff64a163db29a91ed0868af181d6f68ec1a3a7d5afcfe4501252840bad"},
+ {file = "rpds_py-0.20.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:07924c1b938798797d60c6308fa8ad3b3f0201802f82e4a2c41bb3fafb44cc28"},
+ {file = "rpds_py-0.20.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:4a5a844f68776a7715ecb30843b453f07ac89bad393431efbf7accca3ef599c1"},
+ {file = "rpds_py-0.20.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:518d2ca43c358929bf08f9079b617f1c2ca6e8848f83c1225c88caeac46e6cbc"},
+ {file = "rpds_py-0.20.1-cp38-none-win32.whl", hash = "sha256:3aea7eed3e55119635a74bbeb80b35e776bafccb70d97e8ff838816c124539f1"},
+ {file = "rpds_py-0.20.1-cp38-none-win_amd64.whl", hash = "sha256:7dca7081e9a0c3b6490a145593f6fe3173a94197f2cb9891183ef75e9d64c425"},
+ {file = "rpds_py-0.20.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:b41b6321805c472f66990c2849e152aff7bc359eb92f781e3f606609eac877ad"},
+ {file = "rpds_py-0.20.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0a90c373ea2975519b58dece25853dbcb9779b05cc46b4819cb1917e3b3215b6"},
+ {file = "rpds_py-0.20.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:16d4477bcb9fbbd7b5b0e4a5d9b493e42026c0bf1f06f723a9353f5153e75d30"},
+ {file = "rpds_py-0.20.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:84b8382a90539910b53a6307f7c35697bc7e6ffb25d9c1d4e998a13e842a5e83"},
+ {file = "rpds_py-0.20.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4888e117dd41b9d34194d9e31631af70d3d526efc363085e3089ab1a62c32ed1"},
+ {file = "rpds_py-0.20.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5265505b3d61a0f56618c9b941dc54dc334dc6e660f1592d112cd103d914a6db"},
+ {file = "rpds_py-0.20.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e75ba609dba23f2c95b776efb9dd3f0b78a76a151e96f96cc5b6b1b0004de66f"},
+ {file = "rpds_py-0.20.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1791ff70bc975b098fe6ecf04356a10e9e2bd7dc21fa7351c1742fdeb9b4966f"},
+ {file = "rpds_py-0.20.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:d126b52e4a473d40232ec2052a8b232270ed1f8c9571aaf33f73a14cc298c24f"},
+ {file = "rpds_py-0.20.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:c14937af98c4cc362a1d4374806204dd51b1e12dded1ae30645c298e5a5c4cb1"},
+ {file = "rpds_py-0.20.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:3d089d0b88996df627693639d123c8158cff41c0651f646cd8fd292c7da90eaf"},
+ {file = "rpds_py-0.20.1-cp39-none-win32.whl", hash = "sha256:653647b8838cf83b2e7e6a0364f49af96deec64d2a6578324db58380cff82aca"},
+ {file = "rpds_py-0.20.1-cp39-none-win_amd64.whl", hash = "sha256:fa41a64ac5b08b292906e248549ab48b69c5428f3987b09689ab2441f267d04d"},
+ {file = "rpds_py-0.20.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:7a07ced2b22f0cf0b55a6a510078174c31b6d8544f3bc00c2bcee52b3d613f74"},
]
[[package]]
diff --git a/pyproject.toml b/pyproject.toml
index da6c24c3..4339dd85 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -73,33 +73,6 @@ plugins = ["pydantic.mypy"]
[tool.ruff]
line-length = 120
-[tool.ruff.lint]
-select = [
- "E", # pycodestyle errors
- "W", # pycodestyle warnings
- "F", # pyflakes
- "I", # isort
- "C", # flake8-comprehensions
- "B", # flake8-bugbear
- "UP", # pyupgrade
- "DTZ", # unsafe naive datetime
- "PL", # pylint
-]
-ignore = [
- "B904", # raise without from inside except, TODO: turn back on
- "E501", # line too long, handled by ruff formatter
- "UP015", # redundant open modes
- "B008", # do not perform function calls in argument defaults
- "C901", # too complex
- "PLR0912", # too many branches
- "PLR0913", # too many arguments
- "PLR0911", # too many return statements
- "PLR0915", # too many statements,
- "PLR2004", # magic value comparison
- "PLR5501", # 'elif' instead of 'else' then 'if'
- "PLE1205", # too many arguments for format string
-]
-
[build-system]
requires = ["poetry-core"]
From e1a33895b857d18223ee2d43d55a124e4829f54e Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Thu, 31 Oct 2024 18:11:49 +0000
Subject: [PATCH 30/70] Removed global tracer; now owned by client
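The decorators previously wrote spans to a module-level _TRACER global in
humanloop.otel; that global and its set/get helpers are removed, and the
Humanloop client now owns the Tracer: it uses one supplied via its
opentelemetry_tracer constructor argument, or asks its own TracerProvider for
a tracer named "humanloop.sdk", and passes it explicitly into the
flow/prompt/tool decorator factories. A minimal sketch of the resulting
client-facing wiring (the api_key value and the "my-app" tracer name are
placeholders):

    from opentelemetry import trace
    from humanloop import Humanloop

    # Option 1: the client creates its own tracer ("humanloop.sdk").
    client = Humanloop(api_key="hl-...")

    # Option 2: hand the client a tracer you already manage.
    client = Humanloop(
        api_key="hl-...",
        opentelemetry_tracer=trace.get_tracer("my-app"),
    )

    # Decorators returned by the client close over its tracer instead of
    # reading a global, so independent clients no longer share state.
    @client.flow(path="demo/agent", attributes={"foo": "bar"})
    def agent(messages: list[dict]) -> str:
        return "ok"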
---
src/humanloop/client.py | 17 +-
src/humanloop/decorators/flow.py | 7 +-
src/humanloop/decorators/prompt.py | 8 +-
src/humanloop/decorators/tool.py | 7 +-
src/humanloop/otel/__init__.py | 22 --
tests/conftest.py | 16 +-
tests/decorators/test_flow_decorator.py | 91 ++++---
tests/decorators/test_prompt_decorator.py | 282 +++++++++++-----------
tests/decorators/test_tool_decorator.py | 128 +++++++---
9 files changed, 329 insertions(+), 249 deletions(-)
diff --git a/src/humanloop/client.py b/src/humanloop/client.py
index 316084c6..7550be35 100644
--- a/src/humanloop/client.py
+++ b/src/humanloop/client.py
@@ -20,7 +20,7 @@
from .environment import HumanloopEnvironment
from .eval_utils import Dataset, Evaluator, EvaluatorCheck, File, _run_eval
from .evaluations.client import EvaluationsClient
-from .otel import instrument_provider, set_humanloop_sdk_tracer
+from .otel import instrument_provider
from .otel.exporter import HumanloopSpanExporter
from .otel.processor import HumanloopSpanProcessor
from .prompt_utils import populate_template
@@ -124,11 +124,10 @@ def __init__(
),
)
- if opentelemetry_tracer is not None:
- set_humanloop_sdk_tracer(opentelemetry_tracer)
+ if opentelemetry_tracer is None:
+ self._opentelemetry_tracer = self._tracer_provider.get_tracer("humanloop.sdk")
else:
- tracer = self._tracer_provider.get_tracer("humanloop.sdk")
- set_humanloop_sdk_tracer(tracer)
+ self._opentelemetry_tracer = opentelemetry_tracer
eval_client = ExtendedEvalsClient(client_wrapper=self._client_wrapper)
eval_client.client = self
@@ -260,6 +259,7 @@ def call_llm(messages):
for chat.
"""
return prompt_decorator_factory(
+ opentelemetry_tracer=self._opentelemetry_tracer,
path=path,
model=model,
attributes=attributes,
@@ -351,6 +351,7 @@ def calculator(a: int, b: Optional[int]) -> int:
with details on how they were created or used.
"""
return tool_decorator_factory(
+ opentelemetry_tracer=self._opentelemetry_tracer,
path=path,
setup_values=setup_values,
attributes=attributes,
@@ -408,7 +409,11 @@ def entrypoint():
"""
if attributes is None:
attributes = {}
- return flow_decorator_factory(path=path, attributes=attributes)
+ return flow_decorator_factory(
+ opentelemetry_tracer=self._opentelemetry_tracer,
+ path=path,
+ attributes=attributes,
+ )
class AsyncHumanloop(AsyncBaseHumanloop):
diff --git a/src/humanloop/decorators/flow.py b/src/humanloop/decorators/flow.py
index 1d17e905..e28cb5bd 100644
--- a/src/humanloop/decorators/flow.py
+++ b/src/humanloop/decorators/flow.py
@@ -2,13 +2,16 @@
from functools import wraps
from typing import Any, Callable, Optional
+from opentelemetry.trace import Tracer
+
from humanloop.decorators.helpers import args_to_inputs
-from humanloop.otel import get_humanloop_sdk_tracer, get_trace_parent_metadata, pop_trace_context, push_trace_context
+from humanloop.otel import get_trace_parent_metadata, pop_trace_context, push_trace_context
from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY, HL_OT_EMPTY_VALUE, HL_TRACE_METADATA_KEY
from humanloop.otel.helpers import write_to_opentelemetry_span
def flow(
+ opentelemetry_tracer: Tracer,
path: Optional[str] = None,
attributes: Optional[dict[str, Any]] = None,
):
@@ -18,7 +21,7 @@ def flow(
def decorator(func: Callable):
@wraps(func)
def wrapper(*args, **kwargs):
- with get_humanloop_sdk_tracer().start_as_current_span(str(uuid.uuid4())) as span:
+ with opentelemetry_tracer.start_as_current_span(str(uuid.uuid4())) as span:
trace_metadata = get_trace_parent_metadata()
if trace_metadata:
diff --git a/src/humanloop/decorators/prompt.py b/src/humanloop/decorators/prompt.py
index 088b550a..9b6305e9 100644
--- a/src/humanloop/decorators/prompt.py
+++ b/src/humanloop/decorators/prompt.py
@@ -1,9 +1,10 @@
-import inspect
import uuid
from functools import wraps
from typing import Any, Callable, Optional
-from humanloop.otel import get_humanloop_sdk_tracer, get_trace_parent_metadata, pop_trace_context, push_trace_context
+from opentelemetry.trace import Tracer
+
+from humanloop.otel import get_trace_parent_metadata, pop_trace_context, push_trace_context
from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY, HL_OT_EMPTY_VALUE, HL_TRACE_METADATA_KEY
from humanloop.otel.helpers import write_to_opentelemetry_span
from humanloop.types.model_endpoints import ModelEndpoints
@@ -14,6 +15,7 @@
def prompt(
+ opentelemetry_tracer: Tracer,
path: Optional[str] = None,
# TODO: Template can be a list of objects?
model: Optional[str] = None,
@@ -71,7 +73,7 @@ def decorator(func: Callable):
@wraps(func)
def wrapper(*args, **kwargs):
- with get_humanloop_sdk_tracer().start_as_current_span(str(uuid.uuid4())) as span:
+ with opentelemetry_tracer.start_as_current_span(str(uuid.uuid4())) as span:
trace_metadata = get_trace_parent_metadata()
if trace_metadata:
diff --git a/src/humanloop/decorators/tool.py b/src/humanloop/decorators/tool.py
index 30a26c8c..0334b0a7 100644
--- a/src/humanloop/decorators/tool.py
+++ b/src/humanloop/decorators/tool.py
@@ -6,7 +6,9 @@
from functools import wraps
from typing import Any, Callable, Literal, Mapping, Optional, Sequence, TypedDict, Union
-from humanloop.otel import get_humanloop_sdk_tracer, get_trace_parent_metadata, pop_trace_context, push_trace_context
+from opentelemetry.trace import Tracer
+
+from humanloop.otel import get_trace_parent_metadata, pop_trace_context, push_trace_context
from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY, HL_OT_EMPTY_VALUE, HL_TRACE_METADATA_KEY
from humanloop.otel.helpers import write_to_opentelemetry_span
from humanloop.requests.tool_function import ToolFunctionParams
@@ -16,6 +18,7 @@
def tool(
+ opentelemetry_tracer: Tracer,
path: Optional[str] = None,
setup_values: Optional[dict[str, Optional[Any]]] = None,
attributes: Optional[dict[str, typing.Any]] = None,
@@ -34,7 +37,7 @@ def decorator(func: Callable):
@wraps(func)
def wrapper(*args, **kwargs):
- with get_humanloop_sdk_tracer().start_as_current_span(str(uuid.uuid4())) as span:
+ with opentelemetry_tracer.start_as_current_span(str(uuid.uuid4())) as span:
trace_metadata = get_trace_parent_metadata()
if trace_metadata:
diff --git a/src/humanloop/otel/__init__.py b/src/humanloop/otel/__init__.py
index ee5073f4..e184c024 100644
--- a/src/humanloop/otel/__init__.py
+++ b/src/humanloop/otel/__init__.py
@@ -3,20 +3,10 @@
from opentelemetry import baggage
from opentelemetry.context import Context
from opentelemetry.sdk.trace import TracerProvider
-from opentelemetry.trace import Tracer
from humanloop.otel.constants import HL_TRACE_METADATA_KEY
from humanloop.otel.helpers import module_is_installed
-"""
-Tracer to which Humanloop decorators will write Spans.
-Humanloop SDK will instantiate one for the decorators
-if the user does not provide a Tracer in the Humanloop
-client.
-"""
-_TRACER = None
-
-
"""
Humanloop SDK uses the Baggage concept from OTel
to store the Trace metadata. Read more here:
@@ -35,18 +25,6 @@
_BAGGAGE_CONTEXT_STACK: list[Context] = [Context()]
-def set_humanloop_sdk_tracer(tracer: Tracer):
- """Set Tracer used by Humanloop SDK to instrument the decorators."""
- global _TRACER # noqa: PLW0603
- _TRACER = tracer
-
-
-def get_humanloop_sdk_tracer() -> Tracer:
- """Get Tracer used by Humanloop SDK to instrument the decorators."""
- assert _TRACER is not None, "Internal error: OTT Tracer should have been set in the client"
- return _TRACER
-
-
def instrument_provider(provider: TracerProvider):
"""Add Instrumentors to the TracerProvider.
diff --git a/tests/conftest.py b/tests/conftest.py
index 4a4f5137..ccdaa81d 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -1,4 +1,4 @@
-from typing import Generator
+from typing import Callable, Generator
from unittest.mock import MagicMock
import pytest
@@ -16,7 +16,9 @@
from opentelemetry.sdk.trace.export import SimpleSpanProcessor
from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
-from humanloop import otel as INTERNAL_OT
+from humanloop.decorators.flow import flow
+from humanloop.decorators.prompt import prompt
+from humanloop.decorators.tool import tool
from humanloop.otel.exporter import HumanloopSpanExporter
from humanloop.otel.processor import HumanloopSpanProcessor
@@ -71,13 +73,11 @@ def opentelemetry_test_configuration(
instrumentor.instrument(tracer_provider=opentelemetry_test_provider)
tracer = opentelemetry_test_provider.get_tracer("test")
# Circumvent configuration procedure
- INTERNAL_OT._TRACER = tracer
yield tracer, exporter
for instrumentor in instrumentors:
instrumentor.uninstrument()
- INTERNAL_OT._TRACER = None
@pytest.fixture(scope="function")
@@ -101,15 +101,15 @@ def opentelemetry_hl_test_configuration(
AnthropicInstrumentor(),
]
for instrumentor in instrumentors:
- instrumentor.instrument(tracer_provider=opentelemetry_test_provider)
+ instrumentor.instrument(
+ tracer_provider=opentelemetry_test_provider,
+ )
tracer = opentelemetry_test_provider.get_tracer("test")
- INTERNAL_OT._TRACER = tracer
yield tracer, exporter
for instrumentor in instrumentors:
instrumentor.uninstrument()
- INTERNAL_OT._TRACER = None
@pytest.fixture(scope="function")
@@ -136,12 +136,10 @@ def opentelemetry_hl_with_exporter_test_configuration(
instrumentor = OpenAIInstrumentor()
instrumentor.instrument(tracer_provider=opentelemetry_test_provider)
tracer = opentelemetry_test_provider.get_tracer("test")
- INTERNAL_OT._TRACER = tracer
yield tracer, hl_test_exporter
instrumentor.uninstrument()
- INTERNAL_OT._TRACER = None
@pytest.fixture(scope="session")
diff --git a/tests/decorators/test_flow_decorator.py b/tests/decorators/test_flow_decorator.py
index 18e56208..634b3786 100644
--- a/tests/decorators/test_flow_decorator.py
+++ b/tests/decorators/test_flow_decorator.py
@@ -17,46 +17,60 @@
from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
-@tool()
-def _random_string() -> str:
- """Return a random string."""
- return "".join(
- random.choices(
- string.ascii_letters + string.digits,
- k=10,
- )
- )
-
-
-@prompt(path=None, template="You are an assistant on the following topics: {topics}.")
-def _call_llm(messages: list[ChatCompletionMessageParam]) -> str:
- client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
- return (
- client.chat.completions.create(
- model="gpt-4o",
- messages=messages,
- temperature=0.8,
+def _test_scenario(
+ opentelemetry_tracer: Tracer,
+):
+ @tool(opentelemetry_tracer=opentelemetry_tracer)
+ def _random_string() -> str:
+ """Return a random string."""
+ return "".join(
+ random.choices(
+ string.ascii_letters + string.digits,
+ k=10,
+ )
)
- .choices[0]
- .message.content
- ) + _random_string()
+ @prompt(
+ opentelemetry_tracer=opentelemetry_tracer,
+ path=None,
+ template="You are an assistant on the following topics: {topics}.",
+ )
+ def _call_llm(messages: list[ChatCompletionMessageParam]) -> str:
+ client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
+ return (
+ client.chat.completions.create(
+ model="gpt-4o",
+ messages=messages,
+ temperature=0.8,
+ )
+ .choices[0]
+ .message.content
+ ) + _random_string()
-@flow(attributes={"foo": "bar", "baz": 7})
-def _agent_call(messages: list[dict]) -> str:
- return _call_llm(messages=messages)
+ @flow(
+ opentelemetry_tracer=opentelemetry_tracer,
+ attributes={"foo": "bar", "baz": 7},
+ )
+ def _agent_call(messages: list[dict]) -> str:
+ return _call_llm(messages=messages)
+ @flow(
+ opentelemetry_tracer=opentelemetry_tracer,
+ )
+ def _flow_over_flow(messages: list[dict]) -> str:
+ return _agent_call(messages=messages)
-@flow()
-def _flow_over_flow(messages: list[dict]) -> str:
- return _agent_call(messages=messages)
+ return _random_string, _call_llm, _agent_call, _flow_over_flow
def test_decorators_without_flow(
opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter],
):
+ tracer, exporter = opentelemetry_hl_test_configuration
+
+ _call_llm = _test_scenario(tracer)[1]
+
# GIVEN a call to @prompt annotated function that calls a @tool
- _, exporter = opentelemetry_hl_test_configuration
_call_llm(
[
{
@@ -94,7 +108,10 @@ def test_decorators_with_flow_decorator(
opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter],
):
# GIVEN a @flow entrypoint to an instrumented application
- _, exporter = opentelemetry_hl_test_configuration
+ tracer, exporter = opentelemetry_hl_test_configuration
+
+ _agent_call = _test_scenario(tracer)[2]
+
# WHEN calling the Flow
_agent_call(
[
@@ -137,7 +154,9 @@ def test_flow_decorator_flow_in_flow(
call_llm_messages: list[dict],
):
# GIVEN A configured OpenTelemetry tracer and exporter
- _, exporter = opentelemetry_hl_test_configuration
+ tracer, exporter = opentelemetry_hl_test_configuration
+
+ _flow_over_flow = _test_scenario(tracer)[3]
# WHEN Calling the _test_flow_in_flow function with specific messages
_flow_over_flow(call_llm_messages)
@@ -181,7 +200,10 @@ def test_flow_decorator_with_hl_exporter(
):
# NOTE: type ignore comments are caused by the MagicMock used to mock _client
    # GIVEN an OpenTelemetry configuration with a mock Humanloop SDK and a spied exporter
- _, exporter = opentelemetry_hl_with_exporter_test_configuration
+ tracer, exporter = opentelemetry_hl_with_exporter_test_configuration
+
+ _agent_call = _test_scenario(tracer)[2]
+
with patch.object(exporter, "export", wraps=exporter.export) as mock_export_method:
# WHEN calling the @flow decorated function
_agent_call(call_llm_messages)
@@ -246,7 +268,10 @@ def test_flow_decorator_hl_exporter_flow_inside_flow(
opentelemetry_hl_with_exporter_test_configuration: tuple[Tracer, HumanloopSpanExporter],
):
    # GIVEN an OpenTelemetry configuration with a mock Humanloop SDK and a spied exporter
- _, exporter = opentelemetry_hl_with_exporter_test_configuration
+ tracer, exporter = opentelemetry_hl_with_exporter_test_configuration
+
+ _flow_over_flow = _test_scenario(tracer)[3]
+
with patch.object(exporter, "export", wraps=exporter.export) as mock_export_method:
# WHEN calling the @flow decorated function
_flow_over_flow(call_llm_messages)
diff --git a/tests/decorators/test_prompt_decorator.py b/tests/decorators/test_prompt_decorator.py
index 4e4ae7eb..b8c4b80d 100644
--- a/tests/decorators/test_prompt_decorator.py
+++ b/tests/decorators/test_prompt_decorator.py
@@ -33,110 +33,109 @@
]
-def _call_llm_base(provider: ModelProviders, model: str, messages: list[dict]) -> Optional[str]:
- load_dotenv()
- if provider == "openai":
- # NOTE: These tests check if instrumentors are capable of intercepting OpenAI
- # provider calls. Could not find a way to intercept them coming from a Mock.
- client = OpenAI(api_key=os.getenv("OPENAI_API_KEY")) # type: ignore
- return (
- client.chat.completions.create(
- model=model,
- messages=messages, # type: ignore
- temperature=0.8,
- )
- .choices[0]
- .message.content
- )
- if provider == "anthropic":
- client = Anthropic(api_key=os.getenv("ANTHROPIC_API_KEY")) # type: ignore
- messages_anthropic_format = [
- MessageParam(
- content=message["content"],
- role="user" if message["role"] in ("user", "system") else "assistant",
- )
- for message in messages
- ]
- return (
- client.messages.create( # type: ignore
- model=model,
- messages=messages_anthropic_format,
- max_tokens=200,
- temperature=0.8,
- )
- .content[0]
- .text
- )
- if provider == "groq":
- try:
- client = Groq( # type: ignore
- # This is the default and can be omitted
- api_key=os.environ.get("GROQ_API_KEY"),
- )
+def _test_scenario(opentelemetry_tracer: Tracer, **kwargs):
+ """
+ Set up the function decorated with @prompt.
+
+ Normally the opentelemetry_tracer would be passed in by the Humanloop client.
+    In a test environment, the Tracer is obtained from a fixture and the tests
+    call this function to set up the decorated function under test.
+ """
+
+ @prompt(opentelemetry_tracer=opentelemetry_tracer, **kwargs)
+ def _call_llm_base(provider: ModelProviders, model: str, messages: list[dict]) -> Optional[str]:
+ load_dotenv()
+ if provider == "openai":
+ # NOTE: These tests check if instrumentors are capable of intercepting OpenAI
+ # provider calls. Could not find a way to intercept them coming from a Mock.
+ client = OpenAI(api_key=os.getenv("OPENAI_API_KEY")) # type: ignore
return (
client.chat.completions.create(
- messages=messages, # type: ignore
model=model,
+ messages=messages, # type: ignore
temperature=0.8,
)
.choices[0]
.message.content
)
- except GroqNotFoundError:
- # NOTE: Tests in this file are integration tests that rely on live LLM provider
- # clients. If a test fails, it might be flaky. If this happens, consider adding
- # a skip mechanism similar to Groq
- pytest.skip("GROQ not available")
- if provider == "cohere":
- client = cohere.Client(api_key=os.getenv("COHERE_API_KEY")) # type: ignore
- messages_cohere_format: list[cohere.Message] = []
- for message in messages:
- if message["role"] == "system":
- messages_cohere_format.append(cohere.SystemMessage(message=message["content"]))
- elif message["role"] == "user":
- messages_cohere_format.append(cohere.UserMessage(message=message["content"]))
- elif message["role"] == "assistant":
- messages_cohere_format.append(cohere.ChatbotMessage(message=message["content"]))
- return client.chat( # type: ignore
- chat_history=messages_cohere_format,
- model=model,
- max_tokens=200,
- message=messages[-1]["content"],
- temperature=0.8,
- ).text
- if provider == "replicate":
- # TODO: Instrumentor only picks up methods on module-level, not client level
- # This should be documented somewhere or changed
- replicate.default_client._api_token = os.getenv("REPLICATE_API_KEY")
- try:
- output = ""
- for event in replicate.run(
- model,
- input={
- "prompt": messages[0]["content"] + " " + messages[-1]["content"],
- "temperature": 0.8,
- },
- ):
- output += str(event)
- except ReplicateModelError:
- pytest.skip("Replicate not available")
- if not output:
- pytest.skip("Replicate not available")
- return output
- raise ValueError(f"Unknown provider: {provider}")
-
-
-# NOTE: prompt is a decorator, but for brevity, it's used as a higher-order function in tests
-_call_llm = prompt(
- path=None,
- template="You are an assistant on the following topics: {topics}.",
-)(_call_llm_base)
-_call_llm_with_defaults = prompt(
- path=None,
- template="You are an assistant on the following topics: {topics}.",
- temperature=0.9,
- top_p=0.1,
-)(_call_llm_base)
+ if provider == "anthropic":
+ client = Anthropic(api_key=os.getenv("ANTHROPIC_API_KEY")) # type: ignore
+ messages_anthropic_format = [
+ MessageParam(
+ content=message["content"],
+ role="user" if message["role"] in ("user", "system") else "assistant",
+ )
+ for message in messages
+ ]
+ return (
+ client.messages.create( # type: ignore
+ model=model,
+ messages=messages_anthropic_format,
+ max_tokens=200,
+ temperature=0.8,
+ )
+ .content[0]
+ .text
+ )
+ if provider == "groq":
+ try:
+ client = Groq( # type: ignore
+ # This is the default and can be omitted
+ api_key=os.environ.get("GROQ_API_KEY"),
+ )
+ return (
+ client.chat.completions.create(
+ messages=messages, # type: ignore
+ model=model,
+ temperature=0.8,
+ )
+ .choices[0]
+ .message.content
+ )
+ except GroqNotFoundError:
+            # NOTE: Tests in this file are integration tests that rely on live LLM provider
+            # clients, so a failing test might simply be flaky. If this happens, consider
+            # adding a skip mechanism similar to the one used here for Groq.
+ pytest.skip("GROQ not available")
+ if provider == "cohere":
+ client = cohere.Client(api_key=os.getenv("COHERE_API_KEY")) # type: ignore
+ messages_cohere_format: list[cohere.Message] = []
+ for message in messages:
+ if message["role"] == "system":
+ messages_cohere_format.append(cohere.SystemMessage(message=message["content"]))
+ elif message["role"] == "user":
+ messages_cohere_format.append(cohere.UserMessage(message=message["content"]))
+ elif message["role"] == "assistant":
+ messages_cohere_format.append(cohere.ChatbotMessage(message=message["content"]))
+ return client.chat( # type: ignore
+ chat_history=messages_cohere_format,
+ model=model,
+ max_tokens=200,
+ message=messages[-1]["content"],
+ temperature=0.8,
+ ).text
+ if provider == "replicate":
+            # TODO: The instrumentor only picks up module-level methods, not client-level
+            # ones. This should be documented somewhere or changed.
+ replicate.default_client._api_token = os.getenv("REPLICATE_API_KEY")
+ try:
+ output = ""
+ for event in replicate.run(
+ model,
+ input={
+ "prompt": messages[0]["content"] + " " + messages[-1]["content"],
+ "temperature": 0.8,
+ },
+ ):
+ output += str(event)
+ except ReplicateModelError:
+ pytest.skip("Replicate not available")
+ if not output:
+ pytest.skip("Replicate not available")
+ return output
+ raise ValueError(f"Unknown provider: {provider}")
+
+ return _call_llm_base
@pytest.mark.parametrize("provider_model", _PROVIDER_AND_MODEL)
@@ -147,9 +146,12 @@ def test_prompt_decorator(
):
provider, model = provider_model
# GIVEN an OpenTelemetry configuration without HumanloopSpanProcessor
- _, exporter = opentelemetry_test_configuration
+ tracer, exporter = opentelemetry_test_configuration
# WHEN using the Prompt decorator
- _call_llm(
+
+ call_llm = _test_scenario(tracer)
+
+ call_llm(
provider=provider,
model=model,
messages=call_llm_messages,
@@ -173,9 +175,12 @@ def test_prompt_decorator_with_hl_processor(
):
provider, model = provider_model
# GIVEN an OpenTelemetry configuration with HumanloopSpanProcessor
- _, exporter = opentelemetry_hl_test_configuration
+ tracer, exporter = opentelemetry_hl_test_configuration
# WHEN using the Prompt decorator
- _call_llm(
+
+ call_llm = _test_scenario(opentelemetry_tracer=tracer)
+
+ call_llm(
provider=provider,
model=model,
messages=call_llm_messages,
@@ -186,20 +191,20 @@ def test_prompt_decorator_with_hl_processor(
assert not is_humanloop_span(span=spans[0])
assert is_humanloop_span(span=spans[1])
# THEN the Prompt span is enhanced with information and forms a correct PromptKernel
- prompt = PromptKernelRequest.model_validate(
+ prompt_kernel = PromptKernelRequest.model_validate(
read_from_opentelemetry_span(
span=spans[1],
key=HL_FILE_OT_KEY,
)["prompt"] # type: ignore
)
# THEN temperature is intercepted from LLM provider call
- assert prompt.temperature == 0.8
+ assert prompt_kernel.temperature == 0.8
# THEN the provider intercepted from LLM provider call
- assert prompt.provider == provider
+ assert prompt_kernel.provider == provider
# THEN model is intercepted from LLM provider call
- assert prompt.model == model
+ assert prompt_kernel.model == model
# THEN top_p is not present since it's not present in the LLM provider call
- assert prompt.top_p is None
+ assert prompt_kernel.top_p is None
@pytest.mark.parametrize("provider_model", _PROVIDER_AND_MODEL)
@@ -210,9 +215,18 @@ def test_prompt_decorator_with_defaults(
):
provider, model = provider_model
# GIVEN an OpenTelemetry configuration with HumanloopSpanProcessor
- _, exporter = opentelemetry_hl_test_configuration
+ tracer, exporter = opentelemetry_hl_test_configuration
# WHEN using the Prompt decorator with default values
- _call_llm_with_defaults(
+
+ call_llm = _test_scenario(
+ opentelemetry_tracer=tracer,
+ temperature=0.9,
+ top_p=0.1,
+ template="You are an assistant on the following topics: {topics}.",
+ path=None,
+ )
+
+ call_llm(
provider=provider,
model=model,
messages=call_llm_messages,
@@ -239,24 +253,22 @@ def test_prompt_decorator_with_defaults(
{"frequency_penalty": 3},
),
)
-def test_hyperparameter_values_fail_out_of_domain(hyperparameters: dict[str, float]):
+def test_hyperparameter_values_fail_out_of_domain(
+ opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter],
+ hyperparameters: dict[str, float],
+):
+ tracer, _ = opentelemetry_test_configuration
+
# GIVEN a Prompt decorated function
with pytest.raises(ValueError):
- # WHEN using default values that are out of domain
- @prompt(path=None, template="You are an assistant on the following topics: {topics}.", **hyperparameters) # type: ignore[arg-type]
- def _call_llm(messages: list[ChatCompletionMessageParam]) -> Optional[str]:
- load_dotenv()
- client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
- return (
- client.chat.completions.create(
- model="gpt-4o",
- messages=messages,
- temperature=0.8,
- )
- .choices[0]
- .message.content
- )
+ # WHEN passing default values to the @prompt decorator that are out of domain
+
+ _test_scenario(
+ opentelemetry_tracer=tracer,
+ path=None,
+ **hyperparameters,
+ )
# THEN an exception is raised
@@ -284,23 +296,19 @@ def test_prompt_attributes(
opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter],
):
test_attributes, expected_attributes = attributes_test_expected
- _, exporter = opentelemetry_hl_test_configuration
+ tracer, exporter = opentelemetry_hl_test_configuration
- @prompt(path=None, attributes=test_attributes)
- def call_llm(messages: list[ChatCompletionMessageParam]) -> Optional[str]:
- load_dotenv()
- client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
- return (
- client.chat.completions.create(
- model="gpt-4o",
- messages=messages,
- temperature=0.8,
- )
- .choices[0]
- .message.content
- )
+ call_llm = _test_scenario(
+ opentelemetry_tracer=tracer,
+ path=None,
+ attributes=test_attributes,
+ )
- call_llm(call_llm_messages)
+ call_llm(
+ provider="openai",
+ model="gpt-4o",
+ messages=call_llm_messages,
+ )
assert len(exporter.get_finished_spans()) == 2
diff --git a/tests/decorators/test_tool_decorator.py b/tests/decorators/test_tool_decorator.py
index 2b3475fd..26eaa000 100644
--- a/tests/decorators/test_tool_decorator.py
+++ b/tests/decorators/test_tool_decorator.py
@@ -10,9 +10,12 @@
def test_calculator_decorator(
- opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter],
+ opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter],
):
- @tool()
+ # GIVEN a test OpenTelemetry configuration
+ tracer, exporter = opentelemetry_test_configuration
+
+ @tool(opentelemetry_tracer=tracer)
def calculator(operation: str, num1: float, num2: float) -> float:
"""Do arithmetic operations on two numbers."""
if operation == "add":
@@ -26,8 +29,6 @@ def calculator(operation: str, num1: float, num2: float) -> float:
else:
raise ValueError(f"Invalid operation: {operation}")
- # GIVEN a test OpenTelemetry configuration
- _, exporter = opentelemetry_hl_test_configuration
# WHEN calling the @tool decorated function
result = calculator(operation="add", num1=1, num2=2)
# THEN a single span is created and the log and file attributes are correctly set
@@ -48,8 +49,10 @@ def calculator(operation: str, num1: float, num2: float) -> float:
Validator.check_schema(calculator.json_schema)
-def test_union_type():
- @tool()
+def test_union_type(opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter]):
+ tracer, _ = opentelemetry_test_configuration
+
+ @tool(opentelemetry_tracer=tracer)
def foo(a: Union[int, float], b: float) -> float:
return a + b
@@ -66,11 +69,11 @@ def foo(a: Union[int, float], b: float) -> float:
def test_not_required_parameter(
- opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter],
+ opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter],
):
- _, exporter = opentelemetry_hl_test_configuration
+ tracer, exporter = opentelemetry_test_configuration
- @tool()
+ @tool(opentelemetry_tracer=tracer)
def test_calculator(a: Optional[float], b: float) -> float:
if a is None:
a = 0
@@ -85,9 +88,14 @@ def test_calculator(a: Optional[float], b: float) -> float:
Validator.check_schema(test_calculator.json_schema)
-def test_no_annotation_on_parameter():
+def test_no_annotation_on_parameter(
+ opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter],
+):
+ # GIVEN an OTel configuration
+ tracer, _ = opentelemetry_test_configuration
+
# GIVEN a function annotated with @tool and without type hint on a parameter
- @tool()
+ @tool(opentelemetry_tracer=tracer)
def calculator(a: Optional[float], b) -> float:
if a is None:
a = 0
@@ -115,9 +123,14 @@ def calculator(a: Optional[float], b) -> float:
Validator.check_schema(calculator.json_schema)
-def test_dict_annotation_no_sub_types():
+def test_dict_annotation_no_sub_types(
+ opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter],
+):
+ # GIVEN an OTel configuration
+ tracer, _ = opentelemetry_test_configuration
+
# GIVEN a function annotated with @tool and without type hint on a parameter
- @tool()
+ @tool(opentelemetry_tracer=tracer)
def calculator(a: Optional[float], b: dict) -> float:
if a is None:
a = 0
@@ -150,9 +163,14 @@ def calculator(a: Optional[float], b: dict) -> float:
Validator.check_schema(calculator.json_schema)
-def test_list_annotation_no_sub_types():
+def test_list_annotation_no_sub_types(
+ opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter],
+):
+ # GIVEN an OTel configuration
+ tracer, _ = opentelemetry_test_configuration
+
# GIVEN a function annotated with @tool and without type hint on a parameter
- @tool()
+ @tool(opentelemetry_tracer=tracer)
def calculator(a: Optional[float], b: Optional[list]) -> float:
if a is None:
a = 0
@@ -184,9 +202,14 @@ def calculator(a: Optional[float], b: Optional[list]) -> float:
}
-def test_tuple_annotation_no_sub_types():
+def test_tuple_annotation_no_sub_types(
+ opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter],
+):
+ # GIVEN an OTel configuration
+ tracer, _ = opentelemetry_test_configuration
+
# GIVEN a function annotated with @tool and without type hint on a parameter
- @tool()
+ @tool(opentelemetry_tracer=tracer)
def calculator(a: Optional[float], b: Optional[tuple]) -> float:
if a is None:
a = 0
@@ -218,10 +241,15 @@ def calculator(a: Optional[float], b: Optional[tuple]) -> float:
}
-def test_function_without_return_annotation():
+def test_function_without_return_annotation(
+ opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter],
+):
+ # GIVEN an OTel configuration
+ tracer, _ = opentelemetry_test_configuration
+
# GIVEN a function annotated with @tool and without type hint on the return value
# WHEN building the Tool kernel
- @tool()
+ @tool(opentelemetry_tracer=tracer)
def foo(a: Optional[float], b: float) -> float:
"""Add two numbers."""
if a is None:
@@ -233,13 +261,13 @@ def foo(a: Optional[float], b: float) -> float:
def test_list_annotation_parameter(
- opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter],
+ opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter],
):
- # GIVEN an OTel configuration with HL Processor
- _, exporter = opentelemetry_hl_test_configuration
+ # GIVEN an OTel configuration
+ tracer, exporter = opentelemetry_test_configuration
# WHEN defining a tool with a list parameter
- @tool()
+ @tool(opentelemetry_tracer=tracer)
def foo(to_join: list[str]) -> str:
return " ".join(to_join)
@@ -256,10 +284,15 @@ def foo(to_join: list[str]) -> str:
Validator.check_schema(foo.json_schema)
-def test_list_in_list_parameter_annotation():
+def test_list_in_list_parameter_annotation(
+ opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter],
+):
+ # GIVEN an OTel configuration
+ tracer, _ = opentelemetry_test_configuration
+
# GIVEN a tool definition with a list of lists parameter
# WHEN building the Tool Kernel
- @tool()
+ @tool(opentelemetry_tracer=tracer)
def nested_plain_join(to_join: list[list[str]]):
return " ".join([val for sub_list in to_join for val in sub_list])
@@ -276,10 +309,15 @@ def nested_plain_join(to_join: list[list[str]]):
Validator.check_schema(nested_plain_join.json_schema)
-def test_complex_dict_annotation():
+def test_complex_dict_annotation(
+ opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter],
+):
+ # GIVEN an OTel configuration
+ tracer, _ = opentelemetry_test_configuration
+
# GIVEN a tool definition with a dictionary parameter
# WHEN building the Tool Kernel
- @tool()
+ @tool(opentelemetry_tracer=tracer)
def foo(a: dict[Union[int, str], list[str]]):
return a
@@ -296,10 +334,15 @@ def foo(a: dict[Union[int, str], list[str]]):
Validator.check_schema(foo.json_schema)
-def test_tuple_annotation():
+def test_tuple_annotation(
+ opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter],
+):
+ # GIVEN an OTel configuration
+ tracer, _ = opentelemetry_test_configuration
+
# GIVEN a tool definition with a tuple parameter
# WHEN building the Tool Kernel
- @tool()
+ @tool(opentelemetry_tracer=tracer)
def foo(a: Optional[tuple[int, Optional[str], float]]):
return a
@@ -317,10 +360,15 @@ def foo(a: Optional[tuple[int, Optional[str], float]]):
Validator.check_schema(foo.json_schema)
-def test_strict_false():
+def test_strict_false(
+ opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter],
+):
+ # GIVEN an OTel configuration
+ tracer, _ = opentelemetry_test_configuration
+
# GIVEN a tool definition with strict=False
# WHEN building the Tool Kernel
- @tool(strict=False)
+ @tool(opentelemetry_tracer=tracer, strict=False)
def foo(a: int, b: int) -> int:
return a + b
@@ -331,10 +379,15 @@ def foo(a: int, b: int) -> int:
Validator.check_schema(foo.json_schema)
-def test_tool_no_args():
+def test_tool_no_args(
+ opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter],
+):
+ # GIVEN an OTel configuration
+ tracer, _ = opentelemetry_test_configuration
+
# GIVEN a tool definition without arguments
# WHEN building the Tool Kernel
- @tool()
+ @tool(opentelemetry_tracer=tracer)
def foo():
return 42
@@ -355,7 +408,12 @@ def foo():
Validator.check_schema(foo.json_schema)
-def test_custom_types_throws():
+def test_custom_types_throws(
+ opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter],
+):
+ # GIVEN an OTel configuration
+ tracer, _ = opentelemetry_test_configuration
+
# GIVEN a user-defined type
class Foo(TypedDict):
a: int # type: ignore
@@ -364,7 +422,7 @@ class Foo(TypedDict):
# WHEN defining a tool with a parameter of that type
with pytest.raises(ValueError) as exc:
- @tool()
+ @tool(opentelemetry_tracer=tracer)
def foo_bar(foo: Foo):
return foo.a + foo.b # type: ignore
From fa2b30f43f4b37a2afcb259c8d35b72b3e091dad Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Thu, 31 Oct 2024 18:32:56 +0000
Subject: [PATCH 31/70] Changed dependency logic for instrumentors
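The visible churn here is a lockfile regeneration (e.g. jiter 0.6.1 -> 0.7.0)
plus a pyproject reshuffle; per the subject, the point is how instrumentor
dependencies are declared. The SDK already gates instrumentation at runtime
via humanloop.otel.helpers.module_is_installed (used by instrument_provider);
a hedged sketch of that gating pattern follows, using the OpenAI instrumentor
as an assumed example (the real instrument_provider may gate on a different
set of modules):

    import importlib.util

    from opentelemetry.sdk.trace import TracerProvider

    def module_is_installed(name: str) -> bool:
        # True when the package can be imported in this environment.
        return importlib.util.find_spec(name) is not None

    provider = TracerProvider()
    if module_is_installed("openai"):
        # Assumption: the instrumentor ships with the SDK, so only the
        # user's provider package ("openai" here) needs to be optional.
        from opentelemetry.instrumentation.openai import OpenAIInstrumentor

        OpenAIInstrumentor().instrument(tracer_provider=provider)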
---
poetry.lock | 196 ++++++++++++++++++++++++-------------------------
pyproject.toml | 14 ++--
2 files changed, 105 insertions(+), 105 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index 95a95b1b..54b71e22 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -549,84 +549,84 @@ files = [
[[package]]
name = "jiter"
-version = "0.6.1"
+version = "0.7.0"
description = "Fast iterable JSON parser."
optional = false
python-versions = ">=3.8"
files = [
- {file = "jiter-0.6.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:d08510593cb57296851080018006dfc394070178d238b767b1879dc1013b106c"},
- {file = "jiter-0.6.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:adef59d5e2394ebbad13b7ed5e0306cceb1df92e2de688824232a91588e77aa7"},
- {file = "jiter-0.6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b3e02f7a27f2bcc15b7d455c9df05df8ffffcc596a2a541eeda9a3110326e7a3"},
- {file = "jiter-0.6.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed69a7971d67b08f152c17c638f0e8c2aa207e9dd3a5fcd3cba294d39b5a8d2d"},
- {file = "jiter-0.6.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b2019d966e98f7c6df24b3b8363998575f47d26471bfb14aade37630fae836a1"},
- {file = "jiter-0.6.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:36c0b51a285b68311e207a76c385650322734c8717d16c2eb8af75c9d69506e7"},
- {file = "jiter-0.6.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:220e0963b4fb507c525c8f58cde3da6b1be0bfddb7ffd6798fb8f2531226cdb1"},
- {file = "jiter-0.6.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:aa25c7a9bf7875a141182b9c95aed487add635da01942ef7ca726e42a0c09058"},
- {file = "jiter-0.6.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e90552109ca8ccd07f47ca99c8a1509ced93920d271bb81780a973279974c5ab"},
- {file = "jiter-0.6.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:67723a011964971864e0b484b0ecfee6a14de1533cff7ffd71189e92103b38a8"},
- {file = "jiter-0.6.1-cp310-none-win32.whl", hash = "sha256:33af2b7d2bf310fdfec2da0177eab2fedab8679d1538d5b86a633ebfbbac4edd"},
- {file = "jiter-0.6.1-cp310-none-win_amd64.whl", hash = "sha256:7cea41c4c673353799906d940eee8f2d8fd1d9561d734aa921ae0f75cb9732f4"},
- {file = "jiter-0.6.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:b03c24e7da7e75b170c7b2b172d9c5e463aa4b5c95696a368d52c295b3f6847f"},
- {file = "jiter-0.6.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:47fee1be677b25d0ef79d687e238dc6ac91a8e553e1a68d0839f38c69e0ee491"},
- {file = "jiter-0.6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25f0d2f6e01a8a0fb0eab6d0e469058dab2be46ff3139ed2d1543475b5a1d8e7"},
- {file = "jiter-0.6.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0b809e39e342c346df454b29bfcc7bca3d957f5d7b60e33dae42b0e5ec13e027"},
- {file = "jiter-0.6.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e9ac7c2f092f231f5620bef23ce2e530bd218fc046098747cc390b21b8738a7a"},
- {file = "jiter-0.6.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e51a2d80d5fe0ffb10ed2c82b6004458be4a3f2b9c7d09ed85baa2fbf033f54b"},
- {file = "jiter-0.6.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3343d4706a2b7140e8bd49b6c8b0a82abf9194b3f0f5925a78fc69359f8fc33c"},
- {file = "jiter-0.6.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:82521000d18c71e41c96960cb36e915a357bc83d63a8bed63154b89d95d05ad1"},
- {file = "jiter-0.6.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:3c843e7c1633470708a3987e8ce617ee2979ee18542d6eb25ae92861af3f1d62"},
- {file = "jiter-0.6.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a2e861658c3fe849efc39b06ebb98d042e4a4c51a8d7d1c3ddc3b1ea091d0784"},
- {file = "jiter-0.6.1-cp311-none-win32.whl", hash = "sha256:7d72fc86474862c9c6d1f87b921b70c362f2b7e8b2e3c798bb7d58e419a6bc0f"},
- {file = "jiter-0.6.1-cp311-none-win_amd64.whl", hash = "sha256:3e36a320634f33a07794bb15b8da995dccb94f944d298c8cfe2bd99b1b8a574a"},
- {file = "jiter-0.6.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:1fad93654d5a7dcce0809aff66e883c98e2618b86656aeb2129db2cd6f26f867"},
- {file = "jiter-0.6.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4e6e340e8cd92edab7f6a3a904dbbc8137e7f4b347c49a27da9814015cc0420c"},
- {file = "jiter-0.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:691352e5653af84ed71763c3c427cff05e4d658c508172e01e9c956dfe004aba"},
- {file = "jiter-0.6.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:defee3949313c1f5b55e18be45089970cdb936eb2a0063f5020c4185db1b63c9"},
- {file = "jiter-0.6.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:26d2bdd5da097e624081c6b5d416d3ee73e5b13f1703bcdadbb1881f0caa1933"},
- {file = "jiter-0.6.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18aa9d1626b61c0734b973ed7088f8a3d690d0b7f5384a5270cd04f4d9f26c86"},
- {file = "jiter-0.6.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a3567c8228afa5ddcce950631c6b17397ed178003dc9ee7e567c4c4dcae9fa0"},
- {file = "jiter-0.6.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e5c0507131c922defe3f04c527d6838932fcdfd69facebafd7d3574fa3395314"},
- {file = "jiter-0.6.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:540fcb224d7dc1bcf82f90f2ffb652df96f2851c031adca3c8741cb91877143b"},
- {file = "jiter-0.6.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e7b75436d4fa2032b2530ad989e4cb0ca74c655975e3ff49f91a1a3d7f4e1df2"},
- {file = "jiter-0.6.1-cp312-none-win32.whl", hash = "sha256:883d2ced7c21bf06874fdeecab15014c1c6d82216765ca6deef08e335fa719e0"},
- {file = "jiter-0.6.1-cp312-none-win_amd64.whl", hash = "sha256:91e63273563401aadc6c52cca64a7921c50b29372441adc104127b910e98a5b6"},
- {file = "jiter-0.6.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:852508a54fe3228432e56019da8b69208ea622a3069458252f725d634e955b31"},
- {file = "jiter-0.6.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f491cc69ff44e5a1e8bc6bf2b94c1f98d179e1aaf4a554493c171a5b2316b701"},
- {file = "jiter-0.6.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc56c8f0b2a28ad4d8047f3ae62d25d0e9ae01b99940ec0283263a04724de1f3"},
- {file = "jiter-0.6.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:51b58f7a0d9e084a43b28b23da2b09fc5e8df6aa2b6a27de43f991293cab85fd"},
- {file = "jiter-0.6.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5f79ce15099154c90ef900d69c6b4c686b64dfe23b0114e0971f2fecd306ec6c"},
- {file = "jiter-0.6.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:03a025b52009f47e53ea619175d17e4ded7c035c6fbd44935cb3ada11e1fd592"},
- {file = "jiter-0.6.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c74a8d93718137c021d9295248a87c2f9fdc0dcafead12d2930bc459ad40f885"},
- {file = "jiter-0.6.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:40b03b75f903975f68199fc4ec73d546150919cb7e534f3b51e727c4d6ccca5a"},
- {file = "jiter-0.6.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:825651a3f04cf92a661d22cad61fc913400e33aa89b3e3ad9a6aa9dc8a1f5a71"},
- {file = "jiter-0.6.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:928bf25eb69ddb292ab8177fe69d3fbf76c7feab5fce1c09265a7dccf25d3991"},
- {file = "jiter-0.6.1-cp313-none-win32.whl", hash = "sha256:352cd24121e80d3d053fab1cc9806258cad27c53cad99b7a3cac57cf934b12e4"},
- {file = "jiter-0.6.1-cp313-none-win_amd64.whl", hash = "sha256:be7503dd6f4bf02c2a9bacb5cc9335bc59132e7eee9d3e931b13d76fd80d7fda"},
- {file = "jiter-0.6.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:31d8e00e1fb4c277df8ab6f31a671f509ebc791a80e5c61fdc6bc8696aaa297c"},
- {file = "jiter-0.6.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:77c296d65003cd7ee5d7b0965f6acbe6cffaf9d1fa420ea751f60ef24e85fed5"},
- {file = "jiter-0.6.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aeeb0c0325ef96c12a48ea7e23e2e86fe4838e6e0a995f464cf4c79fa791ceeb"},
- {file = "jiter-0.6.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a31c6fcbe7d6c25d6f1cc6bb1cba576251d32795d09c09961174fe461a1fb5bd"},
- {file = "jiter-0.6.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:59e2b37f3b9401fc9e619f4d4badcab2e8643a721838bcf695c2318a0475ae42"},
- {file = "jiter-0.6.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bae5ae4853cb9644144e9d0755854ce5108d470d31541d83f70ca7ecdc2d1637"},
- {file = "jiter-0.6.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9df588e9c830b72d8db1dd7d0175af6706b0904f682ea9b1ca8b46028e54d6e9"},
- {file = "jiter-0.6.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:15f8395e835cf561c85c1adee72d899abf2733d9df72e9798e6d667c9b5c1f30"},
- {file = "jiter-0.6.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5a99d4e0b5fc3b05ea732d67eb2092fe894e95a90e6e413f2ea91387e228a307"},
- {file = "jiter-0.6.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:a311df1fa6be0ccd64c12abcd85458383d96e542531bafbfc0a16ff6feda588f"},
- {file = "jiter-0.6.1-cp38-none-win32.whl", hash = "sha256:81116a6c272a11347b199f0e16b6bd63f4c9d9b52bc108991397dd80d3c78aba"},
- {file = "jiter-0.6.1-cp38-none-win_amd64.whl", hash = "sha256:13f9084e3e871a7c0b6e710db54444088b1dd9fbefa54d449b630d5e73bb95d0"},
- {file = "jiter-0.6.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:f1c53615fcfec3b11527c08d19cff6bc870da567ce4e57676c059a3102d3a082"},
- {file = "jiter-0.6.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f791b6a4da23238c17a81f44f5b55d08a420c5692c1fda84e301a4b036744eb1"},
- {file = "jiter-0.6.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c97e90fec2da1d5f68ef121444c2c4fa72eabf3240829ad95cf6bbeca42a301"},
- {file = "jiter-0.6.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3cbc1a66b4e41511209e97a2866898733c0110b7245791ac604117b7fb3fedb7"},
- {file = "jiter-0.6.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e4e85f9e12cd8418ab10e1fcf0e335ae5bb3da26c4d13a0fd9e6a17a674783b6"},
- {file = "jiter-0.6.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08be33db6dcc374c9cc19d3633af5e47961a7b10d4c61710bd39e48d52a35824"},
- {file = "jiter-0.6.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:677be9550004f5e010d673d3b2a2b815a8ea07a71484a57d3f85dde7f14cf132"},
- {file = "jiter-0.6.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e8bd065be46c2eecc328e419d6557bbc37844c88bb07b7a8d2d6c91c7c4dedc9"},
- {file = "jiter-0.6.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bd95375ce3609ec079a97c5d165afdd25693302c071ca60c7ae1cf826eb32022"},
- {file = "jiter-0.6.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:db459ed22d0208940d87f614e1f0ea5a946d29a3cfef71f7e1aab59b6c6b2afb"},
- {file = "jiter-0.6.1-cp39-none-win32.whl", hash = "sha256:d71c962f0971347bd552940ab96aa42ceefcd51b88c4ced8a27398182efa8d80"},
- {file = "jiter-0.6.1-cp39-none-win_amd64.whl", hash = "sha256:d465db62d2d10b489b7e7a33027c4ae3a64374425d757e963f86df5b5f2e7fc5"},
- {file = "jiter-0.6.1.tar.gz", hash = "sha256:e19cd21221fc139fb032e4112986656cb2739e9fe6d84c13956ab30ccc7d4449"},
+ {file = "jiter-0.7.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:e14027f61101b3f5e173095d9ecf95c1cac03ffe45a849279bde1d97e559e314"},
+ {file = "jiter-0.7.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:979ec4711c2e37ac949561858bd42028884c9799516a923e1ff0b501ef341a4a"},
+ {file = "jiter-0.7.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:662d5d3cca58ad6af7a3c6226b641c8655de5beebcb686bfde0df0f21421aafa"},
+ {file = "jiter-0.7.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1d89008fb47043a469f97ad90840b97ba54e7c3d62dc7cbb6cbf938bd0caf71d"},
+ {file = "jiter-0.7.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a8b16c35c846a323ce9067170d5ab8c31ea3dbcab59c4f7608bbbf20c2c3b43f"},
+ {file = "jiter-0.7.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c9e82daaa1b0a68704f9029b81e664a5a9de3e466c2cbaabcda5875f961702e7"},
+ {file = "jiter-0.7.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:43a87a9f586636e1f0dd3651a91f79b491ea0d9fd7cbbf4f5c463eebdc48bda7"},
+ {file = "jiter-0.7.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2ec05b1615f96cc3e4901678bc863958611584072967d9962f9e571d60711d52"},
+ {file = "jiter-0.7.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:a5cb97e35370bde7aa0d232a7f910f5a0fbbc96bc0a7dbaa044fd5cd6bcd7ec3"},
+ {file = "jiter-0.7.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:cb316dacaf48c8c187cea75d0d7f835f299137e6fdd13f691dff8f92914015c7"},
+ {file = "jiter-0.7.0-cp310-none-win32.whl", hash = "sha256:243f38eb4072763c54de95b14ad283610e0cd3bf26393870db04e520f60eebb3"},
+ {file = "jiter-0.7.0-cp310-none-win_amd64.whl", hash = "sha256:2221d5603c139f6764c54e37e7c6960c469cbcd76928fb10d15023ba5903f94b"},
+ {file = "jiter-0.7.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:91cec0ad755bd786c9f769ce8d843af955df6a8e56b17658771b2d5cb34a3ff8"},
+ {file = "jiter-0.7.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:feba70a28a27d962e353e978dbb6afd798e711c04cb0b4c5e77e9d3779033a1a"},
+ {file = "jiter-0.7.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a9d866ec066c3616cacb8535dbda38bb1d470b17b25f0317c4540182bc886ce2"},
+ {file = "jiter-0.7.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8e7a7a00b6f9f18289dd563596f97ecaba6c777501a8ba04bf98e03087bcbc60"},
+ {file = "jiter-0.7.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9aaf564094c7db8687f2660605e099f3d3e6ea5e7135498486674fcb78e29165"},
+ {file = "jiter-0.7.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a4d27e09825c1b3c7a667adb500ce8b840e8fc9f630da8454b44cdd4fb0081bb"},
+ {file = "jiter-0.7.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ca7c287da9c1d56dda88da1d08855a787dbb09a7e2bd13c66a2e288700bd7c7"},
+ {file = "jiter-0.7.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:db19a6d160f093cbc8cd5ea2abad420b686f6c0e5fb4f7b41941ebc6a4f83cda"},
+ {file = "jiter-0.7.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6e46a63c7f877cf7441ffc821c28287cfb9f533ae6ed707bde15e7d4dfafa7ae"},
+ {file = "jiter-0.7.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7ba426fa7ff21cb119fa544b75dd3fbee6a70e55a5829709c0338d07ccd30e6d"},
+ {file = "jiter-0.7.0-cp311-none-win32.whl", hash = "sha256:c07f55a64912b0c7982377831210836d2ea92b7bd343fca67a32212dd72e38e0"},
+ {file = "jiter-0.7.0-cp311-none-win_amd64.whl", hash = "sha256:ed27b2c43e1b5f6c7fedc5c11d4d8bfa627de42d1143d87e39e2e83ddefd861a"},
+ {file = "jiter-0.7.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:ac7930bcaaeb1e229e35c91c04ed2e9f39025b86ee9fc3141706bbf6fff4aeeb"},
+ {file = "jiter-0.7.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:571feae3e7c901a8eedde9fd2865b0dfc1432fb15cab8c675a8444f7d11b7c5d"},
+ {file = "jiter-0.7.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a8af4df8a262fa2778b68c2a03b6e9d1cb4d43d02bea6976d46be77a3a331af1"},
+ {file = "jiter-0.7.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bd028d4165097a611eb0c7494d8c1f2aebd46f73ca3200f02a175a9c9a6f22f5"},
+ {file = "jiter-0.7.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c6b487247c7836810091e9455efe56a52ec51bfa3a222237e1587d04d3e04527"},
+ {file = "jiter-0.7.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e6d28a92f28814e1a9f2824dc11f4e17e1df1f44dc4fdeb94c5450d34bcb2602"},
+ {file = "jiter-0.7.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90443994bbafe134f0b34201dad3ebe1c769f0599004084e046fb249ad912425"},
+ {file = "jiter-0.7.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f9abf464f9faac652542ce8360cea8e68fba2b78350e8a170248f9bcc228702a"},
+ {file = "jiter-0.7.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:db7a8d99fc5f842f7d2852f06ccaed066532292c41723e5dff670c339b649f88"},
+ {file = "jiter-0.7.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:15cf691ebd8693b70c94627d6b748f01e6d697d9a6e9f2bc310934fcfb7cf25e"},
+ {file = "jiter-0.7.0-cp312-none-win32.whl", hash = "sha256:9dcd54fa422fb66ca398bec296fed5f58e756aa0589496011cfea2abb5be38a5"},
+ {file = "jiter-0.7.0-cp312-none-win_amd64.whl", hash = "sha256:cc989951f73f9375b8eacd571baaa057f3d7d11b7ce6f67b9d54642e7475bfad"},
+ {file = "jiter-0.7.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:24cecd18df540963cd27c08ca5ce1d0179f229ff78066d9eecbe5add29361340"},
+ {file = "jiter-0.7.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d41b46236b90b043cca73785674c23d2a67d16f226394079d0953f94e765ed76"},
+ {file = "jiter-0.7.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b160db0987171365c153e406a45dcab0ee613ae3508a77bfff42515cb4ce4d6e"},
+ {file = "jiter-0.7.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d1c8d91e0f0bd78602eaa081332e8ee4f512c000716f5bc54e9a037306d693a7"},
+ {file = "jiter-0.7.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:997706c683195eeff192d2e5285ce64d2a610414f37da3a3f2625dcf8517cf90"},
+ {file = "jiter-0.7.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7ea52a8a0ff0229ab2920284079becd2bae0688d432fca94857ece83bb49c541"},
+ {file = "jiter-0.7.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d77449d2738cf74752bb35d75ee431af457e741124d1db5e112890023572c7c"},
+ {file = "jiter-0.7.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a8203519907a1d81d6cb00902c98e27c2d0bf25ce0323c50ca594d30f5f1fbcf"},
+ {file = "jiter-0.7.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41d15ccc53931c822dd7f1aebf09faa3cda2d7b48a76ef304c7dbc19d1302e51"},
+ {file = "jiter-0.7.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:febf3179b2fabf71fbd2fd52acb8594163bb173348b388649567a548f356dbf6"},
+ {file = "jiter-0.7.0-cp313-none-win32.whl", hash = "sha256:4a8e2d866e7eda19f012444e01b55079d8e1c4c30346aaac4b97e80c54e2d6d3"},
+ {file = "jiter-0.7.0-cp313-none-win_amd64.whl", hash = "sha256:7417c2b928062c496f381fb0cb50412eee5ad1d8b53dbc0e011ce45bb2de522c"},
+ {file = "jiter-0.7.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:9c62c737b5368e51e74960a08fe1adc807bd270227291daede78db24d5fbf556"},
+ {file = "jiter-0.7.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e4640722b1bef0f6e342fe4606aafaae0eb4f4be5c84355bb6867f34400f6688"},
+ {file = "jiter-0.7.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f367488c3b9453eab285424c61098faa1cab37bb49425e69c8dca34f2dfe7d69"},
+ {file = "jiter-0.7.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0cf5d42beb3514236459454e3287db53d9c4d56c4ebaa3e9d0efe81b19495129"},
+ {file = "jiter-0.7.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cc5190ea1113ee6f7252fa8a5fe5a6515422e378356c950a03bbde5cafbdbaab"},
+ {file = "jiter-0.7.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:63ee47a149d698796a87abe445fc8dee21ed880f09469700c76c8d84e0d11efd"},
+ {file = "jiter-0.7.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48592c26ea72d3e71aa4bea0a93454df907d80638c3046bb0705507b6704c0d7"},
+ {file = "jiter-0.7.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:79fef541199bd91cfe8a74529ecccb8eaf1aca38ad899ea582ebbd4854af1e51"},
+ {file = "jiter-0.7.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:d1ef6bb66041f2514739240568136c81b9dcc64fd14a43691c17ea793b6535c0"},
+ {file = "jiter-0.7.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aca4d950863b1c238e315bf159466e064c98743eef3bd0ff9617e48ff63a4715"},
+ {file = "jiter-0.7.0-cp38-none-win32.whl", hash = "sha256:897745f230350dcedb8d1ebe53e33568d48ea122c25e6784402b6e4e88169be7"},
+ {file = "jiter-0.7.0-cp38-none-win_amd64.whl", hash = "sha256:b928c76a422ef3d0c85c5e98c498ce3421b313c5246199541e125b52953e1bc0"},
+ {file = "jiter-0.7.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c9b669ff6f8ba08270dee9ccf858d3b0203b42314a428a1676762f2d390fbb64"},
+ {file = "jiter-0.7.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b5be919bacd73ca93801c3042bce6e95cb9c555a45ca83617b9b6c89df03b9c2"},
+ {file = "jiter-0.7.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a282e1e8a396dabcea82d64f9d05acf7efcf81ecdd925b967020dcb0e671c103"},
+ {file = "jiter-0.7.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:17ecb1a578a56e97a043c72b463776b5ea30343125308f667fb8fce4b3796735"},
+ {file = "jiter-0.7.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7b6045fa0527129218cdcd8a8b839f678219686055f31ebab35f87d354d9c36e"},
+ {file = "jiter-0.7.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:189cc4262a92e33c19d4fd24018f5890e4e6da5b2581f0059938877943f8298c"},
+ {file = "jiter-0.7.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c138414839effbf30d185e30475c6dc8a16411a1e3681e5fd4605ab1233ac67a"},
+ {file = "jiter-0.7.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2791604acef33da6b72d5ecf885a32384bcaf9aa1e4be32737f3b8b9588eef6a"},
+ {file = "jiter-0.7.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ae60ec89037a78d60bbf3d8b127f1567769c8fa24886e0abed3f622791dea478"},
+ {file = "jiter-0.7.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:836f03dea312967635233d826f783309b98cfd9ccc76ac776e224cfcef577862"},
+ {file = "jiter-0.7.0-cp39-none-win32.whl", hash = "sha256:ebc30ae2ce4bc4986e1764c404b4ea1924f926abf02ce92516485098f8545374"},
+ {file = "jiter-0.7.0-cp39-none-win_amd64.whl", hash = "sha256:abf596f951370c648f37aa9899deab296c42a3829736e598b0dd10b08f77a44d"},
+ {file = "jiter-0.7.0.tar.gz", hash = "sha256:c061d9738535497b5509f8970584f20de1e900806b239a39a9994fc191dad630"},
]
[[package]]
@@ -778,89 +778,89 @@ wrapt = ">=1.0.0,<2.0.0"
[[package]]
name = "opentelemetry-instrumentation-anthropic"
-version = "0.33.5"
+version = "0.33.3"
description = "OpenTelemetry Anthropic instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_anthropic-0.33.5-py3-none-any.whl", hash = "sha256:3e94d6293c28e805957a5c665bfabb7b4a5165b50dee7e6940b1b3606598cc09"},
- {file = "opentelemetry_instrumentation_anthropic-0.33.5.tar.gz", hash = "sha256:a7856cd47926b61b2fa722e1a5f30612fed23863a5211c156feda483c294a5eb"},
+ {file = "opentelemetry_instrumentation_anthropic-0.33.3-py3-none-any.whl", hash = "sha256:dc4110c6400708d600f79fd78e8e8fe04b90a82b44949817cc91c961cd4db6e7"},
+ {file = "opentelemetry_instrumentation_anthropic-0.33.3.tar.gz", hash = "sha256:d245f1c732caebe4706a4900084758296d1d46d37e042bbd8542d0aa0e691899"},
]
[package.dependencies]
opentelemetry-api = ">=1.27.0,<2.0.0"
opentelemetry-instrumentation = ">=0.48b0,<0.49"
opentelemetry-semantic-conventions = ">=0.48b0,<0.49"
-opentelemetry-semantic-conventions-ai = "0.4.2"
+opentelemetry-semantic-conventions-ai = "0.4.1"
[[package]]
name = "opentelemetry-instrumentation-cohere"
-version = "0.33.5"
+version = "0.33.3"
description = "OpenTelemetry Cohere instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_cohere-0.33.5-py3-none-any.whl", hash = "sha256:bd69ae1f87d531ca6cf91eb5e5b4cd1eefeaa9da43a39725d7b90e3dd3d27158"},
- {file = "opentelemetry_instrumentation_cohere-0.33.5.tar.gz", hash = "sha256:3bab99113f1cbd3d592f9e0f217e275375bd7c0ed9ab62931d8a31e317033f84"},
+ {file = "opentelemetry_instrumentation_cohere-0.33.3-py3-none-any.whl", hash = "sha256:b0a614a321f332e31eb74980a603303123b58a3627a11e7db5f13a8b3c660311"},
+ {file = "opentelemetry_instrumentation_cohere-0.33.3.tar.gz", hash = "sha256:9d940cb30b7e4be94f063f5afadeb2572f4cfe69a731d7c45faaa9f034991a5e"},
]
[package.dependencies]
opentelemetry-api = ">=1.27.0,<2.0.0"
opentelemetry-instrumentation = ">=0.48b0,<0.49"
opentelemetry-semantic-conventions = ">=0.48b0,<0.49"
-opentelemetry-semantic-conventions-ai = "0.4.2"
+opentelemetry-semantic-conventions-ai = "0.4.1"
[[package]]
name = "opentelemetry-instrumentation-groq"
-version = "0.33.5"
+version = "0.33.3"
description = "OpenTelemetry Groq instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_groq-0.33.5-py3-none-any.whl", hash = "sha256:c772545fbffa68f508457eaf2f7a7ae55edf4c5e66ed6ec7692a234c43f69c81"},
- {file = "opentelemetry_instrumentation_groq-0.33.5.tar.gz", hash = "sha256:1c123a93a5582407911a33f3e8bfcb8abedfcac9f279aa5739179c5482d73d42"},
+ {file = "opentelemetry_instrumentation_groq-0.33.3-py3-none-any.whl", hash = "sha256:53d75f8ec2dbcf5e0f06ed53a7a4cb875823749cb96bbc07dbb7a1d5ee374e32"},
+ {file = "opentelemetry_instrumentation_groq-0.33.3.tar.gz", hash = "sha256:98408aaf91e2d55ad348deb12666339fbcb972b18ec511c4f394d3fac37041eb"},
]
[package.dependencies]
opentelemetry-api = ">=1.27.0,<2.0.0"
opentelemetry-instrumentation = ">=0.48b0,<0.49"
opentelemetry-semantic-conventions = ">=0.48b0,<0.49"
-opentelemetry-semantic-conventions-ai = "0.4.2"
+opentelemetry-semantic-conventions-ai = "0.4.1"
[[package]]
name = "opentelemetry-instrumentation-openai"
-version = "0.33.5"
+version = "0.33.3"
description = "OpenTelemetry OpenAI instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_openai-0.33.5-py3-none-any.whl", hash = "sha256:bf9d238af2e37ad15f6e7e199f5f0886d38f5ccea05e9ad08375513b8247b4a3"},
- {file = "opentelemetry_instrumentation_openai-0.33.5.tar.gz", hash = "sha256:0e7f5eb8e67ef5c14e80f040484f42cfcc04a9ebb0fc53947663a27ee3470e4f"},
+ {file = "opentelemetry_instrumentation_openai-0.33.3-py3-none-any.whl", hash = "sha256:f5ef4452b269bb409cc260fd611834c33296495e39700fd6e6f83a1cef07b9fd"},
+ {file = "opentelemetry_instrumentation_openai-0.33.3.tar.gz", hash = "sha256:06ad92d5d852f93ee7c0d9b545a412df5265044dae4d6be7056a10fa8afb2fdc"},
]
[package.dependencies]
opentelemetry-api = ">=1.27.0,<2.0.0"
opentelemetry-instrumentation = ">=0.48b0,<0.49"
opentelemetry-semantic-conventions = ">=0.48b0,<0.49"
-opentelemetry-semantic-conventions-ai = "0.4.2"
+opentelemetry-semantic-conventions-ai = "0.4.1"
tiktoken = ">=0.6.0,<1"
[[package]]
name = "opentelemetry-instrumentation-replicate"
-version = "0.33.5"
+version = "0.33.3"
description = "OpenTelemetry Replicate instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_replicate-0.33.5-py3-none-any.whl", hash = "sha256:213c47dad5bebd6f26b2e7ab50529ce082782ee56f0e2752c86d86532474e6eb"},
- {file = "opentelemetry_instrumentation_replicate-0.33.5.tar.gz", hash = "sha256:d039cba78589dbefdf2e3c76eb4461b940db644260e88fba71a24ff413ce83cd"},
+ {file = "opentelemetry_instrumentation_replicate-0.33.3-py3-none-any.whl", hash = "sha256:c2870c1939b69ff3c57a508404cec75329e07c907eb9600f47ec64be2c0b8310"},
+ {file = "opentelemetry_instrumentation_replicate-0.33.3.tar.gz", hash = "sha256:06c9f63f7c235392567b10efe20f8cb2379f322d0a72e4c52ab4912f1ebb943a"},
]
[package.dependencies]
opentelemetry-api = ">=1.27.0,<2.0.0"
opentelemetry-instrumentation = ">=0.48b0,<0.49"
opentelemetry-semantic-conventions = ">=0.48b0,<0.49"
-opentelemetry-semantic-conventions-ai = "0.4.2"
+opentelemetry-semantic-conventions-ai = "0.4.1"
[[package]]
name = "opentelemetry-sdk"
@@ -895,13 +895,13 @@ opentelemetry-api = "1.27.0"
[[package]]
name = "opentelemetry-semantic-conventions-ai"
-version = "0.4.2"
+version = "0.4.1"
description = "OpenTelemetry Semantic Conventions Extension for Large Language Models"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_semantic_conventions_ai-0.4.2-py3-none-any.whl", hash = "sha256:0a5432aacd441eb7dbdf62e0de3f3d90ed4f69595b687a6dd2ccc4c5b94c5861"},
- {file = "opentelemetry_semantic_conventions_ai-0.4.2.tar.gz", hash = "sha256:90b969c7d838e03e30a9150ffe46543d8e58e9d7370c7221fd30d4ce4d7a1b96"},
+ {file = "opentelemetry_semantic_conventions_ai-0.4.1-py3-none-any.whl", hash = "sha256:b6c6e3976a5ea31058faeaf0450a6a56d4576a9734c94c1a4cb82332ee635fe3"},
+ {file = "opentelemetry_semantic_conventions_ai-0.4.1.tar.gz", hash = "sha256:aaf59b2f24d745692170b96d86d7c5560f42443dcf88ced49ae9d4542db1902f"},
]
[[package]]
@@ -1905,4 +1905,4 @@ type = ["pytest-mypy"]
[metadata]
lock-version = "2.0"
python-versions = "^3.9"
-content-hash = "7fcc781f96cb6cefe8ec50a22f6410b4b20f02651e787d06dbad4cd46416cc8b"
+content-hash = "83aa5483bed11f7992b59be1c59922c1802b03742be6d6d41abb46fe8848a770"
diff --git a/pyproject.toml b/pyproject.toml
index 4339dd85..3e033710 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -38,13 +38,13 @@ pydantic = ">= 1.9.2"
pydantic-core = "^2.18.2"
typing_extensions = ">= 4.0.0"
parse = "^1.20.2"
-opentelemetry-sdk = "^1.27.0"
-opentelemetry-api = "^1.27.0"
-opentelemetry-instrumentation-openai = "^0.33.3"
-opentelemetry-instrumentation-cohere = "^0.33.3"
-opentelemetry-instrumentation-anthropic = "^0.33.3"
-opentelemetry-instrumentation-groq = "^0.33.3"
-opentelemetry-instrumentation-replicate = "^0.33.3"
+opentelemetry-sdk = "<=1.27.0"
+opentelemetry-api = "<=1.27.0"
+opentelemetry-instrumentation-openai = "<=0.33.3"
+opentelemetry-instrumentation-cohere = "<=0.33.3"
+opentelemetry-instrumentation-anthropic = "<=0.33.3"
+opentelemetry-instrumentation-groq = "<=0.33.3"
+opentelemetry-instrumentation-replicate = "<=0.33.3"
[tool.poetry.group.dev.dependencies]
parse-type = "^0.6.4"
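A note on the constraint change above: switching from caret to `<=` narrows what Poetry's resolver may pick. `^0.33.3` expands to `>=0.33.3,<0.34.0`, so it still admits the `0.33.5` instrumentation releases that pull in `opentelemetry-semantic-conventions-ai 0.4.2`, while `<=0.33.3` caps the resolver at the version the lockfile above downgrades to. A minimal sketch of the difference using the `packaging` library (the caret expansion shown is Poetry's documented semantics):

```python
from packaging.specifiers import SpecifierSet

# Poetry expands ^0.33.3 to >=0.33.3,<0.34.0
caret = SpecifierSet(">=0.33.3,<0.34.0")
pinned = SpecifierSet("<=0.33.3")

print("0.33.5" in caret)   # True  -- caret still admits the 0.33.x releases above 0.33.3
print("0.33.5" in pinned)  # False -- <= keeps the resolver at 0.33.3 or below
print("0.33.3" in pinned)  # True
```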
From ad3d3a8f07cf10bd5aeeb5e063b9e7f43541c580 Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Mon, 4 Nov 2024 15:38:37 +0000
Subject: [PATCH 32/70] Integrated OTel decorators with local eval utility
---
.fernignore | 2 +-
poetry.lock | 234 ++++++++++-
pyproject.toml | 1 +
src/humanloop/client.py | 7 +-
src/humanloop/decorators/flow.py | 76 ++--
src/humanloop/decorators/prompt.py | 57 ++-
src/humanloop/decorators/tool.py | 78 ++--
.../{eval_utils.py => eval_utils/__init__.py} | 366 ++++++++----------
src/humanloop/eval_utils/context.py | 13 +
src/humanloop/eval_utils/domain.py | 111 ++++++
src/humanloop/eval_utils/shared.py | 51 +++
src/humanloop/otel/__init__.py | 45 +--
src/humanloop/otel/constants.py | 2 -
src/humanloop/otel/exporter.py | 131 ++++---
src/humanloop/otel/helpers.py | 2 +-
src/humanloop/otel/processor.py | 5 +-
.../requests/create_datapoint_request.py | 1 -
tests/decorators/test_flow_decorator.py | 31 +-
tests/decorators/test_tool_decorator.py | 2 +-
19 files changed, 760 insertions(+), 455 deletions(-)
rename src/humanloop/{eval_utils.py => eval_utils/__init__.py} (63%)
create mode 100644 src/humanloop/eval_utils/context.py
create mode 100644 src/humanloop/eval_utils/domain.py
create mode 100644 src/humanloop/eval_utils/shared.py
diff --git a/.fernignore b/.fernignore
index a24236fe..08310ad3 100644
--- a/.fernignore
+++ b/.fernignore
@@ -1,6 +1,6 @@
# Specify files that shouldn't be modified by Fern
-src/humanloop/eval_utils.py
+src/humanloop/eval_utils/*
src/humanloop/prompt_utils.py
src/humanloop/client.py
mypy.ini
diff --git a/poetry.lock b/poetry.lock
index 54b71e22..70b743b5 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -204,13 +204,13 @@ files = [
[[package]]
name = "cohere"
-version = "5.11.2"
+version = "5.11.3"
description = ""
optional = false
python-versions = "<4.0,>=3.8"
files = [
- {file = "cohere-5.11.2-py3-none-any.whl", hash = "sha256:310adb975817068488ba60d2d39e65b8fd28756df9a4905d5b16a69f79d78db7"},
- {file = "cohere-5.11.2.tar.gz", hash = "sha256:99498e20343947ef1e1e01165312dd2fbf40be4f9eac336f9b71efba55e7ba6e"},
+ {file = "cohere-5.11.3-py3-none-any.whl", hash = "sha256:96a0414af083337610e2f6de18f53ffaf5cb3f7aee763605d493c95ff981ad9f"},
+ {file = "cohere-5.11.3.tar.gz", hash = "sha256:a6587e7ef66ab377f37fdc13e5679375c4a45aef9d2047662a3e7737df7c6599"},
]
[package.dependencies]
@@ -721,6 +721,122 @@ files = [
{file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"},
]
+[[package]]
+name = "numpy"
+version = "2.0.2"
+description = "Fundamental package for array computing in Python"
+optional = false
+python-versions = ">=3.9"
+files = [
+ {file = "numpy-2.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:51129a29dbe56f9ca83438b706e2e69a39892b5eda6cedcb6b0c9fdc9b0d3ece"},
+ {file = "numpy-2.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f15975dfec0cf2239224d80e32c3170b1d168335eaedee69da84fbe9f1f9cd04"},
+ {file = "numpy-2.0.2-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:8c5713284ce4e282544c68d1c3b2c7161d38c256d2eefc93c1d683cf47683e66"},
+ {file = "numpy-2.0.2-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:becfae3ddd30736fe1889a37f1f580e245ba79a5855bff5f2a29cb3ccc22dd7b"},
+ {file = "numpy-2.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2da5960c3cf0df7eafefd806d4e612c5e19358de82cb3c343631188991566ccd"},
+ {file = "numpy-2.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:496f71341824ed9f3d2fd36cf3ac57ae2e0165c143b55c3a035ee219413f3318"},
+ {file = "numpy-2.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a61ec659f68ae254e4d237816e33171497e978140353c0c2038d46e63282d0c8"},
+ {file = "numpy-2.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d731a1c6116ba289c1e9ee714b08a8ff882944d4ad631fd411106a30f083c326"},
+ {file = "numpy-2.0.2-cp310-cp310-win32.whl", hash = "sha256:984d96121c9f9616cd33fbd0618b7f08e0cfc9600a7ee1d6fd9b239186d19d97"},
+ {file = "numpy-2.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:c7b0be4ef08607dd04da4092faee0b86607f111d5ae68036f16cc787e250a131"},
+ {file = "numpy-2.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:49ca4decb342d66018b01932139c0961a8f9ddc7589611158cb3c27cbcf76448"},
+ {file = "numpy-2.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:11a76c372d1d37437857280aa142086476136a8c0f373b2e648ab2c8f18fb195"},
+ {file = "numpy-2.0.2-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:807ec44583fd708a21d4a11d94aedf2f4f3c3719035c76a2bbe1fe8e217bdc57"},
+ {file = "numpy-2.0.2-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:8cafab480740e22f8d833acefed5cc87ce276f4ece12fdaa2e8903db2f82897a"},
+ {file = "numpy-2.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a15f476a45e6e5a3a79d8a14e62161d27ad897381fecfa4a09ed5322f2085669"},
+ {file = "numpy-2.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13e689d772146140a252c3a28501da66dfecd77490b498b168b501835041f951"},
+ {file = "numpy-2.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9ea91dfb7c3d1c56a0e55657c0afb38cf1eeae4544c208dc465c3c9f3a7c09f9"},
+ {file = "numpy-2.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c1c9307701fec8f3f7a1e6711f9089c06e6284b3afbbcd259f7791282d660a15"},
+ {file = "numpy-2.0.2-cp311-cp311-win32.whl", hash = "sha256:a392a68bd329eafac5817e5aefeb39038c48b671afd242710b451e76090e81f4"},
+ {file = "numpy-2.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:286cd40ce2b7d652a6f22efdfc6d1edf879440e53e76a75955bc0c826c7e64dc"},
+ {file = "numpy-2.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:df55d490dea7934f330006d0f81e8551ba6010a5bf035a249ef61a94f21c500b"},
+ {file = "numpy-2.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8df823f570d9adf0978347d1f926b2a867d5608f434a7cff7f7908c6570dcf5e"},
+ {file = "numpy-2.0.2-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:9a92ae5c14811e390f3767053ff54eaee3bf84576d99a2456391401323f4ec2c"},
+ {file = "numpy-2.0.2-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:a842d573724391493a97a62ebbb8e731f8a5dcc5d285dfc99141ca15a3302d0c"},
+ {file = "numpy-2.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c05e238064fc0610c840d1cf6a13bf63d7e391717d247f1bf0318172e759e692"},
+ {file = "numpy-2.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0123ffdaa88fa4ab64835dcbde75dcdf89c453c922f18dced6e27c90d1d0ec5a"},
+ {file = "numpy-2.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:96a55f64139912d61de9137f11bf39a55ec8faec288c75a54f93dfd39f7eb40c"},
+ {file = "numpy-2.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ec9852fb39354b5a45a80bdab5ac02dd02b15f44b3804e9f00c556bf24b4bded"},
+ {file = "numpy-2.0.2-cp312-cp312-win32.whl", hash = "sha256:671bec6496f83202ed2d3c8fdc486a8fc86942f2e69ff0e986140339a63bcbe5"},
+ {file = "numpy-2.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:cfd41e13fdc257aa5778496b8caa5e856dc4896d4ccf01841daee1d96465467a"},
+ {file = "numpy-2.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9059e10581ce4093f735ed23f3b9d283b9d517ff46009ddd485f1747eb22653c"},
+ {file = "numpy-2.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:423e89b23490805d2a5a96fe40ec507407b8ee786d66f7328be214f9679df6dd"},
+ {file = "numpy-2.0.2-cp39-cp39-macosx_14_0_arm64.whl", hash = "sha256:2b2955fa6f11907cf7a70dab0d0755159bca87755e831e47932367fc8f2f2d0b"},
+ {file = "numpy-2.0.2-cp39-cp39-macosx_14_0_x86_64.whl", hash = "sha256:97032a27bd9d8988b9a97a8c4d2c9f2c15a81f61e2f21404d7e8ef00cb5be729"},
+ {file = "numpy-2.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e795a8be3ddbac43274f18588329c72939870a16cae810c2b73461c40718ab1"},
+ {file = "numpy-2.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f26b258c385842546006213344c50655ff1555a9338e2e5e02a0756dc3e803dd"},
+ {file = "numpy-2.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5fec9451a7789926bcf7c2b8d187292c9f93ea30284802a0ab3f5be8ab36865d"},
+ {file = "numpy-2.0.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:9189427407d88ff25ecf8f12469d4d39d35bee1db5d39fc5c168c6f088a6956d"},
+ {file = "numpy-2.0.2-cp39-cp39-win32.whl", hash = "sha256:905d16e0c60200656500c95b6b8dca5d109e23cb24abc701d41c02d74c6b3afa"},
+ {file = "numpy-2.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:a3f4ab0caa7f053f6797fcd4e1e25caee367db3112ef2b6ef82d749530768c73"},
+ {file = "numpy-2.0.2-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:7f0a0c6f12e07fa94133c8a67404322845220c06a9e80e85999afe727f7438b8"},
+ {file = "numpy-2.0.2-pp39-pypy39_pp73-macosx_14_0_x86_64.whl", hash = "sha256:312950fdd060354350ed123c0e25a71327d3711584beaef30cdaa93320c392d4"},
+ {file = "numpy-2.0.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26df23238872200f63518dd2aa984cfca675d82469535dc7162dc2ee52d9dd5c"},
+ {file = "numpy-2.0.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a46288ec55ebbd58947d31d72be2c63cbf839f0a63b49cb755022310792a3385"},
+ {file = "numpy-2.0.2.tar.gz", hash = "sha256:883c987dee1880e2a864ab0dc9892292582510604156762362d9326444636e78"},
+]
+
+[[package]]
+name = "numpy"
+version = "2.1.2"
+description = "Fundamental package for array computing in Python"
+optional = false
+python-versions = ">=3.10"
+files = [
+ {file = "numpy-2.1.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:30d53720b726ec36a7f88dc873f0eec8447fbc93d93a8f079dfac2629598d6ee"},
+ {file = "numpy-2.1.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e8d3ca0a72dd8846eb6f7dfe8f19088060fcb76931ed592d29128e0219652884"},
+ {file = "numpy-2.1.2-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:fc44e3c68ff00fd991b59092a54350e6e4911152682b4782f68070985aa9e648"},
+ {file = "numpy-2.1.2-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:7c1c60328bd964b53f8b835df69ae8198659e2b9302ff9ebb7de4e5a5994db3d"},
+ {file = "numpy-2.1.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6cdb606a7478f9ad91c6283e238544451e3a95f30fb5467fbf715964341a8a86"},
+ {file = "numpy-2.1.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d666cb72687559689e9906197e3bec7b736764df6a2e58ee265e360663e9baf7"},
+ {file = "numpy-2.1.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c6eef7a2dbd0abfb0d9eaf78b73017dbfd0b54051102ff4e6a7b2980d5ac1a03"},
+ {file = "numpy-2.1.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:12edb90831ff481f7ef5f6bc6431a9d74dc0e5ff401559a71e5e4611d4f2d466"},
+ {file = "numpy-2.1.2-cp310-cp310-win32.whl", hash = "sha256:a65acfdb9c6ebb8368490dbafe83c03c7e277b37e6857f0caeadbbc56e12f4fb"},
+ {file = "numpy-2.1.2-cp310-cp310-win_amd64.whl", hash = "sha256:860ec6e63e2c5c2ee5e9121808145c7bf86c96cca9ad396c0bd3e0f2798ccbe2"},
+ {file = "numpy-2.1.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b42a1a511c81cc78cbc4539675713bbcf9d9c3913386243ceff0e9429ca892fe"},
+ {file = "numpy-2.1.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:faa88bc527d0f097abdc2c663cddf37c05a1c2f113716601555249805cf573f1"},
+ {file = "numpy-2.1.2-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:c82af4b2ddd2ee72d1fc0c6695048d457e00b3582ccde72d8a1c991b808bb20f"},
+ {file = "numpy-2.1.2-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:13602b3174432a35b16c4cfb5de9a12d229727c3dd47a6ce35111f2ebdf66ff4"},
+ {file = "numpy-2.1.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1ebec5fd716c5a5b3d8dfcc439be82a8407b7b24b230d0ad28a81b61c2f4659a"},
+ {file = "numpy-2.1.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2b49c3c0804e8ecb05d59af8386ec2f74877f7ca8fd9c1e00be2672e4d399b1"},
+ {file = "numpy-2.1.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2cbba4b30bf31ddbe97f1c7205ef976909a93a66bb1583e983adbd155ba72ac2"},
+ {file = "numpy-2.1.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8e00ea6fc82e8a804433d3e9cedaa1051a1422cb6e443011590c14d2dea59146"},
+ {file = "numpy-2.1.2-cp311-cp311-win32.whl", hash = "sha256:5006b13a06e0b38d561fab5ccc37581f23c9511879be7693bd33c7cd15ca227c"},
+ {file = "numpy-2.1.2-cp311-cp311-win_amd64.whl", hash = "sha256:f1eb068ead09f4994dec71c24b2844f1e4e4e013b9629f812f292f04bd1510d9"},
+ {file = "numpy-2.1.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d7bf0a4f9f15b32b5ba53147369e94296f5fffb783db5aacc1be15b4bf72f43b"},
+ {file = "numpy-2.1.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b1d0fcae4f0949f215d4632be684a539859b295e2d0cb14f78ec231915d644db"},
+ {file = "numpy-2.1.2-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:f751ed0a2f250541e19dfca9f1eafa31a392c71c832b6bb9e113b10d050cb0f1"},
+ {file = "numpy-2.1.2-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:bd33f82e95ba7ad632bc57837ee99dba3d7e006536200c4e9124089e1bf42426"},
+ {file = "numpy-2.1.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b8cde4f11f0a975d1fd59373b32e2f5a562ade7cde4f85b7137f3de8fbb29a0"},
+ {file = "numpy-2.1.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d95f286b8244b3649b477ac066c6906fbb2905f8ac19b170e2175d3d799f4df"},
+ {file = "numpy-2.1.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ab4754d432e3ac42d33a269c8567413bdb541689b02d93788af4131018cbf366"},
+ {file = "numpy-2.1.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e585c8ae871fd38ac50598f4763d73ec5497b0de9a0ab4ef5b69f01c6a046142"},
+ {file = "numpy-2.1.2-cp312-cp312-win32.whl", hash = "sha256:9c6c754df29ce6a89ed23afb25550d1c2d5fdb9901d9c67a16e0b16eaf7e2550"},
+ {file = "numpy-2.1.2-cp312-cp312-win_amd64.whl", hash = "sha256:456e3b11cb79ac9946c822a56346ec80275eaf2950314b249b512896c0d2505e"},
+ {file = "numpy-2.1.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a84498e0d0a1174f2b3ed769b67b656aa5460c92c9554039e11f20a05650f00d"},
+ {file = "numpy-2.1.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4d6ec0d4222e8ffdab1744da2560f07856421b367928026fb540e1945f2eeeaf"},
+ {file = "numpy-2.1.2-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:259ec80d54999cc34cd1eb8ded513cb053c3bf4829152a2e00de2371bd406f5e"},
+ {file = "numpy-2.1.2-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:675c741d4739af2dc20cd6c6a5c4b7355c728167845e3c6b0e824e4e5d36a6c3"},
+ {file = "numpy-2.1.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05b2d4e667895cc55e3ff2b56077e4c8a5604361fc21a042845ea3ad67465aa8"},
+ {file = "numpy-2.1.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:43cca367bf94a14aca50b89e9bc2061683116cfe864e56740e083392f533ce7a"},
+ {file = "numpy-2.1.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:76322dcdb16fccf2ac56f99048af32259dcc488d9b7e25b51e5eca5147a3fb98"},
+ {file = "numpy-2.1.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:32e16a03138cabe0cb28e1007ee82264296ac0983714094380b408097a418cfe"},
+ {file = "numpy-2.1.2-cp313-cp313-win32.whl", hash = "sha256:242b39d00e4944431a3cd2db2f5377e15b5785920421993770cddb89992c3f3a"},
+ {file = "numpy-2.1.2-cp313-cp313-win_amd64.whl", hash = "sha256:f2ded8d9b6f68cc26f8425eda5d3877b47343e68ca23d0d0846f4d312ecaa445"},
+ {file = "numpy-2.1.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:2ffef621c14ebb0188a8633348504a35c13680d6da93ab5cb86f4e54b7e922b5"},
+ {file = "numpy-2.1.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:ad369ed238b1959dfbade9018a740fb9392c5ac4f9b5173f420bd4f37ba1f7a0"},
+ {file = "numpy-2.1.2-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:d82075752f40c0ddf57e6e02673a17f6cb0f8eb3f587f63ca1eaab5594da5b17"},
+ {file = "numpy-2.1.2-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:1600068c262af1ca9580a527d43dc9d959b0b1d8e56f8a05d830eea39b7c8af6"},
+ {file = "numpy-2.1.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a26ae94658d3ba3781d5e103ac07a876b3e9b29db53f68ed7df432fd033358a8"},
+ {file = "numpy-2.1.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13311c2db4c5f7609b462bc0f43d3c465424d25c626d95040f073e30f7570e35"},
+ {file = "numpy-2.1.2-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:2abbf905a0b568706391ec6fa15161fad0fb5d8b68d73c461b3c1bab6064dd62"},
+ {file = "numpy-2.1.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:ef444c57d664d35cac4e18c298c47d7b504c66b17c2ea91312e979fcfbdfb08a"},
+ {file = "numpy-2.1.2-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:bdd407c40483463898b84490770199d5714dcc9dd9b792f6c6caccc523c00952"},
+ {file = "numpy-2.1.2-pp310-pypy310_pp73-macosx_14_0_x86_64.whl", hash = "sha256:da65fb46d4cbb75cb417cddf6ba5e7582eb7bb0b47db4b99c9fe5787ce5d91f5"},
+ {file = "numpy-2.1.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c193d0b0238638e6fc5f10f1b074a6993cb13b0b431f64079a509d63d3aa8b7"},
+ {file = "numpy-2.1.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a7d80b2e904faa63068ead63107189164ca443b42dd1930299e0d1cb041cec2e"},
+ {file = "numpy-2.1.2.tar.gz", hash = "sha256:13532a088217fa624c99b843eeb54640de23b3414b14aa66d023805eb731066c"},
+]
+
[[package]]
name = "openai"
version = "1.53.0"
@@ -915,6 +1031,92 @@ files = [
{file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"},
]
+[[package]]
+name = "pandas"
+version = "2.2.3"
+description = "Powerful data structures for data analysis, time series, and statistics"
+optional = false
+python-versions = ">=3.9"
+files = [
+ {file = "pandas-2.2.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1948ddde24197a0f7add2bdc4ca83bf2b1ef84a1bc8ccffd95eda17fd836ecb5"},
+ {file = "pandas-2.2.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:381175499d3802cde0eabbaf6324cce0c4f5d52ca6f8c377c29ad442f50f6348"},
+ {file = "pandas-2.2.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d9c45366def9a3dd85a6454c0e7908f2b3b8e9c138f5dc38fed7ce720d8453ed"},
+ {file = "pandas-2.2.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86976a1c5b25ae3f8ccae3a5306e443569ee3c3faf444dfd0f41cda24667ad57"},
+ {file = "pandas-2.2.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b8661b0238a69d7aafe156b7fa86c44b881387509653fdf857bebc5e4008ad42"},
+ {file = "pandas-2.2.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:37e0aced3e8f539eccf2e099f65cdb9c8aa85109b0be6e93e2baff94264bdc6f"},
+ {file = "pandas-2.2.3-cp310-cp310-win_amd64.whl", hash = "sha256:56534ce0746a58afaf7942ba4863e0ef81c9c50d3f0ae93e9497d6a41a057645"},
+ {file = "pandas-2.2.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:66108071e1b935240e74525006034333f98bcdb87ea116de573a6a0dccb6c039"},
+ {file = "pandas-2.2.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7c2875855b0ff77b2a64a0365e24455d9990730d6431b9e0ee18ad8acee13dbd"},
+ {file = "pandas-2.2.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cd8d0c3be0515c12fed0bdbae072551c8b54b7192c7b1fda0ba56059a0179698"},
+ {file = "pandas-2.2.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c124333816c3a9b03fbeef3a9f230ba9a737e9e5bb4060aa2107a86cc0a497fc"},
+ {file = "pandas-2.2.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:63cc132e40a2e084cf01adf0775b15ac515ba905d7dcca47e9a251819c575ef3"},
+ {file = "pandas-2.2.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:29401dbfa9ad77319367d36940cd8a0b3a11aba16063e39632d98b0e931ddf32"},
+ {file = "pandas-2.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:3fc6873a41186404dad67245896a6e440baacc92f5b716ccd1bc9ed2995ab2c5"},
+ {file = "pandas-2.2.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b1d432e8d08679a40e2a6d8b2f9770a5c21793a6f9f47fdd52c5ce1948a5a8a9"},
+ {file = "pandas-2.2.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a5a1595fe639f5988ba6a8e5bc9649af3baf26df3998a0abe56c02609392e0a4"},
+ {file = "pandas-2.2.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5de54125a92bb4d1c051c0659e6fcb75256bf799a732a87184e5ea503965bce3"},
+ {file = "pandas-2.2.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fffb8ae78d8af97f849404f21411c95062db1496aeb3e56f146f0355c9989319"},
+ {file = "pandas-2.2.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6dfcb5ee8d4d50c06a51c2fffa6cff6272098ad6540aed1a76d15fb9318194d8"},
+ {file = "pandas-2.2.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:062309c1b9ea12a50e8ce661145c6aab431b1e99530d3cd60640e255778bd43a"},
+ {file = "pandas-2.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:59ef3764d0fe818125a5097d2ae867ca3fa64df032331b7e0917cf5d7bf66b13"},
+ {file = "pandas-2.2.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f00d1345d84d8c86a63e476bb4955e46458b304b9575dcf71102b5c705320015"},
+ {file = "pandas-2.2.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3508d914817e153ad359d7e069d752cdd736a247c322d932eb89e6bc84217f28"},
+ {file = "pandas-2.2.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:22a9d949bfc9a502d320aa04e5d02feab689d61da4e7764b62c30b991c42c5f0"},
+ {file = "pandas-2.2.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3a255b2c19987fbbe62a9dfd6cff7ff2aa9ccab3fc75218fd4b7530f01efa24"},
+ {file = "pandas-2.2.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:800250ecdadb6d9c78eae4990da62743b857b470883fa27f652db8bdde7f6659"},
+ {file = "pandas-2.2.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6374c452ff3ec675a8f46fd9ab25c4ad0ba590b71cf0656f8b6daa5202bca3fb"},
+ {file = "pandas-2.2.3-cp313-cp313-win_amd64.whl", hash = "sha256:61c5ad4043f791b61dd4752191d9f07f0ae412515d59ba8f005832a532f8736d"},
+ {file = "pandas-2.2.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:3b71f27954685ee685317063bf13c7709a7ba74fc996b84fc6821c59b0f06468"},
+ {file = "pandas-2.2.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:38cf8125c40dae9d5acc10fa66af8ea6fdf760b2714ee482ca691fc66e6fcb18"},
+ {file = "pandas-2.2.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ba96630bc17c875161df3818780af30e43be9b166ce51c9a18c1feae342906c2"},
+ {file = "pandas-2.2.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1db71525a1538b30142094edb9adc10be3f3e176748cd7acc2240c2f2e5aa3a4"},
+ {file = "pandas-2.2.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:15c0e1e02e93116177d29ff83e8b1619c93ddc9c49083f237d4312337a61165d"},
+ {file = "pandas-2.2.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ad5b65698ab28ed8d7f18790a0dc58005c7629f227be9ecc1072aa74c0c1d43a"},
+ {file = "pandas-2.2.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc6b93f9b966093cb0fd62ff1a7e4c09e6d546ad7c1de191767baffc57628f39"},
+ {file = "pandas-2.2.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5dbca4c1acd72e8eeef4753eeca07de9b1db4f398669d5994086f788a5d7cc30"},
+ {file = "pandas-2.2.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8cd6d7cc958a3910f934ea8dbdf17b2364827bb4dafc38ce6eef6bb3d65ff09c"},
+ {file = "pandas-2.2.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99df71520d25fade9db7c1076ac94eb994f4d2673ef2aa2e86ee039b6746d20c"},
+ {file = "pandas-2.2.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:31d0ced62d4ea3e231a9f228366919a5ea0b07440d9d4dac345376fd8e1477ea"},
+ {file = "pandas-2.2.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7eee9e7cea6adf3e3d24e304ac6b8300646e2a5d1cd3a3c2abed9101b0846761"},
+ {file = "pandas-2.2.3-cp39-cp39-win_amd64.whl", hash = "sha256:4850ba03528b6dd51d6c5d273c46f183f39a9baf3f0143e566b89450965b105e"},
+ {file = "pandas-2.2.3.tar.gz", hash = "sha256:4f18ba62b61d7e192368b84517265a99b4d7ee8912f8708660fb4a366cc82667"},
+]
+
+[package.dependencies]
+numpy = [
+ {version = ">=1.22.4", markers = "python_version < \"3.11\""},
+ {version = ">=1.23.2", markers = "python_version == \"3.11\""},
+ {version = ">=1.26.0", markers = "python_version >= \"3.12\""},
+]
+python-dateutil = ">=2.8.2"
+pytz = ">=2020.1"
+tzdata = ">=2022.7"
+
+[package.extras]
+all = ["PyQt5 (>=5.15.9)", "SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)", "beautifulsoup4 (>=4.11.2)", "bottleneck (>=1.3.6)", "dataframe-api-compat (>=0.1.7)", "fastparquet (>=2022.12.0)", "fsspec (>=2022.11.0)", "gcsfs (>=2022.11.0)", "html5lib (>=1.1)", "hypothesis (>=6.46.1)", "jinja2 (>=3.1.2)", "lxml (>=4.9.2)", "matplotlib (>=3.6.3)", "numba (>=0.56.4)", "numexpr (>=2.8.4)", "odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "pandas-gbq (>=0.19.0)", "psycopg2 (>=2.9.6)", "pyarrow (>=10.0.1)", "pymysql (>=1.0.2)", "pyreadstat (>=1.2.0)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "qtpy (>=2.3.0)", "s3fs (>=2022.11.0)", "scipy (>=1.10.0)", "tables (>=3.8.0)", "tabulate (>=0.9.0)", "xarray (>=2022.12.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)", "zstandard (>=0.19.0)"]
+aws = ["s3fs (>=2022.11.0)"]
+clipboard = ["PyQt5 (>=5.15.9)", "qtpy (>=2.3.0)"]
+compression = ["zstandard (>=0.19.0)"]
+computation = ["scipy (>=1.10.0)", "xarray (>=2022.12.0)"]
+consortium-standard = ["dataframe-api-compat (>=0.1.7)"]
+excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)"]
+feather = ["pyarrow (>=10.0.1)"]
+fss = ["fsspec (>=2022.11.0)"]
+gcp = ["gcsfs (>=2022.11.0)", "pandas-gbq (>=0.19.0)"]
+hdf5 = ["tables (>=3.8.0)"]
+html = ["beautifulsoup4 (>=4.11.2)", "html5lib (>=1.1)", "lxml (>=4.9.2)"]
+mysql = ["SQLAlchemy (>=2.0.0)", "pymysql (>=1.0.2)"]
+output-formatting = ["jinja2 (>=3.1.2)", "tabulate (>=0.9.0)"]
+parquet = ["pyarrow (>=10.0.1)"]
+performance = ["bottleneck (>=1.3.6)", "numba (>=0.56.4)", "numexpr (>=2.8.4)"]
+plot = ["matplotlib (>=3.6.3)"]
+postgresql = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "psycopg2 (>=2.9.6)"]
+pyarrow = ["pyarrow (>=10.0.1)"]
+spss = ["pyreadstat (>=1.2.0)"]
+sql-other = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)"]
+test = ["hypothesis (>=6.46.1)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)"]
+xml = ["lxml (>=4.9.2)"]
+
[[package]]
name = "parameterized"
version = "0.9.0"
@@ -990,8 +1192,8 @@ files = [
annotated-types = ">=0.6.0"
pydantic-core = "2.23.4"
typing-extensions = [
- {version = ">=4.12.2", markers = "python_version >= \"3.13\""},
{version = ">=4.6.1", markers = "python_version < \"3.13\""},
+ {version = ">=4.12.2", markers = "python_version >= \"3.13\""},
]
[package.extras]
@@ -1167,6 +1369,17 @@ files = [
[package.extras]
cli = ["click (>=5.0)"]
+[[package]]
+name = "pytz"
+version = "2024.2"
+description = "World timezone definitions, modern and historical"
+optional = false
+python-versions = "*"
+files = [
+ {file = "pytz-2024.2-py2.py3-none-any.whl", hash = "sha256:31c7c1817eb7fae7ca4b8c7ee50c72f93aa2dd863de768e1ef4245d426aa0725"},
+ {file = "pytz-2024.2.tar.gz", hash = "sha256:2aa355083c50a0f93fa581709deac0c9ad65cca8a9e9beac660adcbd493c798a"},
+]
+
[[package]]
name = "pyyaml"
version = "6.0.2"
@@ -1787,6 +2000,17 @@ files = [
{file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"},
]
+[[package]]
+name = "tzdata"
+version = "2024.2"
+description = "Provider of IANA time zone data"
+optional = false
+python-versions = ">=2"
+files = [
+ {file = "tzdata-2024.2-py2.py3-none-any.whl", hash = "sha256:a48093786cdcde33cad18c2555e8532f34422074448fbc874186f0abd79565cd"},
+ {file = "tzdata-2024.2.tar.gz", hash = "sha256:7d85cc416e9382e69095b7bdf4afd9e3880418a2413feec7069d533d6b4e31cc"},
+]
+
[[package]]
name = "urllib3"
version = "2.2.3"
@@ -1905,4 +2129,4 @@ type = ["pytest-mypy"]
[metadata]
lock-version = "2.0"
python-versions = "^3.9"
-content-hash = "83aa5483bed11f7992b59be1c59922c1802b03742be6d6d41abb46fe8848a770"
+content-hash = "3bfca7f42f99c638b20663590b8c958ea2569694da45c8a849e10287d398518c"
diff --git a/pyproject.toml b/pyproject.toml
index 3e033710..56e4f776 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -62,6 +62,7 @@ types-python-dateutil = "^2.9.0.20240316"
ruff = "^0.5.6"
python-dotenv = "^1.0.1"
openai = "^1.52.2"
+pandas = "^2.2.3"
[tool.pytest.ini_options]
testpaths = [ "tests" ]
diff --git a/src/humanloop/client.py b/src/humanloop/client.py
index 7550be35..bded949e 100644
--- a/src/humanloop/client.py
+++ b/src/humanloop/client.py
@@ -1,6 +1,6 @@
import os
import typing
-from typing import Any, List, Optional, Sequence
+from typing import Any, Callable, List, Optional, Sequence, Union
import httpx
from opentelemetry.sdk.resources import Resource
@@ -18,7 +18,8 @@
from .decorators.prompt import prompt as prompt_decorator_factory
from .decorators.tool import tool as tool_decorator_factory
from .environment import HumanloopEnvironment
-from .eval_utils import Dataset, Evaluator, EvaluatorCheck, File, _run_eval
+from humanloop.eval_utils.domain import Dataset, Evaluator, EvaluatorCheck, File
+from humanloop.eval_utils import _run_eval
from .evaluations.client import EvaluationsClient
from .otel import instrument_provider
from .otel.exporter import HumanloopSpanExporter
@@ -32,7 +33,7 @@ class ExtendedEvalsClient(EvaluationsClient):
def run(
self,
- file: File,
+ file: Union[File, Callable],
name: Optional[str],
dataset: Dataset,
evaluators: Optional[Sequence[Evaluator]] = None,
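With `run` now accepting `Union[File, Callable]`, a function decorated by one of the SDK decorators can be passed directly, since the decorator attaches a `File` record to the returned wrapper (see the decorator diffs below). A hedged usage sketch — the `client.flow` factory name and the dataset shape are assumptions inferred from this diff, not verified API:

```python
from humanloop import Humanloop

client = Humanloop(api_key="...")

@client.flow(path="qa/pipeline")  # assumed decorator entry point
def answer(question: str) -> str:
    return "42"

# The decorated callable carries a File record, so it can be passed
# directly instead of a hand-built File dict.
client.evaluations.run(
    file=answer,
    name="baseline",
    dataset={
        "path": "qa/golden",
        "datapoints": [{"inputs": {"question": "What is 6 * 7?"}}],
    },
)
```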
diff --git a/src/humanloop/decorators/flow.py b/src/humanloop/decorators/flow.py
index e28cb5bd..a2441e96 100644
--- a/src/humanloop/decorators/flow.py
+++ b/src/humanloop/decorators/flow.py
@@ -3,10 +3,16 @@
from typing import Any, Callable, Optional
from opentelemetry.trace import Tracer
+from opentelemetry.sdk.trace import ReadableSpan
from humanloop.decorators.helpers import args_to_inputs
-from humanloop.otel import get_trace_parent_metadata, pop_trace_context, push_trace_context
-from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY, HL_OT_EMPTY_VALUE, HL_TRACE_METADATA_KEY
+from humanloop.eval_utils import File
+from humanloop.otel import TRACE_FLOW_CONTEXT, FlowContext
+from humanloop.otel.constants import (
+ HL_FILE_OT_KEY,
+ HL_LOG_OT_KEY,
+ HL_OT_EMPTY_VALUE,
+)
from humanloop.otel.helpers import write_to_opentelemetry_span
@@ -21,49 +27,29 @@ def flow(
def decorator(func: Callable):
@wraps(func)
def wrapper(*args, **kwargs):
+ span: ReadableSpan
with opentelemetry_tracer.start_as_current_span(str(uuid.uuid4())) as span:
- trace_metadata = get_trace_parent_metadata()
-
- if trace_metadata:
- # Add Trace metadata to the Span so it can be correctly
- # linked to the parent Span. trace_metadata will be
- # non-null if the function is called by a @flow
- # decorated function.
- write_to_opentelemetry_span(
- span=span,
- key=HL_TRACE_METADATA_KEY,
- value={
- "trace_parent_id": trace_metadata["trace_parent_id"],
- "trace_id": span.get_span_context().span_id,
- "is_flow_log": True,
- },
+ span_id = span.get_span_context().span_id
+ if span.parent:
+ span_parent_id = span.parent.span_id
+ else:
+ span_parent_id = None
+ parent_trace_metadata = TRACE_FLOW_CONTEXT.get(span_parent_id)
+ if parent_trace_metadata:
+ TRACE_FLOW_CONTEXT[span_id] = FlowContext(
+ trace_id=span_id,
+ trace_parent_id=span_parent_id,
+ is_flow_log=True,
)
else:
# The Flow Log is not nested under another Flow Log
# Set the trace_id to the current span_id
- write_to_opentelemetry_span(
- span=span,
- key=HL_TRACE_METADATA_KEY,
- value={
- "trace_id": span.get_span_context().span_id,
- "is_flow_log": True,
- },
+ TRACE_FLOW_CONTEXT[span_id] = FlowContext(
+ trace_id=span_id,
+ trace_parent_id=None,
+ is_flow_log=True,
)
- # Add Trace metadata to the context for the children
- # Spans to be able to link to the parent Span
- # Unlike other decorators, which push to context stack
- # only if trace_metadata is present, this decorator
- # always pushes to context stack since it is responsible
- # for creating the context stack
- push_trace_context(
- {
- "trace_id": span.get_span_context().span_id,
- "trace_parent_id": span.get_span_context().span_id,
- "is_flow_log": True,
- },
- )
-
# Write the Flow Kernel to the Span on HL_FILE_OT_KEY
write_to_opentelemetry_span(
span=span,
@@ -80,11 +66,6 @@ def wrapper(*args, **kwargs):
# Call the decorated function
output = func(*args, **kwargs)
- # All children Spans have been created when the decorated function returns
- # Remove the Trace metadata from the context so the siblings can have
- # their children linked properly
- pop_trace_context()
-
# Write the Flow Log to the Span on HL_LOG_OT_KEY
write_to_opentelemetry_span(
span=span,
@@ -98,6 +79,15 @@ def wrapper(*args, **kwargs):
# Return the output of the decorated function
return output
+    wrapper.file = File(  # type: ignore
+ id=None,
+ path=path if path else func.__name__,
+ type="flow",
+ version=attributes,
+ is_decorated=True,
+ callable=wrapper,
+ )
+
return wrapper
return decorator
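The refactor above replaces the push/pop trace-context stack with a module-level mapping keyed by OTel span id: each wrapper records a `FlowContext` for its span, and children look up their parent's entry to inherit the trace id, so nothing needs to be popped when the function returns — the exporter can resolve parents from the map after the fact. A minimal sketch of the linking model implied by these hunks (the real `FlowContext` lives in `src/humanloop/otel/__init__.py` and may differ):

```python
from typing import Dict, Optional, TypedDict

class FlowContext(TypedDict, total=False):
    trace_id: Optional[int]
    trace_parent_id: Optional[int]
    is_flow_log: bool

TRACE_FLOW_CONTEXT: Dict[int, FlowContext] = {}

def link_span(span_id: int, parent_id: Optional[int], is_flow: bool) -> None:
    parent = TRACE_FLOW_CONTEXT.get(parent_id) if parent_id is not None else None
    if is_flow:
        # A flow span starts a trace (or nests under an enclosing flow):
        # its own span id becomes the trace id for its children.
        TRACE_FLOW_CONTEXT[span_id] = FlowContext(
            trace_id=span_id, trace_parent_id=parent_id, is_flow_log=True
        )
    elif parent is not None:
        # Prompt/tool spans inherit the trace id from the parent entry.
        TRACE_FLOW_CONTEXT[span_id] = FlowContext(
            trace_id=parent["trace_id"], trace_parent_id=parent_id, is_flow_log=False
        )
```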
diff --git a/src/humanloop/decorators/prompt.py b/src/humanloop/decorators/prompt.py
index 9b6305e9..31e884e8 100644
--- a/src/humanloop/decorators/prompt.py
+++ b/src/humanloop/decorators/prompt.py
@@ -4,8 +4,14 @@
from opentelemetry.trace import Tracer
-from humanloop.otel import get_trace_parent_metadata, pop_trace_context, push_trace_context
-from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY, HL_OT_EMPTY_VALUE, HL_TRACE_METADATA_KEY
+from humanloop.eval_utils import File
+from humanloop.otel import TRACE_FLOW_CONTEXT, FlowContext
+
+from humanloop.otel.constants import (
+ HL_FILE_OT_KEY,
+ HL_LOG_OT_KEY,
+ HL_OT_EMPTY_VALUE,
+)
from humanloop.otel.helpers import write_to_opentelemetry_span
from humanloop.types.model_endpoints import ModelEndpoints
from humanloop.types.model_providers import ModelProviders
@@ -74,26 +80,17 @@ def decorator(func: Callable):
@wraps(func)
def wrapper(*args, **kwargs):
with opentelemetry_tracer.start_as_current_span(str(uuid.uuid4())) as span:
- trace_metadata = get_trace_parent_metadata()
-
- if trace_metadata:
- # Add Trace metadata to the Span so it can be correctly
- # linked to the parent Span. trace_metadata will be
- # non-null if the function is called by a @flow
- # decorated function.
- write_to_opentelemetry_span(
- span=span,
- key=HL_TRACE_METADATA_KEY,
- value={**trace_metadata, "is_flow_log": False},
- )
- # Add Trace metadata to the context for the children
- # Spans to be able to link to the parent Span
- push_trace_context(
- {
- **trace_metadata,
- "trace_parent_id": span.get_span_context().span_id,
- "is_flow_log": False,
- },
+ span_id = span.get_span_context().span_id
+ if span.parent:
+ span_parent_id = span.parent.span_id
+ else:
+ span_parent_id = None
+            parent_trace_metadata = TRACE_FLOW_CONTEXT.get(span_parent_id)
+            if parent_trace_metadata:
+                TRACE_FLOW_CONTEXT[span_id] = FlowContext(
+                    trace_id=parent_trace_metadata["trace_id"],
+                    trace_parent_id=span_parent_id,
+                    is_flow_log=False,
)
write_to_opentelemetry_span(
@@ -111,13 +108,6 @@ def wrapper(*args, **kwargs):
# Call the decorated function
output = func(*args, **kwargs)
- # All children Spans have been created when the decorated function returns
- # Remove the Trace metadata from the context so the siblings can have
- # their children linked properly
- if trace_metadata:
- # Go back to previous trace context in Trace context
- pop_trace_context()
-
prompt_log = {"output": output}
# Write the Prompt Log to the Span on HL_LOG_OT_KEY
@@ -130,6 +120,15 @@ def wrapper(*args, **kwargs):
# Return the output of the decorated function
return output
+ wrapper.file = File( # type: ignore
+ path=path if path else func.__name__,
+ type="prompt",
+ version=prompt_kernel,
+ is_decorated=True,
+ id=None,
+ callable=wrapper,
+ )
+
return wrapper
return decorator
diff --git a/src/humanloop/decorators/tool.py b/src/humanloop/decorators/tool.py
index 0334b0a7..d46ebc16 100644
--- a/src/humanloop/decorators/tool.py
+++ b/src/humanloop/decorators/tool.py
@@ -4,12 +4,19 @@
import typing
import uuid
from functools import wraps
+from inspect import Parameter
from typing import Any, Callable, Literal, Mapping, Optional, Sequence, TypedDict, Union
+from opentelemetry.sdk.trace import ReadableSpan
from opentelemetry.trace import Tracer
-from humanloop.otel import get_trace_parent_metadata, pop_trace_context, push_trace_context
-from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY, HL_OT_EMPTY_VALUE, HL_TRACE_METADATA_KEY
+from humanloop.eval_utils import File
+from humanloop.otel import TRACE_FLOW_CONTEXT, FlowContext
+from humanloop.otel.constants import (
+ HL_FILE_OT_KEY,
+ HL_LOG_OT_KEY,
+ HL_OT_EMPTY_VALUE,
+)
from humanloop.otel.helpers import write_to_opentelemetry_span
from humanloop.requests.tool_function import ToolFunctionParams
from humanloop.requests.tool_kernel_request import ToolKernelRequestParams
@@ -32,32 +39,24 @@ def decorator(func: Callable):
strict=strict,
)
- # Mypy complains about adding attribute on function but it's nice UX
+ # Mypy complains about adding attribute on function, but it's nice UX
func.json_schema = tool_kernel["function"] # type: ignore
@wraps(func)
def wrapper(*args, **kwargs):
+ span: ReadableSpan
with opentelemetry_tracer.start_as_current_span(str(uuid.uuid4())) as span:
- trace_metadata = get_trace_parent_metadata()
-
- if trace_metadata:
- # Add Trace metadata to the Span so it can be correctly
- # linked to the parent Span. trace_metadata will be
- # non-null if the function is called by a @flow
- # decorated function.
- write_to_opentelemetry_span(
- span=span,
- key=HL_TRACE_METADATA_KEY,
- value={**trace_metadata, "is_flow_log": False},
- )
- # Add Trace metadata to the context for the children
- # Spans to be able to link to the parent Span
- push_trace_context(
- {
- **trace_metadata,
- "trace_parent_id": span.get_span_context().span_id,
- "is_flow_log": False,
- }
+ span_id = span.get_span_context().span_id
+ if span.parent:
+ span_parent_id = span.parent.span_id
+ else:
+ span_parent_id = None
+ parent_trace_metadata = TRACE_FLOW_CONTEXT.get(span_parent_id)
+ if parent_trace_metadata:
+ TRACE_FLOW_CONTEXT[span_id] = FlowContext(
+                    trace_id=parent_trace_metadata["trace_id"],
+ trace_parent_id=span_parent_id,
+ is_flow_log=False,
)
# Write the Tool Kernel to the Span on HL_FILE_OT_KEY
@@ -73,12 +72,6 @@ def wrapper(*args, **kwargs):
# Call the decorated function
output = func(*args, **kwargs)
- # All children Spans have been created when the decorated function returns
- # Remove the Trace metadata from the context so the siblings can have
- # their children linked properly
- if trace_metadata:
- pop_trace_context()
-
# Populate known Tool Log attributes
tool_log = {
"inputs": args_to_inputs(func, args, kwargs),
@@ -96,6 +89,15 @@ def wrapper(*args, **kwargs):
# Return the output of the decorated function
return output
+    wrapper.file = File(  # type: ignore
+ path=path if path else func.__name__,
+ type="tool",
+ version=tool_kernel,
+ is_decorated=True,
+ id=None,
+ callable=wrapper,
+ )
+
return wrapper
return decorator
@@ -211,7 +213,7 @@ def _parse_annotation(annotation: typing.Type) -> Union[list, tuple]:
the rest describing the inner types.
Note that for nested types that lack inner type, e.g. list instead of
- list[str], the inner type is set to inspect._empty. This edge case is
+ list[str], the inner type is set to Parameter.empty. This edge case is
handled by _annotation_parse_to_json_schema.
Examples:
@@ -240,8 +242,8 @@ def _parse_annotation(annotation: typing.Type) -> Union[list, tuple]:
origin = typing.get_origin(annotation)
if origin is None:
# Either not a nested type or no type hint
- # inspect._empty is used for parameters without type hints
- if annotation not in (str, int, float, bool, inspect._empty, dict, list, tuple):
+ # Parameter.empty is used for parameters without type hints
+ if annotation not in (str, int, float, bool, Parameter.empty, dict, list, tuple):
raise ValueError(f"Unsupported type hint: {annotation}")
return [annotation]
if origin is list:
@@ -305,6 +307,7 @@ def _annotation_parse_to_json_schema(arg: Union[list, tuple]) -> Mapping[str, Un
"items": {
"type": "array",
"items": {"type": "string"}
+ }
}
tuple[str, int, list[str]] ->
@@ -338,6 +341,7 @@ def _annotation_parse_to_json_schema(arg: Union[list, tuple]) -> Mapping[str, Un
"items": {"type": ["string", "integer", "number", "boolean", "object", "array", "null"]}
}
}
+ }
Optional[list] ->
{
@@ -370,10 +374,10 @@ def _annotation_parse_to_json_schema(arg: Union[list, tuple]) -> Mapping[str, Un
# list annotation with no type hints
if isinstance(arg, tuple):
# Support Optional annotation
- arg = (list, [inspect._empty])
+ arg = (list, [Parameter.empty])
else:
# Support non-Optional list annotation
- arg = [list, [inspect._empty]]
+ arg = [list, [Parameter.empty]]
arg_type = {
"type": "array",
"items": _annotation_parse_to_json_schema(arg[1]),
@@ -382,9 +386,9 @@ def _annotation_parse_to_json_schema(arg: Union[list, tuple]) -> Mapping[str, Un
if len(arg) == 1:
# dict annotation with no type hints
if isinstance(arg, tuple):
- arg = (dict, [inspect._empty], [inspect._empty])
+ arg = (dict, [Parameter.empty], [Parameter.empty])
else:
- arg = [dict, [inspect._empty], [inspect._empty]]
+ arg = [dict, [Parameter.empty], [Parameter.empty]]
arg_type = {
"type": "object",
"properties": {
@@ -400,7 +404,7 @@ def _annotation_parse_to_json_schema(arg: Union[list, tuple]) -> Mapping[str, Un
arg_type = {"type": "number"}
if arg[0] is builtins.bool:
arg_type = {"type": "boolean"}
- if arg[0] is inspect._empty:
+ if arg[0] is Parameter.empty:
# JSON Schema dropped support for 'any' type, we allow any type as a workaround
arg_type = {"type": ["string", "integer", "number", "boolean", "object", "array", "null"]}
diff --git a/src/humanloop/eval_utils.py b/src/humanloop/eval_utils/__init__.py
similarity index 63%
rename from src/humanloop/eval_utils.py
rename to src/humanloop/eval_utils/__init__.py
index e0c2b424..ba70ca94 100644
--- a/src/humanloop/eval_utils.py
+++ b/src/humanloop/eval_utils/__init__.py
@@ -9,49 +9,47 @@
"""
import logging
+import threading
+import typing
+from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
from functools import partial
import inspect
from logging import INFO
-from pydantic import BaseModel, ValidationError
+
+from pydantic import ValidationError
from typing import Callable, Sequence, Literal, Union, Optional, List, Dict, Tuple
-from typing_extensions import NotRequired, TypedDict
import time
import sys
-import json
-from concurrent.futures import ThreadPoolExecutor, as_completed
-from .client import BaseHumanloop
-from .core.api_error import ApiError
+
+from humanloop import PromptResponse, FlowResponse, ToolResponse, EvaluatorResponse
+from humanloop.eval_utils.domain import Dataset, Evaluator, EvaluatorCheck, File
+from humanloop.client import BaseHumanloop
+from humanloop.core.api_error import ApiError
# We use TypedDicts for requests, which is consistent with the rest of the SDK
-from .requests import FlowKernelRequestParams as FlowDict
-from .requests import PromptKernelRequestParams as PromptDict
-from .requests import ToolKernelRequestParams as ToolDict
-from .requests import CreateDatapointRequestParams as DatapointDict
-from .requests import ExternalEvaluatorRequestParams as ExternalEvaluator
-from .requests import CodeEvaluatorRequestParams as CodeEvaluatorDict
-from .requests import LlmEvaluatorRequestParams as LLMEvaluatorDict
-from .requests import HumanEvaluatorRequestParams as HumanEvaluatorDict
-
-
-# Responses are Pydantic models, we leverage them for improved request validation
-from .types import FlowKernelRequest as Flow
-from .types import PromptKernelRequest as Prompt
-from .types import ToolKernelRequest as Tool
-from .types import BooleanEvaluatorStatsResponse as BooleanStats
-from .types import NumericEvaluatorStatsResponse as NumericStats
-from .types import (
- UpdateDatesetAction as UpdateDatasetAction,
-) # TODO: fix original type typo
-from .types import DatapointResponse as Datapoint
-from .types import (
- EvaluationStats,
- RunStatsResponse,
- EvaluatorArgumentsType,
- EvaluatorReturnTypeEnum,
- EvaluationResponse,
-)
+from humanloop.eval_utils.shared import add_log_to_evaluation
+from humanloop.eval_utils.context import EvaluationContext, EVALUATION_CONTEXT
+from humanloop.requests import FlowKernelRequestParams as FlowDict
+from humanloop.requests import PromptKernelRequestParams as PromptDict
+from humanloop.requests import ToolKernelRequestParams as ToolDict
+from humanloop.requests import ExternalEvaluatorRequestParams as ExternalEvaluator
+from humanloop.requests import CodeEvaluatorRequestParams as CodeEvaluatorDict
+from humanloop.requests import LlmEvaluatorRequestParams as LLMEvaluatorDict
+from humanloop.requests import HumanEvaluatorRequestParams as HumanEvaluatorDict
+
+
+# Responses are Pydantic models, and we leverage them for improved request validation
+from humanloop.types import FlowKernelRequest as Flow
+from humanloop.types import PromptKernelRequest as Prompt
+from humanloop.types import ToolKernelRequest as Tool
+from humanloop.types import BooleanEvaluatorStatsResponse as BooleanStats
+from humanloop.types import NumericEvaluatorStatsResponse as NumericStats
+from humanloop.types import DatapointResponse as Datapoint
+from humanloop.types import EvaluationStats, EvaluationResponse
+from humanloop.types.evaluation_run_response import EvaluationRunResponse
+from humanloop.types.run_stats_response import RunStatsResponse
# Setup logging
logger = logging.getLogger(__name__)
@@ -76,74 +74,9 @@
RESET = "\033[0m"
-class Identifiers(TypedDict):
- """Common identifiers for the objects required to run an Evaluation."""
-
- id: NotRequired[str]
- """The ID of the File on Humanloop."""
- path: NotRequired[str]
- """The path of the File on Humanloop."""
-
-
-class File(Identifiers):
- """A File on Humanloop (Flow, Prompt, Tool, Evaluator)."""
-
- type: NotRequired[FileType]
- """The type of File this callable relates to on Humanloop."""
- version: NotRequired[Version]
- """The contents uniquely define the version of the File on Humanloop."""
- callable: Callable
- """The function being evaluated.
- It will be called using your Dataset `inputs` as follows: `output = callable(**datapoint.inputs)`.
- If `messages` are defined in your Dataset, then `output = callable(**datapoint.inputs, messages=datapoint.messages)`.
- It should return a string or json serializable output.
- """
-
-
-class Dataset(Identifiers):
- datapoints: NotRequired[Sequence[DatapointDict]]
- """The datapoints to map your function over to produce the outputs required by the evaluation."""
- action: NotRequired[UpdateDatasetAction]
- """How to update the Dataset given the provided Datapoints;
- `set` replaces the existing Datapoints and `add` appends to the existing Datapoints."""
-
-
-class Evaluator(Identifiers):
- """The Evaluator to provide judgments for this Evaluation."""
-
- args_type: NotRequired[EvaluatorArgumentsType]
- """The type of arguments the Evaluator expects - only required for local Evaluators."""
- return_type: NotRequired[EvaluatorReturnTypeEnum]
- """The type of return value the Evaluator produces - only required for local Evaluators."""
- callable: NotRequired[Callable]
- """The function to run on the logs to produce the judgment - only required for local Evaluators."""
- threshold: NotRequired[float]
- """The threshold to check the Evaluator against. If the aggregate value of the Evaluator is below this threshold, the check will fail."""
-
-
-class EvaluatorCheck(BaseModel):
- """Summary data for an Evaluator check."""
-
- path: str
- """The path of the Evaluator used in the check."""
- # TODO: Add number valence and improvement check
- # improvement_check: bool
- # """Whether the latest version of your function has improved across the Dataset for a specific Evaluator."""
- score: float
- """The score of the latest version of your function for a specific Evaluator."""
- delta: float
- """The change in score since the previous version of your function for a specific Evaluator."""
- threshold: Optional[float]
- """The threshold to check the Evaluator against."""
- threshold_check: Optional[bool]
- """Whether the latest version has an average Evaluator result above a threshold."""
- evaluation_id: str
- """The ID of the corresponding Evaluation."""
-
-
def _run_eval(
client: BaseHumanloop,
- file: File,
+ file: Union[File, Callable],
name: Optional[str],
dataset: Dataset,
evaluators: Optional[Sequence[Evaluator]] = None,
@@ -161,58 +94,67 @@ def _run_eval(
:param workers: the number of threads to process datapoints using your function concurrently.
:return: per Evaluator checks.
"""
+ global _PROGRESS_BAR
+
+ if isinstance(file, Callable): # type: ignore
+ # Decorated function
+ file_: File = file.file # type: ignore
+ else:
+ file_ = file
+
+ is_decorated = file_.pop("is_decorated", False)
# Get or create the file on Humanloop
- version = file.pop("version", {})
+ version = file_.pop("version", {})
# Raise error if one of path or id not provided
- if not file.get("path") and not file.get("id"):
+ if not file_.get("path") and not file_.get("id"):
raise ValueError("You must provide a path or id in your `file`.")
# Determine the `type` of the `file` to Evaluate - if not `type` provided, default to `flow`
try:
- type_ = file.pop("type")
+ type_ = typing.cast(FileType, file_.pop("type"))
logger.info(
- f"{CYAN}Evaluating your {type_} function corresponding to `{file['path']}` on Humanloop{RESET} \n\n"
+ f"{CYAN}Evaluating your {type_} function corresponding to `{file_['path']}` on Humanloop{RESET} \n\n"
)
except KeyError as _:
type_ = "flow"
logger.warning("No `file` type specified, defaulting to flow.")
# If a `callable` is provided, Logs will be generated locally, otherwise Logs will be generated on Humanloop.
- function_ = None
- try:
- function_ = file.pop("callable")
- except KeyError as _:
+ function_ = typing.cast(Optional[Callable], file_.pop("callable", None))
+ if function_ is None:
if type_ == "flow":
raise ValueError("You must provide a `callable` for your Flow `file` to run a local eval.")
else:
logger.info(f"No `callable` provided for your {type_} file - will attempt to generate logs on Humanloop.")
- file_dict = {**file, **version}
+ custom_logger = file_.pop("custom_logger", None)
+ file_dict = {**file_, **version}
+ hl_file: Union[PromptResponse, FlowResponse, ToolResponse, EvaluatorResponse]
if type_ == "flow":
# Be more lenient with Flow versions as they are arbitrary json
try:
- Flow.parse_obj(version)
+ Flow.model_validate(version)
except ValidationError:
flow_version = {"attributes": version}
- file_dict = {**file, **flow_version}
+ file_dict = {**file_, **flow_version}
hl_file = client.flows.upsert(**file_dict)
elif type_ == "prompt":
try:
- _ = Prompt.parse_obj(version)
+ Prompt.model_validate(version)
except ValidationError as error_:
- logger.error(msg=f"Invalid Prompt `version` in your `file` request. \n\nValidation error: \n)")
+ logger.error(msg="Invalid Prompt `version` in your `file` request. \n\nValidation error: \n)")
raise error_
hl_file = client.prompts.upsert(**file_dict)
elif type_ == "tool":
try:
- _ = Tool.parse_obj(version)
+ Tool.model_validate(version)
except ValidationError as error_:
- logger.error(msg=f"Invalid Tool `version` in your `file` request. \n\nValidation error: \n)")
+ logger.error(msg="Invalid Tool `version` in your `file` request. \n\nValidation error: \n)")
raise error_
hl_file = client.tools.upsert(**file_dict)
@@ -250,7 +192,11 @@ def _run_eval(
attributes={"code": inspect.getsource(eval_function)},
evaluator_type="external",
)
- _ = client.evaluators.upsert(id=evaluator.get("id"), path=evaluator.get("path"), spec=spec)
+ _ = client.evaluators.upsert(
+ id=evaluator.get("id"),
+ path=evaluator.get("path"),
+ spec=spec,
+ )
# Validate upfront that the local Evaluators and Dataset fit
requires_target = False
@@ -288,21 +234,13 @@ def _run_eval(
raise ValueError(f"Evaluation with name {name} not found.")
# Create a new Run
- run = client.evaluations.create_run(
+ run: EvaluationRunResponse = client.evaluations.create_run(
id=evaluation.id,
dataset={"version_id": hl_dataset.version_id},
orchestrated=False,
)
-
# Every Run will generate a new batch of Logs
run_id = run.id
- log_func = _get_log_func(
- client=client,
- type_=type_,
- file_id=hl_file.id,
- version_id=hl_file.version_id,
- run_id=run_id,
- )
# Define the function to execute your function in parallel and Log to Humanloop
def process_datapoint(datapoint: Datapoint):
@@ -312,62 +250,62 @@ def process_datapoint(datapoint: Datapoint):
if "messages" in datapoint_dict and datapoint_dict["messages"] is not None:
output = function_(**datapoint_dict["inputs"], messages=datapoint_dict["messages"])
else:
- output = function_(**datapoint_dict["inputs"])
-
- if not isinstance(output, str):
- try:
- output = json.dumps(output)
- # throw error if it fails to serialize
- except Exception as _:
- raise ValueError(f"Your {type_}'s `callable` must return a string or a JSON serializable object.")
- log = log_func(
- inputs=datapoint.inputs,
- output=output,
- source_datapoint_id=datapoint.id,
- start_time=start_time,
- end_time=datetime.now(),
- )
- except Exception as e:
- log = log_func(
- inputs=datapoint.inputs,
- error=str(e),
- source_datapoint_id=datapoint.id,
- start_time=start_time,
- end_time=datetime.now(),
+                    function_(**datapoint_dict["inputs"])
+
+ else:
+ # Define the function to execute your function in parallel and Log to Humanloop
+ def process_datapoint(dp: Datapoint, evaluated_file_id: str, run_id: str):
+ log_func = _get_log_func(
+ client=client,
+ file_type=type_,
+ file_id=hl_file.id,
+ version_id=hl_file.version_id,
+ run_id=run_id,
)
- logger.warning(msg=f"\nYour {type_}'s `callable` failed for Datapoint: {datapoint.id}. \n Error: {str(e)}")
- # Apply local Evaluators
- for local_evaluator in local_evaluators:
+ start_time = datetime.now()
+ datapoint_dict = dp.dict()
try:
- start_time = datetime.now()
- eval_function = local_evaluator["callable"]
- if local_evaluator["args_type"] == "target_required":
- judgment = eval_function(log.dict(), datapoint_dict["target"])
+ if "messages" in datapoint_dict:
+ output = function_(
+ **datapoint_dict["inputs"],
+ messages=datapoint_dict["messages"],
+ )
else:
- judgment = eval_function(log.dict())
-
- _ = client.evaluators.log(
- parent_id=log.id,
- id=local_evaluator.get("id"),
- path=local_evaluator.get("path"),
- judgment=judgment,
- start_time=start_time,
- end_time=datetime.now(),
- )
+ output = function_(**datapoint_dict["inputs"])
+ if custom_logger:
+ log = function_(client=client, output=output)
+ else:
+ if not isinstance(output, str):
+ raise ValueError(
+ f"Your {type_}'s `callable` must return a string if you do not provide a custom logger."
+ )
+ log = log_func(
+ inputs=dp.inputs,
+ output=output,
+ source_datapoint_id=dp.id,
+ start_time=start_time,
+ end_time=datetime.now(),
+ )
except Exception as e:
- _ = client.evaluators.log(
- parent_id=log.id,
- path=local_evaluator.get("path"),
- id=local_evaluator.get("id"),
+ log = log_func(
+ inputs=dp.inputs,
error=str(e),
+ source_datapoint_id=dp.id,
start_time=start_time,
end_time=datetime.now(),
)
- logger.warning(f"\nEvaluator {local_evaluator['path']} failed with error {str(e)}")
+ logger.warning(msg=f"\nYour {type_}'s `callable` failed for Datapoint: {dp.id}. \n Error: {str(e)}")
+
+ add_log_to_evaluation(
+ client=client,
+ log=log,
+ datapoint_target=dp.target,
+ local_evaluators=local_evaluators,
+ )
+ _PROGRESS_BAR.increment()
# Execute the function and send the logs to Humanloop in parallel
- total_datapoints = len(hl_dataset.datapoints)
logger.info(f"\n{CYAN}Navigate to your Evaluation:{RESET}\n{evaluation.url}\n")
logger.info(f"{CYAN}{type_.capitalize()} Version ID: {hl_file.version_id}{RESET}")
logger.info(f"{CYAN}Run ID: {run_id}{RESET}")
@@ -377,12 +315,14 @@ def process_datapoint(datapoint: Datapoint):
logger.info(
f"{CYAN}\nRunning '{hl_file.name}' over the Dataset '{hl_dataset.name}' using {workers} workers{RESET} "
)
- completed_tasks = 0
with ThreadPoolExecutor(max_workers=workers) as executor:
- futures = [executor.submit(process_datapoint, datapoint) for datapoint in hl_dataset.datapoints]
- for _ in as_completed(futures):
- completed_tasks += 1
- _progress_bar(total_datapoints, completed_tasks)
+ for datapoint in hl_dataset.datapoints:
+ executor.submit(
+ process_datapoint,
+ datapoint,
+ hl_file.id,
+ run_id,
+ )
else:
# TODO: trigger run when updated API is available
logger.info(f"{CYAN}\nRunning '{hl_file.name}' over the Dataset '{hl_dataset.name}'{RESET}")
@@ -445,7 +385,7 @@ def process_datapoint(datapoint: Datapoint):
def _get_log_func(
client: BaseHumanloop,
- type_: FileType,
+ file_type: FileType,
file_id: str,
version_id: str,
run_id: str,
@@ -458,16 +398,16 @@ def _get_log_func(
"version_id": version_id,
"run_id": run_id,
}
- if type_ == "flow":
+ if file_type == "flow":
return partial(client.flows.log, **log_request, trace_status="complete")
- elif type_ == "prompt":
+ elif file_type == "prompt":
return partial(client.prompts.log, **log_request)
- elif type_ == "evaluator":
+ elif file_type == "evaluator":
return partial(client.evaluators.log, **log_request)
- elif type_ == "tool":
+ elif file_type == "tool":
return partial(client.tools.log, **log_request)
else:
- raise NotImplementedError(f"Unsupported File version: {type_}")
+ raise NotImplementedError(f"Unsupported File version: {file_type}")
def get_score_from_evaluator_stat(
@@ -485,41 +425,51 @@ def get_score_from_evaluator_stat(
return score
-def _progress_bar(total: int, progress: int):
- """Simple progress bar for CLI with ETA."""
-
- if total <= 0:
- total = 1
-
- if not hasattr(_progress_bar, "start_time"):
- _progress_bar.start_time = time.time()
-
- bar_length = 40
- block = int(round(bar_length * progress / total))
- bar = "#" * block + "-" * (bar_length - block)
-
- percentage = (progress / total) * 100
- elapsed_time = time.time() - _progress_bar.start_time
- time_per_item = elapsed_time / progress if progress > 0 else 0
- eta = (total - progress) * time_per_item
+class _SimpleProgressBar:
+ def __init__(self, total: int):
+ if total <= 0:
+ self._total = 1
+ else:
+ self._total = total
+ self._progress = 0
+ self._lock = threading.Lock()
+ self._start_time = None
+
+ def increment(self):
+ with self._lock:
+ self._progress += 1
+ if self._start_time is None:
+ self._start_time = time.time()
+
+ bar_length = 40
+ block = int(round(bar_length * self._progress / self._total))
+ bar = "#" * block + "-" * (bar_length - block)
+
+ percentage = (self._progress / self._total) * 100
+ elapsed_time = time.time() - self._start_time
+ time_per_item = elapsed_time / self._progress if self._progress > 0 else 0
+ eta = (self._total - self._progress) * time_per_item
+
+ progress_display = f"\r[{bar}] {self._progress}/{self._total}"
+ progress_display += f" ({percentage:.2f}%)"
+
+ if self._progress < self._total:
+ progress_display += f" | ETA: {int(eta)}s"
+ else:
+ progress_display += " | DONE"
- progress_display = f"\r[{bar}] {progress}/{total}"
- progress_display += f" ({percentage:.2f}%)"
+ sys.stderr.write(progress_display)
- if progress < total:
- progress_display += f" | ETA: {int(eta)}s"
- else:
- progress_display += " | DONE"
- _progress_bar.start_time = None
+ if self._progress >= self._total:
+ sys.stderr.write("\n")
- sys.stderr.write(progress_display)
- if progress >= total:
- sys.stderr.write("\n")
+_PROGRESS_BAR = None
def get_evaluator_stats_by_path(
- stat: RunStatsResponse, evaluation: EvaluationResponse
+ stat: RunStatsResponse,
+ evaluation: EvaluationResponse,
) -> Dict[str, Union[NumericStats, BooleanStats]]:
"""Get the Evaluator stats by path."""
# TODO: Update the API so this is not necessary
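As a usage sketch (work items invented), `_SimpleProgressBar` replaces the old function-attribute progress bar and is safe to call from the worker threads that `_run_eval` spawns:

```
from concurrent.futures import ThreadPoolExecutor

datapoints = list(range(100))  # hypothetical work items
progress_bar = _SimpleProgressBar(total=len(datapoints))

def process(dp):
    # ... run the evaluated callable and log to Humanloop ...
    progress_bar.increment()  # lock-protected count update plus redraw

with ThreadPoolExecutor(max_workers=4) as executor:
    for dp in datapoints:
        executor.submit(process, dp)
```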
diff --git a/src/humanloop/eval_utils/context.py b/src/humanloop/eval_utils/context.py
new file mode 100644
index 00000000..bdfd0a88
--- /dev/null
+++ b/src/humanloop/eval_utils/context.py
@@ -0,0 +1,13 @@
+from typing import TypedDict, Callable
+import typing
+from contextvars import ContextVar
+
+
+class EvaluationContext(TypedDict):
+ source_datapoint_id: str
+ upload_callback: Callable[[dict], None]
+ evaluated_file_id: str
+ run_id: str
+
+
+EVALUATION_CONTEXT: ContextVar[typing.Optional[EvaluationContext]] = ContextVar("__EVALUATION_CONTEXT")
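A sketch of how this ContextVar might be set around an evaluated call and reset afterwards; the IDs and the callback are invented for the example:

```
from humanloop.eval_utils.context import EVALUATION_CONTEXT, EvaluationContext

token = EVALUATION_CONTEXT.set(
    EvaluationContext(
        source_datapoint_id="dp_123",      # hypothetical IDs
        upload_callback=lambda log: None,  # receives the uploaded log dict
        evaluated_file_id="fl_456",
        run_id="run_789",
    )
)
try:
    pass  # run the evaluated callable; the exporter reads EVALUATION_CONTEXT.get()
finally:
    EVALUATION_CONTEXT.reset(token)
```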
diff --git a/src/humanloop/eval_utils/domain.py b/src/humanloop/eval_utils/domain.py
new file mode 100644
index 00000000..8e65503a
--- /dev/null
+++ b/src/humanloop/eval_utils/domain.py
@@ -0,0 +1,111 @@
+from typing import Callable, Literal, Optional, Sequence, TypedDict, Union
+from typing_extensions import NotRequired
+
+from pydantic import BaseModel
+
+# We use TypedDicts for requests, which is consistent with the rest of the SDK
+from humanloop.requests import FlowKernelRequestParams as FlowDict
+from humanloop.requests import PromptKernelRequestParams as PromptDict
+from humanloop.requests import ToolKernelRequestParams as ToolDict
+from humanloop.requests import CreateDatapointRequestParams as DatapointDict
+from humanloop.requests import ExternalEvaluatorRequestParams as ExternalEvaluator
+from humanloop.requests import CodeEvaluatorRequestParams as CodeEvaluatorDict
+from humanloop.requests import LlmEvaluatorRequestParams as LLMEvaluatorDict
+from humanloop.requests import HumanEvaluatorRequestParams as HumanEvaluatorDict
+
+# Responses are Pydantic models and we leverage them for improved request validation
+from humanloop.types import UpdateDatesetAction as UpdateDatasetAction # TODO: fix original type typo
+from humanloop.types import (
+ EvaluatorArgumentsType,
+ EvaluatorReturnTypeEnum,
+)
+
+
+EvaluatorDict = Union[CodeEvaluatorDict, LLMEvaluatorDict, HumanEvaluatorDict, ExternalEvaluator]
+Version = Union[FlowDict, PromptDict, ToolDict, EvaluatorDict]
+FileType = Literal["flow", "prompt", "tool", "evaluator"]
+
+
+class Identifiers(TypedDict):
+ """Common identifiers for the objects required to run an Evaluation."""
+
+ id: NotRequired[str]
+ """The ID of the File on Humanloop."""
+ path: NotRequired[str]
+ """The path of the File on Humanloop."""
+
+
+class File(Identifiers):
+ """A File on Humanloop (Flow, Prompt, Tool, Evaluator)."""
+
+ type: NotRequired[FileType]
+ """The type of File this callable relates to on Humanloop."""
+ version: NotRequired[Version]
+ """The contents uniquely define the version of the File on Humanloop."""
+ callable: NotRequired[Callable]
+ """The function being evaluated.
+ It will be called using your Dataset `inputs` as follows: `output = callable(**datapoint.inputs)`.
+ If `messages` are defined in your Dataset, then `output = callable(**datapoint.inputs, messages=datapoint.messages)`.
+ It should return a single string output. If not, you must provide a `custom_logger`.
+ """
+ custom_logger: NotRequired[Callable]
+ """function that logs the output of your function to Humanloop, replacing the default logging.
+ If provided, it will be called as follows:
+ ```
+ output = callable(**datapoint.inputs).
+ log = custom_logger(client, output)
+ ```
+ Inside the custom_logger, you can use the Humanloop `client` to log the output of your function.
+ If not provided your pipeline must return a single string.
+ """
+ is_decorated: NotRequired[Literal[True]]
+
+
+class Dataset(Identifiers):
+ datapoints: Sequence[DatapointDict]
+ """The datapoints to map your function over to produce the outputs required by the evaluation."""
+ action: NotRequired[UpdateDatasetAction]
+ """How to update the Dataset given the provided Datapoints;
+ `set` replaces the existing Datapoints and `add` appends to the existing Datapoints."""
+
+
+class Evaluator(Identifiers):
+ """The Evaluator to provide judgments for this Evaluation."""
+
+ args_type: NotRequired[EvaluatorArgumentsType]
+ """The type of arguments the Evaluator expects - only required for local Evaluators."""
+ return_type: NotRequired[EvaluatorReturnTypeEnum]
+ """The type of return value the Evaluator produces - only required for local Evaluators."""
+ callable: NotRequired[Callable]
+ """The function to run on the logs to produce the judgment - only required for local Evaluators."""
+ custom_logger: NotRequired[Callable]
+ """optional function that logs the output judgment from your Evaluator to Humanloop, if provided, it will be called as follows:
+ ```
+ judgment = callable(log_dict)
+ log = custom_logger(client, judgment)
+ ```
+ Inside the custom_logger, you can use the Humanloop `client` to log the judgment to Humanloop.
+    If not provided, your function must return a single string, and by default the code will be used to inform the version of the external Evaluator on Humanloop.
+ """
+ threshold: NotRequired[float]
+ """The threshold to check the Evaluator against. If the aggregate value of the Evaluator is below this threshold, the check will fail."""
+
+
+class EvaluatorCheck(BaseModel):
+ """Summary data for an Evaluator check."""
+
+ path: str
+ """The path of the Evaluator used in the check."""
+ # TODO: Add number valence and improvement check
+ # improvement_check: bool
+ # """Whether the latest version of your function has improved across the Dataset for a specific Evaluator."""
+ score: float
+ """The score of the latest version of your function for a specific Evaluator."""
+ delta: float
+ """The change in score since the previous version of your function for a specific Evaluator."""
+ threshold: Optional[float]
+ """The threshold to check the Evaluator against."""
+ threshold_check: Optional[bool]
+ """Whether the latest version has an average Evaluator result above a threshold."""
+ evaluation_id: str
+ """The ID of the corresponding Evaluation."""
diff --git a/src/humanloop/eval_utils/shared.py b/src/humanloop/eval_utils/shared.py
new file mode 100644
index 00000000..b3400839
--- /dev/null
+++ b/src/humanloop/eval_utils/shared.py
@@ -0,0 +1,51 @@
+from datetime import datetime
+import logging
+import typing
+
+from humanloop.base_client import BaseHumanloop
+from humanloop.eval_utils.domain import Evaluator
+from humanloop.types.datapoint_response_target_value import DatapointResponseTargetValue
+
+
+logger = logging.getLogger("humanloop.sdk")
+
+
+def add_log_to_evaluation(
+ client: BaseHumanloop,
+ log: dict,
+ datapoint_target: typing.Optional[typing.Dict[str, DatapointResponseTargetValue]],
+ local_evaluators: list[Evaluator],
+):
+ for local_evaluator in local_evaluators:
+ start_time = datetime.now()
+ try:
+ eval_function = local_evaluator["callable"]
+ if local_evaluator["args_type"] == "target_required":
+ judgement = eval_function(
+ log,
+ datapoint_target,
+ )
+ else:
+ judgement = eval_function(log)
+
+ if local_evaluator.get("custom_logger", None):
+ local_evaluator["custom_logger"](judgement, start_time, datetime.now())
+ else:
+ _ = client.evaluators.log(
+ parent_id=log['id'],
+ judgment=judgement,
+ id=local_evaluator.get("id"),
+ path=local_evaluator.get("path"),
+ start_time=start_time,
+ end_time=datetime.now(),
+ )
+ except Exception as e:
+ _ = client.evaluators.log(
+ parent_id=log['id'],
+ path=local_evaluator.get("path"),
+ id=local_evaluator.get("id"),
+ error=str(e),
+ start_time=start_time,
+ end_time=datetime.now(),
+ )
+ logger.warning(f"\nEvaluator {local_evaluator['path']} failed with error {str(e)}")
diff --git a/src/humanloop/otel/__init__.py b/src/humanloop/otel/__init__.py
index e184c024..4b461f10 100644
--- a/src/humanloop/otel/__init__.py
+++ b/src/humanloop/otel/__init__.py
@@ -1,10 +1,9 @@
from typing import Optional
-from opentelemetry import baggage
+from typing import TypedDict
from opentelemetry.context import Context
from opentelemetry.sdk.trace import TracerProvider
-from humanloop.otel.constants import HL_TRACE_METADATA_KEY
from humanloop.otel.helpers import module_is_installed
"""
@@ -51,42 +50,16 @@ def instrument_provider(provider: TracerProvider):
GroqInstrumentor().instrument(tracer_provider=provider)
- # NOTE: ReplicateInstrumentor would require us to bump minimum Python version from 3.8 to 3.9
- # TODO: Do a PR against the open-source ReplicateInstrumentor to support lower Python versions
- # if module_is_installed("replicate"):
- # from opentelemetry.instrumentation.replicate import ReplicateInstrumentor
+ if module_is_installed("replicate"):
+ from opentelemetry.instrumentation.replicate import ReplicateInstrumentor
- # ReplicateInstrumentor().instrument(tracer_provider=provider)
+ ReplicateInstrumentor().instrument(tracer_provider=provider)
-def push_trace_context(trace_metadata: dict):
- """Push Trace metadata for a parent Span.
+class FlowContext(TypedDict):
+ trace_id: str
+ trace_parent_id: Optional[str]
+ is_flow_log: bool
- Expected to be called when the Span is created
- and before the wrapped function is executed.
- Calling a wrapped function may create children
- Spans, which will need to peek at the parent's
- metadata.
- """
- new_context = baggage.set_baggage(
- HL_TRACE_METADATA_KEY,
- trace_metadata,
- _BAGGAGE_CONTEXT_STACK[-1],
- )
- _BAGGAGE_CONTEXT_STACK.append(new_context)
-
-
-def pop_trace_context():
- """Clear Trace metadata for a parent Span.
-
- Expected to be called after the wrapped function
- is executed. This allows Spans on the same level
- to peek at their parent Trace metadata.
- """
- _BAGGAGE_CONTEXT_STACK.pop()
-
-
-def get_trace_parent_metadata() -> Optional[object]:
- """Peek at Trace metadata stack."""
- return baggage.get_baggage(HL_TRACE_METADATA_KEY, _BAGGAGE_CONTEXT_STACK[-1])
+TRACE_FLOW_CONTEXT: dict[int, FlowContext] = {}
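Roughly how the decorators populate this registry (mirroring the `flow.py` and `prompt.py` changes later in this series): each span registers under its own OTel span id, pointing back at its parent.

```
# Inside a decorator's wrapper, with `span` the freshly started OTel span:
span_id = span.get_span_context().span_id
if span.parent:
    parent_meta = TRACE_FLOW_CONTEXT.get(span.parent.span_id)
    if parent_meta:
        TRACE_FLOW_CONTEXT[span_id] = FlowContext(
            trace_id=parent_meta["trace_id"],
            trace_parent_id=span.parent.span_id,
            is_flow_log=False,
        )
```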
diff --git a/src/humanloop/otel/constants.py b/src/humanloop/otel/constants.py
index c683845c..2a9c20c9 100644
--- a/src/humanloop/otel/constants.py
+++ b/src/humanloop/otel/constants.py
@@ -2,8 +2,6 @@
HL_FILE_OT_KEY = "humanloop.file"
# Attribute name prefix on Humanloop spans for log-related attributes
HL_LOG_OT_KEY = "humanloop.log"
-# Attribute name prefix on Humanloop spans for trace metadata
-HL_TRACE_METADATA_KEY = "humanloop.flow.metadata"
# OTel does not allow falsy values for top-level attributes e.g. foo
# and None only on nested attributes e.g. foo.bar
HL_OT_EMPTY_VALUE = "EMPTY"
diff --git a/src/humanloop/otel/exporter.py b/src/humanloop/otel/exporter.py
index f6860315..3c907d97 100644
--- a/src/humanloop/otel/exporter.py
+++ b/src/humanloop/otel/exporter.py
@@ -1,7 +1,7 @@
import json
import logging
import typing
-from queue import Queue
+from queue import Queue, Empty as EmptyQueue
from threading import Thread
from typing import Any, Optional
@@ -10,7 +10,9 @@
from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult
from humanloop.core.request_options import RequestOptions
-from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY, HL_OT_EMPTY_VALUE, HL_TRACE_METADATA_KEY
+from humanloop.eval_utils import EVALUATION_CONTEXT, EvaluationContext
+from humanloop.otel import TRACE_FLOW_CONTEXT
+from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY, HL_OT_EMPTY_VALUE
from humanloop.otel.helpers import is_humanloop_span, read_from_opentelemetry_span
from humanloop.requests.flow_kernel_request import FlowKernelRequestParams
from humanloop.requests.prompt_kernel_request import PromptKernelRequestParams
@@ -41,7 +43,7 @@ def __init__(
"""
super().__init__()
self._client = client
- self._uploaded_log_ids: dict[
+ self._span_id_to_uploaded_log_id: dict[
str, str
] = {} # Uploaded spans translate to a Log on Humanloop. The IDs are required to link Logs in a Flow Trace
self._upload_queue: Queue = Queue() # Work queue for the threads uploading the spans
@@ -58,7 +60,12 @@ def export(self, spans: trace.Sequence[ReadableSpan]) -> SpanExportResult:
if not self._shutdown:
for span in spans:
if is_humanloop_span(span):
- self._upload_queue.put(span)
+ try:
+ evaluation_context = EVALUATION_CONTEXT.get()
+ except LookupError:
+ # Decorators are not used in a client.evaluations.run() context
+ evaluation_context = {}
+ self._upload_queue.put((span, evaluation_context))
return SpanExportResult.SUCCESS
else:
logger.warning("HumanloopSpanExporter is shutting down, not accepting new spans")
@@ -89,34 +96,33 @@ def _do_work(self):
dependencies uploaded. The dependency happens in a Flow Trace context, where
the Trace parent must be uploaded first. The Span Processor will send in Spans
bottom-up, while the upload of a Trace happens top-down. If a Span did not
-    have its span uploaded yet, it will be requeued to be uploaded later.
+    have its parent uploaded yet, it will be re-queued to be uploaded later.
"""
# Do work while the Exporter was not instructed to
# wind down or the queue is not empty
while self._upload_queue.qsize() > 0 or not self._shutdown:
try:
- # Don't block or the thread will never see the shutdown
- # command and will get stuck
- span_to_export = self._upload_queue.get(block=False)
- except Exception:
+ # Don't block or the thread will never be notified of the shutdown
+ thread_args: tuple[ReadableSpan, EvaluationContext] = self._upload_queue.get(block=False)
+ span_to_export, evaluation_context = thread_args
+ except EmptyQueue:
continue
- try:
- trace_metadata = read_from_opentelemetry_span(
- span_to_export,
- key=HL_TRACE_METADATA_KEY,
- )
- except KeyError:
- trace_metadata = None
- if "trace_parent_id" not in trace_metadata or trace_metadata["trace_parent_id"] in self._uploaded_log_ids:
- # The Span is outside a Trace context or its parent has been uploaded
- # we can safely upload it to Humanloop
- self._export_span_dispatch(span_to_export)
- else: # The parent has not been uploaded yet
+ trace_metadata = TRACE_FLOW_CONTEXT.get(span_to_export.get_span_context().span_id)
+ if trace_metadata is None:
+ # Span is not part of a Flow Log
+ self._export_span_dispatch(span_to_export, evaluation_context)
+ elif trace_metadata["trace_parent_id"] is None:
+ # Span is the head of a Flow Trace
+ self._export_span_dispatch(span_to_export, evaluation_context)
+ elif trace_metadata["trace_parent_id"] in self._span_id_to_uploaded_log_id:
+ # Span is part of a Flow and its parent has been uploaded
+ self._export_span_dispatch(span_to_export, evaluation_context)
+ else:
# Requeue the Span to be uploaded later
- self._upload_queue.put(span_to_export)
+ self._upload_queue.put((span_to_export, evaluation_context))
self._upload_queue.task_done()
- def _export_span_dispatch(self, span: ReadableSpan) -> None:
+ def _export_span_dispatch(self, span: ReadableSpan, evaluation_context: EvaluationContext) -> None:
hl_file = read_from_opentelemetry_span(span, key=HL_FILE_OT_KEY)
if "prompt" in hl_file:
@@ -127,9 +133,9 @@ def _export_span_dispatch(self, span: ReadableSpan) -> None:
export_func = self._export_flow
else:
raise NotImplementedError(f"Unknown span type: {hl_file}")
- export_func(span=span)
+ export_func(span=span, evaluation_context=evaluation_context)
- def _export_prompt(self, span: ReadableSpan) -> None:
+ def _export_prompt(self, span: ReadableSpan, evaluation_context: EvaluationContext) -> None:
file_object: dict[str, Any] = read_from_opentelemetry_span(
span,
key=HL_FILE_OT_KEY,
@@ -138,11 +144,11 @@ def _export_prompt(self, span: ReadableSpan) -> None:
span,
key=HL_LOG_OT_KEY,
)
- # NOTE: Due to Otel conventions, attributes with value of None are removed
+ # NOTE: Due to OTel conventions, attributes with value of None are removed
# If not present, instantiate as empty dictionary
if "inputs" not in log_object:
log_object["inputs"] = {}
- # NOTE: Due to Otel conventions, lists are read as dictionaries
+ # NOTE: Due to OTel conventions, lists are read as dictionaries
# E.g. ["a", "b"] -> {"0": "a", "1": "b"}
# We must convert the dictionary back to a list
# See humanloop.otel.helpers._list_to_ott
@@ -150,18 +156,10 @@ def _export_prompt(self, span: ReadableSpan) -> None:
log_object["messages"] = []
else:
log_object["messages"] = list(log_object["messages"].values())
- trace_metadata: Optional[dict[str, str]]
- try:
- trace_metadata = read_from_opentelemetry_span(
- span,
- key=HL_TRACE_METADATA_KEY,
- ) # type: ignore
- except KeyError:
- trace_metadata = None
- if trace_metadata:
- trace_parent_id = self._uploaded_log_ids[trace_metadata["trace_parent_id"]]
- else:
- trace_parent_id = None
+ trace_metadata = TRACE_FLOW_CONTEXT.get(span.get_span_context().span_id, {})
+ trace_parent_id = self._span_id_to_uploaded_log_id.get(
+ trace_metadata.get("trace_parent_id"),
+ )
prompt: PromptKernelRequestParams = file_object["prompt"]
path: str = file_object["path"]
if not isinstance(log_object["output"], str):
@@ -170,28 +168,27 @@ def _export_prompt(self, span: ReadableSpan) -> None:
log_object["output"] = json.dumps(log_object["output"])
if "attributes" not in prompt or not prompt["attributes"]:
prompt["attributes"] = {}
- response = self._client.prompts.log(
+ log_response = self._client.prompts.log(
path=path,
prompt=prompt,
**log_object,
trace_parent_id=trace_parent_id,
+ source_datapoint_id=evaluation_context.get("source_datapoint_id"),
+ run_id=evaluation_context.get("run_id"),
request_options=RequestOptions(max_retries=3),
)
- self._uploaded_log_ids[span.context.span_id] = response.id
+ if evaluation_context and log_response.prompt_id == evaluation_context["evaluated_file_id"]:
+ log_object["id"] = log_response.id
+ evaluation_context["upload_callback"](log_object)
+ self._span_id_to_uploaded_log_id[span.context.span_id] = log_response.id
- def _export_tool(self, span: ReadableSpan) -> None:
+ def _export_tool(self, span: ReadableSpan, evaluation_context: EvaluationContext) -> None:
file_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HL_FILE_OT_KEY)
log_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HL_LOG_OT_KEY)
- trace_metadata: Optional[dict[str, str]]
- try:
- # HL_TRACE_METADATA_KEY is a dict[str, str], has no nesting
- trace_metadata = read_from_opentelemetry_span(span, key=HL_TRACE_METADATA_KEY) # type: ignore
- except KeyError:
- trace_metadata = None
- if trace_metadata:
- trace_parent_id = self._uploaded_log_ids[trace_metadata["trace_parent_id"]]
- else:
- trace_parent_id = None
+ trace_metadata = TRACE_FLOW_CONTEXT.get(span.get_span_context().span_id, {})
+ trace_parent_id = self._span_id_to_uploaded_log_id.get(
+ trace_metadata.get("trace_parent_id"),
+ )
tool = file_object["tool"]
if tool.get("attributes", HL_OT_EMPTY_VALUE) == HL_OT_EMPTY_VALUE:
tool["attributes"] = {}
@@ -202,27 +199,25 @@ def _export_tool(self, span: ReadableSpan) -> None:
# Output expected to be a string, if decorated function
# does not return one, jsonify it
log_object["output"] = json.dumps(log_object["output"])
- response = self._client.tools.log(
+ log_response = self._client.tools.log(
path=path,
tool=tool,
**log_object,
trace_parent_id=trace_parent_id,
request_options=RequestOptions(max_retries=3),
)
- self._uploaded_log_ids[span.context.span_id] = response.id
+ if evaluation_context and log_response.tool_id == evaluation_context["evaluated_file_id"]:
+ log_object["id"] = log_response.id
+ evaluation_context["upload_callback"](log_object)
+ self._span_id_to_uploaded_log_id[span.context.span_id] = log_response.id
- def _export_flow(self, span: ReadableSpan) -> None:
+ def _export_flow(self, span: ReadableSpan, evaluation_context: EvaluationContext) -> None:
file_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HL_FILE_OT_KEY)
log_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HL_LOG_OT_KEY)
- trace_metadata: Optional[dict[str, str]]
- try:
- trace_metadata = read_from_opentelemetry_span(span, key=HL_TRACE_METADATA_KEY) # type: ignore
- except KeyError:
- trace_metadata = None
- if trace_metadata and "trace_parent_id" in trace_metadata:
- trace_parent_id = self._uploaded_log_ids[trace_metadata["trace_parent_id"]]
- else:
- trace_parent_id = None
+ trace_metadata = TRACE_FLOW_CONTEXT.get(span.get_span_context().span_id, {})
+ trace_parent_id = self._span_id_to_uploaded_log_id.get(
+ trace_metadata.get("trace_parent_id"),
+ )
# Cannot write falsy values except None in OTel Span attributes
# If a None write is attempted then the attribute is removed
# making it impossible to distinguish between a Flow Span and
@@ -237,11 +232,15 @@ def _export_flow(self, span: ReadableSpan) -> None:
# Output expected to be a string, if decorated function
# does not return one, jsonify it
log_object["output"] = json.dumps(log_object["output"])
- response = self._client.flows.log(
+ log_response = self._client.flows.log(
path=path,
flow=flow,
**log_object,
trace_parent_id=trace_parent_id,
+ source_datapoint_id=evaluation_context.get("source_datapoint_id"),
+ run_id=evaluation_context.get("run_id"),
request_options=RequestOptions(max_retries=3),
)
- self._uploaded_log_ids[span.context.span_id] = response.id
+ if evaluation_context and log_response.flow_id == evaluation_context["evaluated_file_id"]:
+ evaluation_context["upload_callback"](log_object)
+ self._span_id_to_uploaded_log_id[span.get_span_context().span_id] = log_response.id
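A toy walk-through of the ordering invariant `_do_work` now enforces, with invented span and Log ids standing in for the module state:

```
TRACE_FLOW_CONTEXT = {}             # toy stand-ins for the real module state
span_id_to_uploaded_log_id = {}

flow_id, child_id = 1, 2            # invented span ids
TRACE_FLOW_CONTEXT[child_id] = {
    "trace_id": flow_id, "trace_parent_id": flow_id, "is_flow_log": False,
}

meta = TRACE_FLOW_CONTEXT[child_id]
# First pass: the parent's Log id is unknown, so the child is re-queued.
assert meta["trace_parent_id"] not in span_id_to_uploaded_log_id

span_id_to_uploaded_log_id[flow_id] = "log_abc"  # Flow head span exports first
# Second pass: the lookup succeeds and the child is dispatched with
# trace_parent_id="log_abc", linking the Logs into one Flow Trace.
assert meta["trace_parent_id"] in span_id_to_uploaded_log_id
```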
diff --git a/src/humanloop/otel/helpers.py b/src/humanloop/otel/helpers.py
index 5513af93..eeaae9a4 100644
--- a/src/humanloop/otel/helpers.py
+++ b/src/humanloop/otel/helpers.py
@@ -45,7 +45,7 @@ def _list_to_ott(lst: NestedList) -> NestedDict:
def write_to_opentelemetry_span(
span: ReadableSpan,
- value: Union[NestedDict, NestedList],
+ value: Union[NestedDict, NestedList, AttributeValue],
key: str = "",
) -> None:
"""Write a Python object to the OpenTelemetry Span's attributes. Reverse of :func:`read_from_opentelemetry_span`.
diff --git a/src/humanloop/otel/processor.py b/src/humanloop/otel/processor.py
index 168cb4b5..578ced97 100644
--- a/src/humanloop/otel/processor.py
+++ b/src/humanloop/otel/processor.py
@@ -16,7 +16,6 @@
read_from_opentelemetry_span,
write_to_opentelemetry_span,
)
-from humanloop.requests.prompt_kernel_request import PromptKernelRequestParams
from humanloop.types.prompt_kernel_request import PromptKernelRequest
logger = logging.getLogger("humanloop.sdk")
@@ -116,7 +115,7 @@ def _process_tool(tool_span: ReadableSpan, children_spans: list[ReadableSpan]):
def _process_flow(flow_span: ReadableSpan, children_spans: list[ReadableSpan]):
- # TODO: Use children_spans in the future
+ # NOTE: Use children_spans if needed
flow_log = read_from_opentelemetry_span(flow_span, key=HL_LOG_OT_KEY)
if flow_span.start_time:
flow_log["start_time"] = flow_span.start_time / 1e9
@@ -141,7 +140,7 @@ def _enrich_prompt_kernel(prompt_span: ReadableSpan, llm_provider_call_span: Rea
prompt = {}
# Check if the Prompt Kernel keys were assigned default values
- # via the @prompt arguments. Otherwise use the information
+ # via the @prompt arguments. Otherwise, use the information
# from the intercepted LLM provider call
prompt["model"] = prompt.get("model") or gen_ai_object.get("request", {}).get("model", None)
prompt["endpoint"] = prompt.get("endpoint") or llm_object.get("request", {}).get("type")
diff --git a/src/humanloop/requests/create_datapoint_request.py b/src/humanloop/requests/create_datapoint_request.py
index 8e9d5005..d9e2e564 100644
--- a/src/humanloop/requests/create_datapoint_request.py
+++ b/src/humanloop/requests/create_datapoint_request.py
@@ -1,6 +1,5 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
import typing_extensions
import typing
from .chat_message import ChatMessageParams
diff --git a/tests/decorators/test_flow_decorator.py b/tests/decorators/test_flow_decorator.py
index 634b3786..01d0e8e2 100644
--- a/tests/decorators/test_flow_decorator.py
+++ b/tests/decorators/test_flow_decorator.py
@@ -4,11 +4,11 @@
import time
from unittest.mock import patch
-import pytest
from humanloop.decorators.flow import flow
from humanloop.decorators.prompt import prompt
from humanloop.decorators.tool import tool
-from humanloop.otel.constants import HL_FILE_OT_KEY, HL_TRACE_METADATA_KEY
+from humanloop.otel import TRACE_FLOW_CONTEXT
+from humanloop.otel.constants import HL_FILE_OT_KEY
from humanloop.otel.exporter import HumanloopSpanExporter
from humanloop.otel.helpers import read_from_opentelemetry_span
from openai import OpenAI
@@ -100,8 +100,7 @@ def test_decorators_without_flow(
)["prompt"]
for span in spans:
# THEN no metadata related to trace is present on either of them
- with pytest.raises(KeyError):
- read_from_opentelemetry_span(span=span, key=HL_TRACE_METADATA_KEY)
+ assert TRACE_FLOW_CONTEXT.get(span.get_span_context().span_id) is None
def test_decorators_with_flow_decorator(
@@ -136,9 +135,9 @@ def test_decorators_with_flow_decorator(
assert read_from_opentelemetry_span(span=spans[1], key=HL_FILE_OT_KEY)["tool"]
assert read_from_opentelemetry_span(span=spans[2], key=HL_FILE_OT_KEY)["prompt"]
assert read_from_opentelemetry_span(span=spans[3], key=HL_FILE_OT_KEY)["flow"]
- tool_trace_metadata = read_from_opentelemetry_span(span=spans[1], key=HL_TRACE_METADATA_KEY)
- prompt_trace_metadata = read_from_opentelemetry_span(span=spans[2], key=HL_TRACE_METADATA_KEY)
- flow_trace_metadata = read_from_opentelemetry_span(span=spans[3], key=HL_TRACE_METADATA_KEY)
+ tool_trace_metadata = TRACE_FLOW_CONTEXT.get(spans[1].get_span_context().span_id)
+ prompt_trace_metadata = TRACE_FLOW_CONTEXT.get(spans[2].get_span_context().span_id)
+ flow_trace_metadata = TRACE_FLOW_CONTEXT.get(spans[3].get_span_context().span_id)
# THEN Tool span is a child of Prompt span
assert tool_trace_metadata["trace_parent_id"] == spans[2].context.span_id
assert tool_trace_metadata["is_flow_log"] is False
@@ -174,10 +173,10 @@ def test_flow_decorator_flow_in_flow(
assert read_from_opentelemetry_span(span=spans[3], key=HL_FILE_OT_KEY)["flow"]
assert read_from_opentelemetry_span(span=spans[4], key=HL_FILE_OT_KEY)["flow"]
- tool_trace_metadata = read_from_opentelemetry_span(span=spans[1], key=HL_TRACE_METADATA_KEY)
- prompt_trace_metadata = read_from_opentelemetry_span(span=spans[2], key=HL_TRACE_METADATA_KEY)
- nested_flow_trace_metadata = read_from_opentelemetry_span(span=spans[3], key=HL_TRACE_METADATA_KEY)
- flow_trace_metadata = read_from_opentelemetry_span(span=spans[4], key=HL_TRACE_METADATA_KEY)
+ tool_trace_metadata = TRACE_FLOW_CONTEXT.get(spans[1].get_span_context().span_id)
+ prompt_trace_metadata = TRACE_FLOW_CONTEXT.get(spans[2].get_span_context().span_id)
+ nested_flow_trace_metadata = TRACE_FLOW_CONTEXT.get(spans[3].get_span_context().span_id)
+ flow_trace_metadata = TRACE_FLOW_CONTEXT.get(spans[4].get_span_context().span_id)
# THEN the parent of the Tool Log is the Prompt Log
assert tool_trace_metadata["trace_parent_id"] == spans[2].context.span_id
assert tool_trace_metadata["is_flow_log"] is False
@@ -290,14 +289,8 @@ def test_flow_decorator_hl_exporter_flow_inside_flow(
# THEN the second to last uploaded span is the nested Flow
flow_span = mock_export_method.call_args_list[4][0][0][0]
nested_flow_span = mock_export_method.call_args_list[3][0][0][0]
- last_span_flow_metadata = read_from_opentelemetry_span(
- span=flow_span,
- key=HL_TRACE_METADATA_KEY,
- )
- flow_span_flow_metadata = read_from_opentelemetry_span(
- span=nested_flow_span,
- key=HL_TRACE_METADATA_KEY,
- )
+ last_span_flow_metadata = TRACE_FLOW_CONTEXT.get(flow_span.get_span_context().span_id)
+ flow_span_flow_metadata = TRACE_FLOW_CONTEXT.get(nested_flow_span.get_span_context().span_id)
assert flow_span_flow_metadata["trace_parent_id"] == flow_span.context.span_id
assert last_span_flow_metadata["is_flow_log"]
assert flow_span_flow_metadata["is_flow_log"]
diff --git a/tests/decorators/test_tool_decorator.py b/tests/decorators/test_tool_decorator.py
index 26eaa000..f2b072e3 100644
--- a/tests/decorators/test_tool_decorator.py
+++ b/tests/decorators/test_tool_decorator.py
@@ -102,7 +102,7 @@ def calculator(a: Optional[float], b) -> float:
return a + b
# WHEN building the Tool kernel
- # THEN the JSON schema is correctly built and `b` is is of `any` type
+ # THEN the JSON schema is correctly built and `b` is of `any` type
     # NOTE: JSON Schema dropped support for the 'any' type, so we include all
     # types as a workaround
assert calculator.json_schema == {
From 71f13dcae491476cb3d0876b921852dd859da968 Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Tue, 5 Nov 2024 09:44:44 +0000
Subject: [PATCH 33/70] Made Otel span I/O more robust
---
src/humanloop/decorators/flow.py | 59 +++++++-------
src/humanloop/decorators/prompt.py | 54 ++++++-------
src/humanloop/decorators/tool.py | 45 +++++------
src/humanloop/eval_utils/__init__.py | 55 +++++++------
src/humanloop/eval_utils/context.py | 2 +-
src/humanloop/eval_utils/domain.py | 18 ++---
src/humanloop/eval_utils/shared.py | 3 +-
src/humanloop/otel/constants.py | 9 +--
src/humanloop/otel/exporter.py | 57 +++++++-------
src/humanloop/otel/helpers.py | 6 +-
src/humanloop/otel/processor.py | 78 ++++++-------------
src/humanloop/requests/tool_kernel_request.py | 1 -
tests/conftest.py | 22 +++---
tests/decorators/test_flow_decorator.py | 48 ++++++------
tests/decorators/test_prompt_decorator.py | 8 +-
tests/decorators/test_tool_decorator.py | 6 +-
tests/otel/test_helpers.py | 13 +++-
tests/utils/assets/models/__init__.py | 2 +-
tests/utils/assets/models/circle.py | 1 -
.../assets/models/object_with_defaults.py | 1 -
.../models/object_with_optional_field.py | 7 +-
tests/utils/assets/models/shape.py | 5 +-
tests/utils/assets/models/square.py | 1 -
.../assets/models/undiscriminated_shape.py | 1 +
tests/utils/test_serialization.py | 4 +-
25 files changed, 239 insertions(+), 267 deletions(-)
diff --git a/src/humanloop/decorators/flow.py b/src/humanloop/decorators/flow.py
index a2441e96..1644778d 100644
--- a/src/humanloop/decorators/flow.py
+++ b/src/humanloop/decorators/flow.py
@@ -1,19 +1,16 @@
import uuid
from functools import wraps
-from typing import Any, Callable, Optional
+from typing import Any, Callable, Mapping, Optional, Sequence
+from opentelemetry.sdk.trace import Span
from opentelemetry.trace import Tracer
-from opentelemetry.sdk.trace import ReadableSpan
from humanloop.decorators.helpers import args_to_inputs
from humanloop.eval_utils import File
from humanloop.otel import TRACE_FLOW_CONTEXT, FlowContext
-from humanloop.otel.constants import (
- HL_FILE_OT_KEY,
- HL_LOG_OT_KEY,
- HL_OT_EMPTY_VALUE,
-)
+from humanloop.otel.constants import HL_FILE_KEY, HL_FILE_TYPE_KEY, HL_LOG_KEY, HL_PATH_KEY
from humanloop.otel.helpers import write_to_opentelemetry_span
+from humanloop.requests import FlowKernelRequestParams as FlowDict
def flow(
@@ -26,8 +23,8 @@ def flow(
def decorator(func: Callable):
@wraps(func)
- def wrapper(*args, **kwargs):
- span: ReadableSpan
+ def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any:
+ span: Span
with opentelemetry_tracer.start_as_current_span(str(uuid.uuid4())) as span:
span_id = span.get_span_context().span_id
if span.parent:
@@ -50,40 +47,40 @@ def wrapper(*args, **kwargs):
is_flow_log=True,
)
- # Write the Flow Kernel to the Span on HL_FILE_OT_KEY
- write_to_opentelemetry_span(
- span=span,
- key=HL_FILE_OT_KEY,
- value={
- "path": path if path else func.__name__,
- # If a None write is attempted then the attribute is removed
- # making it impossible to distinguish between a Flow Span and
- # Spans not created by Humanloop (see humanloop.otel.helpers.is_humanloop_span)
- "flow": {"attributes": attributes} if attributes else HL_OT_EMPTY_VALUE,
- },
- )
+ span.set_attribute(HL_PATH_KEY, path if path else func.__name__)
+ span.set_attribute(HL_FILE_TYPE_KEY, "flow")
+ if attributes:
+ print("HOWDIE", attributes)
+ write_to_opentelemetry_span(
+ span=span,
+ key=f"{HL_FILE_KEY}.flow.attributes",
+ value=attributes,
+ )
# Call the decorated function
output = func(*args, **kwargs)
+ inputs = args_to_inputs(func, args, kwargs)
+ flow_log = {}
+ if inputs:
+ flow_log["inputs"] = inputs
+ if output:
+ flow_log["output"] = output
# Write the Flow Log to the Span on HL_LOG_OT_KEY
- write_to_opentelemetry_span(
- span=span,
- key=HL_LOG_OT_KEY,
- value={
- "inputs": args_to_inputs(func, args, kwargs),
- "output": output,
- },
- )
+ if flow_log:
+ write_to_opentelemetry_span(
+ span=span,
+ key=HL_LOG_KEY,
+ value=flow_log,
+ )
# Return the output of the decorated function
return output
func.file = File( # type: ignore
- id=None,
path=path if path else func.__name__,
type="flow",
- version=attributes,
+ version=FlowDict(attributes=attributes),
is_decorated=True,
callable=wrapper,
)
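A hypothetical usage sketch; in the SDK the decorator is exposed through the Humanloop client, which supplies the OpenTelemetry tracer, so the arguments here are assumptions:

```
@flow(path="demo/agent", attributes={"variant": "baseline"})  # assumed invocation
def agent(question: str) -> str:
    return "stub answer"  # hypothetical pipeline body

agent("What is Humanloop?")  # opens a Flow span; child spans join its Trace
```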
diff --git a/src/humanloop/decorators/prompt.py b/src/humanloop/decorators/prompt.py
index 31e884e8..7e9c63aa 100644
--- a/src/humanloop/decorators/prompt.py
+++ b/src/humanloop/decorators/prompt.py
@@ -1,17 +1,13 @@
import uuid
from functools import wraps
-from typing import Any, Callable, Optional
+from typing import Any, Callable, Mapping, Optional, Sequence
+from opentelemetry.sdk.trace import Span
from opentelemetry.trace import Tracer
from humanloop.eval_utils import File
from humanloop.otel import TRACE_FLOW_CONTEXT, FlowContext
-
-from humanloop.otel.constants import (
- HL_FILE_OT_KEY,
- HL_LOG_OT_KEY,
- HL_OT_EMPTY_VALUE,
-)
+from humanloop.otel.constants import HL_FILE_KEY, HL_FILE_TYPE_KEY, HL_LOG_KEY, HL_PATH_KEY
from humanloop.otel.helpers import write_to_opentelemetry_span
from humanloop.types.model_endpoints import ModelEndpoints
from humanloop.types.model_providers import ModelProviders
@@ -78,7 +74,8 @@ def decorator(func: Callable):
prompt_kernel[attr_name] = attr_value # type: ignore
@wraps(func)
- def wrapper(*args, **kwargs):
+ def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any:
+ span: Span
with opentelemetry_tracer.start_as_current_span(str(uuid.uuid4())) as span:
span_id = span.get_span_context().span_id
if span.parent:
@@ -88,34 +85,28 @@ def wrapper(*args, **kwargs):
parent_trace_metadata = TRACE_FLOW_CONTEXT.get(span_parent_id, {})
if parent_trace_metadata:
TRACE_FLOW_CONTEXT[span_id] = FlowContext(
- trace_id=parent_trace_metadata['trace_id'],
- trace_parent_id=span_parent_id,
- is_flow_log=False
+ trace_id=parent_trace_metadata["trace_id"], trace_parent_id=span_parent_id, is_flow_log=False
)
- write_to_opentelemetry_span(
- span=span,
- key=HL_FILE_OT_KEY,
- value={
- "path": path if path else func.__name__,
- # Values not specified in the decorator will be
- # completed with the intercepted values from the
- # Instrumentors for LLM providers
- "prompt": prompt_kernel or HL_OT_EMPTY_VALUE, # noqa: F821
- },
- )
+ span.set_attribute(HL_PATH_KEY, path if path else func.__name__)
+ span.set_attribute(HL_FILE_TYPE_KEY, "prompt")
+ if prompt_kernel:
+ write_to_opentelemetry_span(
+ span=span,
+ key=f"{HL_FILE_KEY}.prompt",
+ value=prompt_kernel,
+ )
# Call the decorated function
output = func(*args, **kwargs)
- prompt_log = {"output": output}
-
- # Write the Prompt Log to the Span on HL_LOG_OT_KEY
- write_to_opentelemetry_span(
- span=span,
- key=HL_LOG_OT_KEY,
- value=prompt_log,
- )
+ if output:
+ prompt_log = {"output": output}
+ write_to_opentelemetry_span(
+ span=span,
+ key=HL_LOG_KEY,
+ value=prompt_log,
+ )
# Return the output of the decorated function
return output
@@ -123,9 +114,8 @@ def wrapper(*args, **kwargs):
wrapper.file = File( # type: ignore
path=path if path else func.__name__,
type="prompt",
- version=prompt_kernel,
+ version=prompt_kernel, # type: ignore
is_decorated=True,
- id=None,
callable=wrapper,
)
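And the Prompt counterpart, equally hypothetical; kernel values omitted here are later filled in by the span processor from the intercepted provider call:

```
@prompt(path="demo/qa", temperature=0.7)  # assumed decorator arguments
def ask(question: str) -> str:
    ...  # call an LLM; the provider instrumentor records the request details
    return "stub answer"
```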
diff --git a/src/humanloop/decorators/tool.py b/src/humanloop/decorators/tool.py
index d46ebc16..fa28d55c 100644
--- a/src/humanloop/decorators/tool.py
+++ b/src/humanloop/decorators/tool.py
@@ -7,16 +7,11 @@
from inspect import Parameter
from typing import Any, Callable, Literal, Mapping, Optional, Sequence, TypedDict, Union
-from opentelemetry.sdk.trace import ReadableSpan
from opentelemetry.trace import Tracer
from humanloop.eval_utils import File
from humanloop.otel import TRACE_FLOW_CONTEXT, FlowContext
-from humanloop.otel.constants import (
- HL_FILE_OT_KEY,
- HL_LOG_OT_KEY,
- HL_OT_EMPTY_VALUE,
-)
+from humanloop.otel.constants import HL_FILE_KEY, HL_FILE_TYPE_KEY, HL_LOG_KEY, HL_PATH_KEY
from humanloop.otel.helpers import write_to_opentelemetry_span
from humanloop.requests.tool_function import ToolFunctionParams
from humanloop.requests.tool_kernel_request import ToolKernelRequestParams
@@ -44,7 +39,6 @@ def decorator(func: Callable):
@wraps(func)
def wrapper(*args, **kwargs):
- span: ReadableSpan
with opentelemetry_tracer.start_as_current_span(str(uuid.uuid4())) as span:
span_id = span.get_span_context().span_id
if span.parent:
@@ -60,14 +54,14 @@ def wrapper(*args, **kwargs):
)
# Write the Tool Kernel to the Span on HL_FILE_OT_KEY
- write_to_opentelemetry_span(
- span=span,
- key=HL_FILE_OT_KEY,
- value={
- "path": path if path else func.__name__,
- "tool": tool_kernel,
- },
- )
+ span.set_attribute(HL_PATH_KEY, path if path else func.__name__)
+ span.set_attribute(HL_FILE_TYPE_KEY, "tool")
+ if tool_kernel:
+ write_to_opentelemetry_span(
+ span=span,
+ key=f"{HL_FILE_KEY}.tool",
+ value=tool_kernel,
+ )
# Call the decorated function
output = func(*args, **kwargs)
@@ -80,11 +74,12 @@ def wrapper(*args, **kwargs):
tool_log["output"] = output
# Write the Tool Log to the Span on HL_LOG_OT_KEY
- write_to_opentelemetry_span(
- span=span,
- key=HL_LOG_OT_KEY,
- value=tool_log,
- )
+ if tool_log:
+ write_to_opentelemetry_span(
+ span=span,
+ key=HL_LOG_KEY,
+ value=tool_log,
+ )
# Return the output of the decorated function
return output
@@ -94,7 +89,6 @@ def wrapper(*args, **kwargs):
type="tool",
version=tool_kernel,
is_decorated=True,
- id=None,
callable=wrapper,
)
@@ -110,19 +104,22 @@ def _build_tool_kernel(
strict: bool,
) -> ToolKernelRequestParams:
"""Build ToolKernelRequest object from decorated function."""
- return ToolKernelRequestParams(
+ kernel = ToolKernelRequestParams(
source_code=textwrap.dedent(
# Remove the tool decorator from source code
inspect.getsource(func).split("\n", maxsplit=1)[1]
),
# Note: OTel complains about falsy values in attributes, so we use OT_EMPTY_ATTRIBUTE
- attributes=attributes or HL_OT_EMPTY_VALUE, # type: ignore
- setup_values=setup_values or HL_OT_EMPTY_VALUE, # type: ignore
function=_build_function_property(
func=func,
strict=strict,
),
)
+ if attributes:
+ kernel["attributes"] = attributes
+ if setup_values:
+ kernel["setup_values"] = setup_values
+ return kernel
def _build_function_property(func: Callable, strict: bool) -> ToolFunctionParams:
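A hypothetical `@tool` usage sketch; the decorated function stays callable and, as the test file earlier in this series shows, gains a `json_schema` built from its signature:

```
@tool()  # assumed invocation; the client supplies the tracer in practice
def calculator(a: float, b: float) -> float:
    """Add two numbers."""
    return a + b

calculator(1.0, 2.0)             # wrapped call emits a Tool span
schema = calculator.json_schema  # derived via inspect from the signature
```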
diff --git a/src/humanloop/eval_utils/__init__.py b/src/humanloop/eval_utils/__init__.py
index ba70ca94..a2a7d750 100644
--- a/src/humanloop/eval_utils/__init__.py
+++ b/src/humanloop/eval_utils/__init__.py
@@ -8,14 +8,17 @@
not be called directly.
"""
+import inspect
import logging
+import sys
import threading
+import time
import typing
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
from functools import partial
-import inspect
from logging import INFO
+from typing import Callable, Dict, List, Literal, Optional, Sequence, Tuple, Union
from pydantic import ValidationError
from typing import Callable, Sequence, Literal, Union, Optional, List, Dict, Tuple
@@ -23,25 +26,28 @@
import sys
-from humanloop import PromptResponse, FlowResponse, ToolResponse, EvaluatorResponse
-from humanloop.eval_utils.domain import Dataset, Evaluator, EvaluatorCheck, File
+from humanloop import EvaluatorResponse, FlowResponse, PromptResponse, ToolResponse
from humanloop.client import BaseHumanloop
from humanloop.core.api_error import ApiError
+from humanloop.eval_utils.context import EVALUATION_CONTEXT, EvaluationContext
+from humanloop.eval_utils.domain import Dataset, Evaluator, EvaluatorCheck, File
# We use TypedDicts for requests, which is consistent with the rest of the SDK
from humanloop.eval_utils.shared import add_log_to_evaluation
-from humanloop.eval_utils.context import EvaluationContext, EVALUATION_CONTEXT
+from humanloop.requests import CodeEvaluatorRequestParams as CodeEvaluatorDict
+from humanloop.requests import ExternalEvaluatorRequestParams as ExternalEvaluator
from humanloop.requests import FlowKernelRequestParams as FlowDict
+from humanloop.requests import HumanEvaluatorRequestParams as HumanEvaluatorDict
+from humanloop.requests import LlmEvaluatorRequestParams as LLMEvaluatorDict
from humanloop.requests import PromptKernelRequestParams as PromptDict
from humanloop.requests import ToolKernelRequestParams as ToolDict
-from humanloop.requests import ExternalEvaluatorRequestParams as ExternalEvaluator
-from humanloop.requests import CodeEvaluatorRequestParams as CodeEvaluatorDict
-from humanloop.requests import LlmEvaluatorRequestParams as LLMEvaluatorDict
-from humanloop.requests import HumanEvaluatorRequestParams as HumanEvaluatorDict
-
+from humanloop.types import BooleanEvaluatorStatsResponse as BooleanStats
+from humanloop.types import DatapointResponse as Datapoint
+from humanloop.types import EvaluationResponse, EvaluationStats, VersionStatsResponse
# Responses are Pydantic models and we leverage them for improved request validation
from humanloop.types import FlowKernelRequest as Flow
+from humanloop.types import NumericEvaluatorStatsResponse as NumericStats
from humanloop.types import PromptKernelRequest as Prompt
from humanloop.types import ToolKernelRequest as Tool
from humanloop.types import BooleanEvaluatorStatsResponse as BooleanStats
@@ -100,7 +106,7 @@ def _run_eval(
# Decorated function
file_: File = file.file # type: ignore
else:
- file_ = file
+ file_ = file # type: ignore
is_decorated = file_.pop("is_decorated", False)
@@ -140,7 +146,7 @@ def _run_eval(
except ValidationError:
flow_version = {"attributes": version}
file_dict = {**file_, **flow_version}
- hl_file = client.flows.upsert(**file_dict)
+ hl_file = client.flows.upsert(**file_dict) # type: ignore
elif type_ == "prompt":
try:
@@ -148,7 +154,7 @@ def _run_eval(
except ValidationError as error_:
logger.error(msg="Invalid Prompt `version` in your `file` request. \n\nValidation error: \n)")
raise error_
- hl_file = client.prompts.upsert(**file_dict)
+ hl_file = client.prompts.upsert(**file_dict) # type: ignore
elif type_ == "tool":
try:
@@ -156,10 +162,10 @@ def _run_eval(
except ValidationError as error_:
logger.error(msg="Invalid Tool `version` in your `file` request. \n\nValidation error: \n)")
raise error_
- hl_file = client.tools.upsert(**file_dict)
+ hl_file = client.tools.upsert(**file_dict) # type: ignore
elif type_ == "evaluator":
- hl_file = client.evaluators.upsert(**file_dict)
+ hl_file = client.evaluators.upsert(**file_dict) # type: ignore
else:
raise NotImplementedError(f"Unsupported File type: {type_}")
@@ -197,6 +203,7 @@ def _run_eval(
path=evaluator.get("path"),
spec=spec,
)
+ function_ = typing.cast(Callable, function_)
# Validate upfront that the local Evaluators and Dataset fit
requires_target = False
@@ -206,7 +213,7 @@ def _run_eval(
break
if requires_target:
missing_target = 0
- for datapoint in hl_dataset.datapoints:
+ for datapoint in hl_dataset.datapoints: # type: ignore
if not datapoint.target:
missing_target += 1
if missing_target > 0:
@@ -250,7 +257,7 @@ def process_datapoint(datapoint: Datapoint):
if "messages" in datapoint_dict and datapoint_dict["messages"] is not None:
output = function_(**datapoint_dict["inputs"], messages=datapoint_dict["messages"])
else:
- function_(datapoint_dict["inputs"])
+ function_(datapoint_dict["inputs"]) # type: ignore
else:
# Define the function to execute your function in parallel and Log to Humanloop
@@ -267,14 +274,16 @@ def process_datapoint(dp: Datapoint, evaluated_file_id: str, run_id: str):
datapoint_dict = dp.dict()
try:
if "messages" in datapoint_dict:
- output = function_(
+ output = function_( # type: ignore
**datapoint_dict["inputs"],
messages=datapoint_dict["messages"],
)
else:
- output = function_(**datapoint_dict["inputs"])
+ # function_ is not None at this point
+ output = function_(**datapoint_dict["inputs"]) # type: ignore
if custom_logger:
- log = function_(client=client, output=output)
+ # function_ is not None at this point
+ log = function_(client=client, output=output) # type: ignore
else:
if not isinstance(output, str):
raise ValueError(
@@ -311,7 +320,7 @@ def process_datapoint(dp: Datapoint, evaluated_file_id: str, run_id: str):
logger.info(f"{CYAN}Run ID: {run_id}{RESET}")
# Generate locally if a file `callable` is provided
- if function_:
+ if function_: # type: ignore
logger.info(
f"{CYAN}\nRunning '{hl_file.name}' over the Dataset '{hl_dataset.name}' using {workers} workers{RESET} "
)
@@ -421,8 +430,8 @@ def get_score_from_evaluator_stat(
elif isinstance(stat, NumericStats):
score = round(stat.mean, 2)
else:
- pass
- return score
+ raise ValueError(f"Unsupported Evaluator Stat type: {type(stat)}")
+ return score # type: ignore
class _SimpleProgressBar:
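
The `else: pass` branch previously let an unknown stat type fall through and return an unbound `score`; raising makes the failure explicit. In outline, the scoring rule is as follows (a duck-typed sketch, not the SDK's exact `isinstance` checks):

```python
def get_score(stat) -> float:
    # BooleanStats-like: score is the fraction of true judgments
    if hasattr(stat, "num_true") and stat.total_logs:
        return round(stat.num_true / stat.total_logs, 2)
    # NumericStats-like: score is the rounded mean judgment
    if hasattr(stat, "mean"):
        return round(stat.mean, 2)
    raise ValueError(f"Unsupported Evaluator Stat type: {type(stat)}")
```
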
@@ -478,7 +487,7 @@ def get_evaluator_stats_by_path(
evaluators_by_id[evaluator_stat.evaluator_version_id].version.path: evaluator_stat
for evaluator_stat in stat.evaluator_stats
}
- return evaluator_stats_by_path
+ return evaluator_stats_by_path # type: ignore
def check_evaluation_threshold(
diff --git a/src/humanloop/eval_utils/context.py b/src/humanloop/eval_utils/context.py
index bdfd0a88..4f949fe5 100644
--- a/src/humanloop/eval_utils/context.py
+++ b/src/humanloop/eval_utils/context.py
@@ -1,6 +1,6 @@
-from typing import TypedDict, Callable
import typing
from contextvars import ContextVar
+from typing import Callable, TypedDict
class EvaluationContext(TypedDict):
diff --git a/src/humanloop/eval_utils/domain.py b/src/humanloop/eval_utils/domain.py
index 8e65503a..8aaab9f0 100644
--- a/src/humanloop/eval_utils/domain.py
+++ b/src/humanloop/eval_utils/domain.py
@@ -1,25 +1,25 @@
from typing import Callable, Literal, Optional, Sequence, TypedDict, Union
-from typing_extensions import NotRequired
from pydantic import BaseModel
+from typing_extensions import NotRequired
+
+from humanloop.requests import CodeEvaluatorRequestParams as CodeEvaluatorDict
+from humanloop.requests import CreateDatapointRequestParams as DatapointDict
+from humanloop.requests import ExternalEvaluatorRequestParams as ExternalEvaluator
# We use TypedDicts for requests, which is consistent with the rest of the SDK
from humanloop.requests import FlowKernelRequestParams as FlowDict
+from humanloop.requests import HumanEvaluatorRequestParams as HumanEvaluatorDict
+from humanloop.requests import LlmEvaluatorRequestParams as LLMEvaluatorDict
from humanloop.requests import PromptKernelRequestParams as PromptDict
from humanloop.requests import ToolKernelRequestParams as ToolDict
-from humanloop.requests import CreateDatapointRequestParams as DatapointDict
-from humanloop.requests import ExternalEvaluatorRequestParams as ExternalEvaluator
-from humanloop.requests import CodeEvaluatorRequestParams as CodeEvaluatorDict
-from humanloop.requests import LlmEvaluatorRequestParams as LLMEvaluatorDict
-from humanloop.requests import HumanEvaluatorRequestParams as HumanEvaluatorDict
-
-# Responses are Pydantic models and we leverage them for improved request validation
-from humanloop.types import UpdateDatesetAction as UpdateDatasetAction # TODO: fix original type typo
from humanloop.types import (
EvaluatorArgumentsType,
EvaluatorReturnTypeEnum,
)
+# Responses are Pydantic models and we leverage them for improved request validation
+from humanloop.types import UpdateDatesetAction as UpdateDatasetAction # TODO: fix original type typo
EvaluatorDict = Union[CodeEvaluatorDict, LLMEvaluatorDict, HumanEvaluatorDict, ExternalEvaluator]
Version = Union[FlowDict, PromptDict, ToolDict, EvaluatorDict]
diff --git a/src/humanloop/eval_utils/shared.py b/src/humanloop/eval_utils/shared.py
index b3400839..6468d2cc 100644
--- a/src/humanloop/eval_utils/shared.py
+++ b/src/humanloop/eval_utils/shared.py
@@ -1,12 +1,11 @@
-from datetime import datetime
import logging
import typing
+from datetime import datetime
from humanloop.base_client import BaseHumanloop
from humanloop.eval_utils.domain import Evaluator
from humanloop.types.datapoint_response_target_value import DatapointResponseTargetValue
-
logger = logging.getLogger("humanloop.sdk")
diff --git a/src/humanloop/otel/constants.py b/src/humanloop/otel/constants.py
index 2a9c20c9..1215dceb 100644
--- a/src/humanloop/otel/constants.py
+++ b/src/humanloop/otel/constants.py
@@ -1,7 +1,6 @@
# Attribute name prefix on Humanloop spans for file-related attributes + path
-HL_FILE_OT_KEY = "humanloop.file"
+HL_FILE_KEY = "humanloop.file"
# Attribute name prefix on Humanloop spans for log-related attributes
-HL_LOG_OT_KEY = "humanloop.log"
-# OTel does not allow falsy values for top-level attributes e.g. foo
-# and None only on nested attributes e.g. foo.bar
-HL_OT_EMPTY_VALUE = "EMPTY"
+HL_LOG_KEY = "humanloop.log"
+HL_FILE_TYPE_KEY = "humanloop.file.type"
+HL_PATH_KEY = "humanloop.file.path"
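
The rename drops the `_OT_` infix and replaces the `HL_OT_EMPTY_VALUE` sentinel with explicit type and path attributes. These keys act as prefixes under which nested Humanloop objects are flattened into dotted OTel attributes, roughly as below (a simplified stand-in for the SDK's write helper):

```python
HL_FILE_KEY = "humanloop.file"

def flatten(prefix: str, value: dict) -> dict:
    """Flatten a nested dict into dotted OTel attribute keys."""
    out: dict = {}
    for k, v in value.items():
        dotted = f"{prefix}.{k}"
        if isinstance(v, dict):
            out.update(flatten(dotted, v))
        else:
            out[dotted] = v
    return out

attrs = flatten(HL_FILE_KEY, {"path": "demo/flow", "flow": {"attributes": {"foo": "bar"}}})
assert attrs == {
    "humanloop.file.path": "demo/flow",
    "humanloop.file.flow.attributes.foo": "bar",
}
```
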
diff --git a/src/humanloop/otel/exporter.py b/src/humanloop/otel/exporter.py
index 3c907d97..8671b64f 100644
--- a/src/humanloop/otel/exporter.py
+++ b/src/humanloop/otel/exporter.py
@@ -1,7 +1,8 @@
import json
import logging
import typing
-from queue import Queue, Empty as EmptyQueue
+from queue import Empty as EmptyQueue
+from queue import Queue
from threading import Thread
from typing import Any, Optional
@@ -12,7 +13,7 @@
from humanloop.core.request_options import RequestOptions
from humanloop.eval_utils import EVALUATION_CONTEXT, EvaluationContext
from humanloop.otel import TRACE_FLOW_CONTEXT
-from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY, HL_OT_EMPTY_VALUE
+from humanloop.otel.constants import HL_FILE_KEY, HL_FILE_TYPE_KEY, HL_LOG_KEY
from humanloop.otel.helpers import is_humanloop_span, read_from_opentelemetry_span
from humanloop.requests.flow_kernel_request import FlowKernelRequestParams
from humanloop.requests.prompt_kernel_request import PromptKernelRequestParams
@@ -64,7 +65,7 @@ def export(self, spans: trace.Sequence[ReadableSpan]) -> SpanExportResult:
evaluation_context = EVALUATION_CONTEXT.get()
except LookupError:
# Decorators are not used in a client.evaluations.run() context
- evaluation_context = {}
+ evaluation_context = {} # type: ignore
self._upload_queue.put((span, evaluation_context))
return SpanExportResult.SUCCESS
else:
@@ -123,13 +124,14 @@ def _do_work(self):
self._upload_queue.task_done()
def _export_span_dispatch(self, span: ReadableSpan, evaluation_context: EvaluationContext) -> None:
- hl_file = read_from_opentelemetry_span(span, key=HL_FILE_OT_KEY)
+ hl_file = read_from_opentelemetry_span(span, key=HL_FILE_KEY)
+ file_type = span.attributes.get(HL_FILE_TYPE_KEY)
- if "prompt" in hl_file:
+ if file_type == "prompt":
export_func = self._export_prompt
- elif "tool" in hl_file:
+ elif file_type == "tool":
export_func = self._export_tool
- elif "flow" in hl_file:
+ elif file_type == "flow":
export_func = self._export_flow
else:
raise NotImplementedError(f"Unknown span type: {hl_file}")
@@ -138,11 +140,11 @@ def _export_span_dispatch(self, span: ReadableSpan, evaluation_context: Evaluati
def _export_prompt(self, span: ReadableSpan, evaluation_context: EvaluationContext) -> None:
file_object: dict[str, Any] = read_from_opentelemetry_span(
span,
- key=HL_FILE_OT_KEY,
+ key=HL_FILE_KEY,
)
log_object: dict[str, Any] = read_from_opentelemetry_span(
span,
- key=HL_LOG_OT_KEY,
+ key=HL_LOG_KEY,
)
# NOTE: Due to OTel conventions, attributes with value of None are removed
# If not present, instantiate as empty dictionary
@@ -156,10 +158,11 @@ def _export_prompt(self, span: ReadableSpan, evaluation_context: EvaluationConte
log_object["messages"] = []
else:
log_object["messages"] = list(log_object["messages"].values())
- trace_metadata = TRACE_FLOW_CONTEXT.get(span.get_span_context().span_id, {})
- trace_parent_id = self._span_id_to_uploaded_log_id.get(
- trace_metadata.get("trace_parent_id"),
- )
+ trace_metadata = TRACE_FLOW_CONTEXT.get(span.get_span_context().span_id)
+ if trace_metadata and "trace_parent_id" in trace_metadata:
+ trace_parent_id = self._span_id_to_uploaded_log_id[trace_metadata["trace_parent_id"]]
+ else:
+ trace_parent_id = None
prompt: PromptKernelRequestParams = file_object["prompt"]
path: str = file_object["path"]
if not isinstance(log_object["output"], str):
@@ -183,16 +186,17 @@ def _export_prompt(self, span: ReadableSpan, evaluation_context: EvaluationConte
self._span_id_to_uploaded_log_id[span.context.span_id] = log_response.id
def _export_tool(self, span: ReadableSpan, evaluation_context: EvaluationContext) -> None:
- file_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HL_FILE_OT_KEY)
- log_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HL_LOG_OT_KEY)
+ file_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HL_FILE_KEY)
+ log_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HL_LOG_KEY)
trace_metadata = TRACE_FLOW_CONTEXT.get(span.get_span_context().span_id, {})
- trace_parent_id = self._span_id_to_uploaded_log_id.get(
- trace_metadata.get("trace_parent_id"),
- )
+ if "trace_parent_id" in trace_metadata:
+ trace_parent_id = self._span_id_to_uploaded_log_id.get(
+ trace_metadata["trace_parent_id"],
+ )
tool = file_object["tool"]
- if tool.get("attributes", HL_OT_EMPTY_VALUE) == HL_OT_EMPTY_VALUE:
+ if not tool.get("attributes"):
tool["attributes"] = {}
- if tool.get("setup_values", HL_OT_EMPTY_VALUE) == HL_OT_EMPTY_VALUE:
+ if not tool.get("setup_values"):
tool["setup_values"] = {}
path: str = file_object["path"]
if not isinstance(log_object["output"], str):
@@ -212,18 +216,19 @@ def _export_tool(self, span: ReadableSpan, evaluation_context: EvaluationContext
self._span_id_to_uploaded_log_id[span.context.span_id] = log_response.id
def _export_flow(self, span: ReadableSpan, evaluation_context: EvaluationContext) -> None:
- file_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HL_FILE_OT_KEY)
- log_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HL_LOG_OT_KEY)
+ file_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HL_FILE_KEY)
+ log_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HL_LOG_KEY)
trace_metadata = TRACE_FLOW_CONTEXT.get(span.get_span_context().span_id, {})
- trace_parent_id = self._span_id_to_uploaded_log_id.get(
- trace_metadata.get("trace_parent_id"),
- )
+ if "trace_parent_id" in trace_metadata:
+ trace_parent_id = self._span_id_to_uploaded_log_id.get(
+ trace_metadata["trace_parent_id"],
+ )
# Cannot write falsy values except None in OTel Span attributes
# If a None write is attempted then the attribute is removed
# making it impossible to distinguish between a Flow Span and
# Spans not created by Humanloop (see humanloop.otel.helpers.is_humanloop_span)
flow: FlowKernelRequestParams
- if file_object["flow"] == HL_OT_EMPTY_VALUE:
+ if not file_object.get("flow"):
flow = {"attributes": {}}
else:
flow = file_object["flow"]
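
With the sentinel gone, the exporter no longer probes the file object for a `prompt`/`tool`/`flow` key and instead dispatches on the dedicated `humanloop.file.type` attribute, along these lines (the handler name strings stand in for the exporter's bound methods):

```python
HL_FILE_TYPE_KEY = "humanloop.file.type"

def pick_export_func(attributes: dict) -> str:
    file_type = attributes.get(HL_FILE_TYPE_KEY)
    handlers = {"prompt": "_export_prompt", "tool": "_export_tool", "flow": "_export_flow"}
    if file_type not in handlers:
        raise NotImplementedError(f"Unknown span type: {file_type}")
    return handlers[file_type]

assert pick_export_func({HL_FILE_TYPE_KEY: "flow"}) == "_export_flow"
```
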
diff --git a/src/humanloop/otel/helpers.py b/src/humanloop/otel/helpers.py
index eeaae9a4..c920f7a4 100644
--- a/src/humanloop/otel/helpers.py
+++ b/src/humanloop/otel/helpers.py
@@ -4,7 +4,7 @@
from opentelemetry.trace import SpanKind
from opentelemetry.util.types import AttributeValue
-from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY
+from humanloop.otel.constants import HL_FILE_KEY, HL_LOG_KEY
NestedDict = dict[str, Union["NestedDict", AttributeValue]]
NestedList = list[Union["NestedList", NestedDict]]
@@ -220,8 +220,8 @@ def is_humanloop_span(span: ReadableSpan) -> bool:
"""Check if the Span was created by the Humanloop SDK."""
try:
-    # Valid spans will have keys with the HL_FILE_OT_KEY and HL_LOG_OT_KEY prefixes present
+    # Valid spans will have keys with the HL_FILE_KEY and HL_LOG_KEY prefixes present
- read_from_opentelemetry_span(span, key=HL_FILE_OT_KEY)
- read_from_opentelemetry_span(span, key=HL_LOG_OT_KEY)
+ read_from_opentelemetry_span(span, key=HL_FILE_KEY)
+ read_from_opentelemetry_span(span, key=HL_LOG_KEY)
except KeyError:
return False
return True
diff --git a/src/humanloop/otel/processor.py b/src/humanloop/otel/processor.py
index 578ced97..f818d578 100644
--- a/src/humanloop/otel/processor.py
+++ b/src/humanloop/otel/processor.py
@@ -9,7 +9,7 @@
from opentelemetry.sdk.trace.export import SimpleSpanProcessor, SpanExporter
from pydantic import ValidationError as PydanticValidationError
-from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY, HL_OT_EMPTY_VALUE
+from humanloop.otel.constants import HL_FILE_KEY, HL_FILE_TYPE_KEY, HL_LOG_KEY
from humanloop.otel.helpers import (
is_humanloop_span,
is_llm_provider_call,
@@ -71,17 +71,23 @@ def _is_instrumentor_span(span: ReadableSpan) -> bool:
def _process_span_dispatch(span: ReadableSpan, children_spans: list[ReadableSpan]):
- hl_file = read_from_opentelemetry_span(span, key=HL_FILE_OT_KEY)
+ file_type = span.attributes[HL_FILE_TYPE_KEY]
- if "prompt" in hl_file:
+ # Processing common to all Humanloop File types
+ if span.start_time:
+ span._attributes[f"{HL_LOG_KEY}.start_time"] = span.start_time / 1e9
+ if span.end_time:
+ span._attributes[f"{HL_LOG_KEY}.end_time"] = span.end_time / 1e9
+ span._attributes[f"{HL_LOG_KEY}.created_at"] = span.end_time / 1e9
+
+ # Processing specific to each Humanloop File type
+ if file_type == "prompt":
_process_prompt(prompt_span=span, children_spans=children_spans)
return
- elif "tool" in hl_file:
- _process_tool(tool_span=span, children_spans=children_spans)
- return
- elif "flow" in hl_file:
- _process_flow(flow_span=span, children_spans=children_spans)
- return
+ elif file_type == "tool":
+ pass
+ elif file_type == "flow":
+ pass
else:
logger.error("Unknown Humanloop File Span %s", span)
@@ -99,45 +105,12 @@ def _process_prompt(prompt_span: ReadableSpan, children_spans: list[ReadableSpan
break
-def _process_tool(tool_span: ReadableSpan, children_spans: list[ReadableSpan]):
- tool_log = read_from_opentelemetry_span(tool_span, key=HL_LOG_OT_KEY)
- if tool_span.start_time:
- tool_log["start_time"] = tool_span.start_time / 1e9
- if tool_span.end_time:
- tool_log["end_time"] = tool_span.end_time / 1e9
- tool_log["created_at"] = tool_span.end_time / 1e9
-
- write_to_opentelemetry_span(
- span=tool_span,
- key=HL_LOG_OT_KEY,
- value=tool_log,
- )
-
-
-def _process_flow(flow_span: ReadableSpan, children_spans: list[ReadableSpan]):
- # NOTE: Use children_spans if needed
- flow_log = read_from_opentelemetry_span(flow_span, key=HL_LOG_OT_KEY)
- if flow_span.start_time:
- flow_log["start_time"] = flow_span.start_time / 1e9
- if flow_span.end_time:
- flow_log["end_time"] = flow_span.end_time / 1e9
- flow_log["created_at"] = flow_span.end_time / 1e9
-
- write_to_opentelemetry_span(
- span=flow_span,
- key=HL_LOG_OT_KEY,
- value=flow_log,
- )
-
-
def _enrich_prompt_kernel(prompt_span: ReadableSpan, llm_provider_call_span: ReadableSpan):
- hl_file: dict[str, Any] = read_from_opentelemetry_span(prompt_span, key=HL_FILE_OT_KEY)
+ hl_file: dict[str, Any] = read_from_opentelemetry_span(prompt_span, key=HL_FILE_KEY)
gen_ai_object: dict[str, Any] = read_from_opentelemetry_span(llm_provider_call_span, key="gen_ai")
llm_object: dict[str, Any] = read_from_opentelemetry_span(llm_provider_call_span, key="llm")
- prompt: dict[str, Any] = hl_file.get("prompt") # type: ignore
- if prompt == HL_OT_EMPTY_VALUE:
- prompt = {}
+ prompt: dict[str, Any] = hl_file.get("prompt", {}) # type: ignore
# Check if the Prompt Kernel keys were assigned default values
# via the @prompt arguments. Otherwise, use the information
@@ -165,15 +138,18 @@ def _enrich_prompt_kernel(prompt_span: ReadableSpan, llm_provider_call_span: Rea
hl_file["prompt"] = prompt
write_to_opentelemetry_span(
span=prompt_span,
- key=HL_FILE_OT_KEY,
+ key=HL_FILE_KEY,
# hl_file was modified in place via prompt_kernel reference
value=hl_file,
)
def _enrich_prompt_log(prompt_span: ReadableSpan, llm_provider_call_span: ReadableSpan):
- hl_file: dict[str, Any] = read_from_opentelemetry_span(prompt_span, key=HL_FILE_OT_KEY)
- hl_log: dict[str, Any] = read_from_opentelemetry_span(prompt_span, key=HL_LOG_OT_KEY)
+ hl_file: dict[str, Any] = read_from_opentelemetry_span(prompt_span, key=HL_FILE_KEY)
+ try:
+ hl_log: dict[str, Any] = read_from_opentelemetry_span(prompt_span, key=HL_LOG_KEY)
+ except KeyError:
+ hl_log = {}
gen_ai_object: dict[str, Any] = read_from_opentelemetry_span(llm_provider_call_span, key="gen_ai")
# TODO: Seed not added by Instrumentors in provider call
@@ -185,12 +161,6 @@ def _enrich_prompt_log(prompt_span: ReadableSpan, llm_provider_call_span: Readab
# Note: read_from_opentelemetry_span returns the list as a dict due to Otel conventions
hl_log["messages"] = gen_ai_object.get("prompt")
- if prompt_span.start_time:
- hl_log["start_time"] = prompt_span.start_time / 1e9
- if prompt_span.end_time:
- hl_log["end_time"] = prompt_span.end_time / 1e9
- hl_log["created_at"] = prompt_span.end_time / 1e9
-
try:
inputs = {}
system_message = gen_ai_object["prompt"]["0"]["content"]
@@ -210,7 +180,7 @@ def _enrich_prompt_log(prompt_span: ReadableSpan, llm_provider_call_span: Readab
write_to_opentelemetry_span(
span=prompt_span,
- key=HL_LOG_OT_KEY,
+ key=HL_LOG_KEY,
# hl_log was modified in place
value=hl_log,
)
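
The per-type `_process_tool` and `_process_flow` helpers are removed because the timestamp bookkeeping they duplicated now happens once in `_process_span_dispatch`. The conversion itself is just OTel nanoseconds to epoch seconds, as sketched here:

```python
import time

def log_times(start_time_ns: int, end_time_ns: int) -> dict:
    # OTel spans store times as integer nanoseconds; Humanloop Logs use seconds
    return {
        "start_time": start_time_ns / 1e9,
        "end_time": end_time_ns / 1e9,
        "created_at": end_time_ns / 1e9,  # the Log is created when the span ends
    }

now_ns = time.time_ns()
times = log_times(now_ns, now_ns + 1_500_000_000)
assert round(times["end_time"] - times["start_time"], 1) == 1.5
```
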
diff --git a/src/humanloop/requests/tool_kernel_request.py b/src/humanloop/requests/tool_kernel_request.py
index 6973c1d0..bd0cd783 100644
--- a/src/humanloop/requests/tool_kernel_request.py
+++ b/src/humanloop/requests/tool_kernel_request.py
@@ -1,6 +1,5 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
import typing_extensions
from .tool_function import ToolFunctionParams
import typing
diff --git a/tests/conftest.py b/tests/conftest.py
index ccdaa81d..c3c396b7 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -2,25 +2,23 @@
from unittest.mock import MagicMock
import pytest
-from opentelemetry.instrumentation.openai import OpenAIInstrumentor
-from opentelemetry.instrumentation.instrumentor import BaseInstrumentor # type: ignore
+from humanloop.decorators.flow import flow
+from humanloop.decorators.prompt import prompt
+from humanloop.decorators.tool import tool
+from humanloop.otel.exporter import HumanloopSpanExporter
+from humanloop.otel.processor import HumanloopSpanProcessor
+from openai.types.chat.chat_completion_message_param import ChatCompletionMessageParam
from opentelemetry.instrumentation.anthropic import AnthropicInstrumentor
-from opentelemetry.instrumentation.replicate import ReplicateInstrumentor
from opentelemetry.instrumentation.cohere import CohereInstrumentor
from opentelemetry.instrumentation.groq import GroqInstrumentor
+from opentelemetry.instrumentation.instrumentor import BaseInstrumentor # type: ignore
+from opentelemetry.instrumentation.openai import OpenAIInstrumentor
+from opentelemetry.instrumentation.replicate import ReplicateInstrumentor
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.trace import TracerProvider
-from opentelemetry.trace import Tracer
-
-from openai.types.chat.chat_completion_message_param import ChatCompletionMessageParam
from opentelemetry.sdk.trace.export import SimpleSpanProcessor
from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
-
-from humanloop.decorators.flow import flow
-from humanloop.decorators.prompt import prompt
-from humanloop.decorators.tool import tool
-from humanloop.otel.exporter import HumanloopSpanExporter
-from humanloop.otel.processor import HumanloopSpanProcessor
+from opentelemetry.trace import Tracer
@pytest.fixture(scope="function")
diff --git a/tests/decorators/test_flow_decorator.py b/tests/decorators/test_flow_decorator.py
index 01d0e8e2..1375d722 100644
--- a/tests/decorators/test_flow_decorator.py
+++ b/tests/decorators/test_flow_decorator.py
@@ -4,11 +4,12 @@
import time
from unittest.mock import patch
+import pytest
from humanloop.decorators.flow import flow
from humanloop.decorators.prompt import prompt
from humanloop.decorators.tool import tool
from humanloop.otel import TRACE_FLOW_CONTEXT
-from humanloop.otel.constants import HL_FILE_OT_KEY
+from humanloop.otel.constants import HL_FILE_KEY
from humanloop.otel.exporter import HumanloopSpanExporter
from humanloop.otel.helpers import read_from_opentelemetry_span
from openai import OpenAI
@@ -92,11 +93,11 @@ def test_decorators_without_flow(
assert len(spans) == 3
assert read_from_opentelemetry_span(
span=spans[1],
- key=HL_FILE_OT_KEY,
+ key=HL_FILE_KEY,
)["tool"]
assert read_from_opentelemetry_span(
span=spans[2],
- key=HL_FILE_OT_KEY,
+ key=HL_FILE_KEY,
)["prompt"]
for span in spans:
# THEN no metadata related to trace is present on either of them
@@ -132,12 +133,12 @@ def test_decorators_with_flow_decorator(
spans = exporter.get_finished_spans()
assert len(spans) == 4
# THEN the span are returned bottom to top
- assert read_from_opentelemetry_span(span=spans[1], key=HL_FILE_OT_KEY)["tool"]
- assert read_from_opentelemetry_span(span=spans[2], key=HL_FILE_OT_KEY)["prompt"]
- assert read_from_opentelemetry_span(span=spans[3], key=HL_FILE_OT_KEY)["flow"]
- tool_trace_metadata = TRACE_FLOW_CONTEXT.get(spans[1].get_span_context().span_id)
- prompt_trace_metadata = TRACE_FLOW_CONTEXT.get(spans[2].get_span_context().span_id)
- flow_trace_metadata = TRACE_FLOW_CONTEXT.get(spans[3].get_span_context().span_id)
+ assert read_from_opentelemetry_span(span=spans[1], key=HL_FILE_KEY)["tool"]
+ assert read_from_opentelemetry_span(span=spans[2], key=HL_FILE_KEY)["prompt"]
+    # assert read_from_opentelemetry_span(span=spans[3], key=HL_FILE_KEY)["flow"]
+ assert (tool_trace_metadata := TRACE_FLOW_CONTEXT.get(spans[1].get_span_context().span_id))
+ assert (prompt_trace_metadata := TRACE_FLOW_CONTEXT.get(spans[2].get_span_context().span_id))
+ assert (flow_trace_metadata := TRACE_FLOW_CONTEXT.get(spans[3].get_span_context().span_id))
# THEN Tool span is a child of Prompt span
assert tool_trace_metadata["trace_parent_id"] == spans[2].context.span_id
assert tool_trace_metadata["is_flow_log"] is False
@@ -168,15 +169,16 @@ def test_flow_decorator_flow_in_flow(
# 4. Flow Span
spans = exporter.get_finished_spans()
assert len(spans) == 5
- assert read_from_opentelemetry_span(span=spans[1], key=HL_FILE_OT_KEY)["tool"]
- assert read_from_opentelemetry_span(span=spans[2], key=HL_FILE_OT_KEY)["prompt"]
- assert read_from_opentelemetry_span(span=spans[3], key=HL_FILE_OT_KEY)["flow"]
- assert read_from_opentelemetry_span(span=spans[4], key=HL_FILE_OT_KEY)["flow"]
-
- tool_trace_metadata = TRACE_FLOW_CONTEXT.get(spans[1].get_span_context().span_id)
- prompt_trace_metadata = TRACE_FLOW_CONTEXT.get(spans[2].get_span_context().span_id)
- nested_flow_trace_metadata = TRACE_FLOW_CONTEXT.get(spans[3].get_span_context().span_id)
- flow_trace_metadata = TRACE_FLOW_CONTEXT.get(spans[4].get_span_context().span_id)
+ assert read_from_opentelemetry_span(span=spans[1], key=HL_FILE_KEY)["tool"]
+ assert read_from_opentelemetry_span(span=spans[2], key=HL_FILE_KEY)["prompt"]
+ assert read_from_opentelemetry_span(span=spans[3], key=HL_FILE_KEY)["flow"] != {}
+ with pytest.raises(KeyError):
+ read_from_opentelemetry_span(span=spans[4], key=HL_FILE_KEY)["flow"] != {}
+
+ assert (tool_trace_metadata := TRACE_FLOW_CONTEXT.get(spans[1].get_span_context().span_id))
+ assert (prompt_trace_metadata := TRACE_FLOW_CONTEXT.get(spans[2].get_span_context().span_id))
+ assert (nested_flow_trace_metadata := TRACE_FLOW_CONTEXT.get(spans[3].get_span_context().span_id))
+ assert (flow_trace_metadata := TRACE_FLOW_CONTEXT.get(spans[4].get_span_context().span_id))
# THEN the parent of the Tool Log is the Prompt Log
assert tool_trace_metadata["trace_parent_id"] == spans[2].context.span_id
assert tool_trace_metadata["is_flow_log"] is False
@@ -223,7 +225,7 @@ def test_flow_decorator_with_hl_exporter(
# THEN the last uploaded span is the Flow
assert read_from_opentelemetry_span(
span=flow_span,
- key=HL_FILE_OT_KEY,
+ key=HL_FILE_KEY,
)["flow"]["attributes"] == { # type: ignore[index,call-overload]
"foo": "bar",
"baz": 7,
@@ -231,12 +233,12 @@ def test_flow_decorator_with_hl_exporter(
# THEN the second uploaded span is the Prompt
assert "prompt" in read_from_opentelemetry_span(
span=prompt_span,
- key=HL_FILE_OT_KEY,
+ key=HL_FILE_KEY,
)
# THEN the first uploaded span is the Tool
assert "tool" in read_from_opentelemetry_span(
span=tool_span,
- key=HL_FILE_OT_KEY,
+ key=HL_FILE_KEY,
)
# NOTE: The type: ignore comments are caused by the MagicMock used to mock the HTTP client
@@ -289,8 +291,8 @@ def test_flow_decorator_hl_exporter_flow_inside_flow(
# THEN the second to last uploaded span is the nested Flow
flow_span = mock_export_method.call_args_list[4][0][0][0]
nested_flow_span = mock_export_method.call_args_list[3][0][0][0]
- last_span_flow_metadata = TRACE_FLOW_CONTEXT.get(flow_span.get_span_context().span_id)
- flow_span_flow_metadata = TRACE_FLOW_CONTEXT.get(nested_flow_span.get_span_context().span_id)
+ assert (last_span_flow_metadata := TRACE_FLOW_CONTEXT.get(flow_span.get_span_context().span_id))
+ assert (flow_span_flow_metadata := TRACE_FLOW_CONTEXT.get(nested_flow_span.get_span_context().span_id))
assert flow_span_flow_metadata["trace_parent_id"] == flow_span.context.span_id
assert last_span_flow_metadata["is_flow_log"]
assert flow_span_flow_metadata["is_flow_log"]
diff --git a/tests/decorators/test_prompt_decorator.py b/tests/decorators/test_prompt_decorator.py
index b8c4b80d..3e9e690c 100644
--- a/tests/decorators/test_prompt_decorator.py
+++ b/tests/decorators/test_prompt_decorator.py
@@ -12,7 +12,7 @@
from groq import Groq
from groq import NotFoundError as GroqNotFoundError
from humanloop.decorators.prompt import prompt
-from humanloop.otel.constants import HL_FILE_OT_KEY
+from humanloop.otel.constants import HL_FILE_KEY
from humanloop.otel.helpers import is_humanloop_span, read_from_opentelemetry_span
from humanloop.types.model_providers import ModelProviders
from humanloop.types.prompt_kernel_request import PromptKernelRequest
@@ -194,7 +194,7 @@ def test_prompt_decorator_with_hl_processor(
prompt_kernel = PromptKernelRequest.model_validate(
read_from_opentelemetry_span(
span=spans[1],
- key=HL_FILE_OT_KEY,
+ key=HL_FILE_KEY,
)["prompt"] # type: ignore
)
# THEN temperature is intercepted from LLM provider call
@@ -234,7 +234,7 @@ def test_prompt_decorator_with_defaults(
spans = exporter.get_finished_spans()
# THEN the Prompt span is enhanced with information and forms a correct PromptKernel
prompt = PromptKernelRequest.model_validate(
- read_from_opentelemetry_span(span=spans[1], key=HL_FILE_OT_KEY)["prompt"] # type: ignore
+ read_from_opentelemetry_span(span=spans[1], key=HL_FILE_KEY)["prompt"] # type: ignore
)
# THEN temperature intercepted from LLM provider call is overridden by default value
assert prompt.temperature == 0.9
@@ -315,7 +315,7 @@ def test_prompt_attributes(
prompt_kernel = PromptKernelRequest.model_validate(
read_from_opentelemetry_span(
span=exporter.get_finished_spans()[1],
- key=HL_FILE_OT_KEY,
+ key=HL_FILE_KEY,
)["prompt"] # type: ignore
)
assert prompt_kernel.attributes == expected_attributes
diff --git a/tests/decorators/test_tool_decorator.py b/tests/decorators/test_tool_decorator.py
index f2b072e3..1a797839 100644
--- a/tests/decorators/test_tool_decorator.py
+++ b/tests/decorators/test_tool_decorator.py
@@ -2,7 +2,7 @@
import pytest
from humanloop.decorators.tool import tool
-from humanloop.otel.constants import HL_FILE_OT_KEY, HL_LOG_OT_KEY
+from humanloop.otel.constants import HL_FILE_KEY, HL_LOG_KEY
from humanloop.otel.helpers import read_from_opentelemetry_span
from jsonschema.protocols import Validator
from opentelemetry.sdk.trace import Tracer
@@ -34,8 +34,8 @@ def calculator(operation: str, num1: float, num2: float) -> float:
# THEN a single span is created and the log and file attributes are correctly set
spans = exporter.get_finished_spans()
assert len(spans) == 1
- hl_file: dict[str, Any] = read_from_opentelemetry_span(span=spans[0], key=HL_FILE_OT_KEY)
- hl_log: dict[str, Any] = read_from_opentelemetry_span(span=spans[0], key=HL_LOG_OT_KEY)
+ hl_file: dict[str, Any] = read_from_opentelemetry_span(span=spans[0], key=HL_FILE_KEY)
+ hl_log: dict[str, Any] = read_from_opentelemetry_span(span=spans[0], key=HL_LOG_KEY)
assert hl_log["output"] == result == 3
assert hl_log["inputs"] == {
"operation": "add",
diff --git a/tests/otel/test_helpers.py b/tests/otel/test_helpers.py
index bbca31a7..d3aa467e 100644
--- a/tests/otel/test_helpers.py
+++ b/tests/otel/test_helpers.py
@@ -51,7 +51,11 @@ def test_nested_object(test_span: Span):
def test_list(test_span: Span):
- write_to_opentelemetry_span(test_span, [{"x": 7, "y": "foo"}, {"z": "bar"}], "key")
+ write_to_opentelemetry_span(
+ test_span,
+ [{"x": 7, "y": "foo"}, {"z": "bar"}],
+ "key",
+ ) # type: ignore
# NOTE: attributes cannot be None at this point
assert dict(test_span.attributes) == { # type: ignore
"key.0.x": 7,
@@ -65,7 +69,10 @@ def test_list(test_span: Span):
def test_list_no_prefix(test_span: Span):
- write_to_opentelemetry_span(test_span, [{"x": 7, "y": "foo"}, {"z": "bar"}])
+ write_to_opentelemetry_span(
+ test_span,
+ [{"x": 7, "y": "foo"}, {"z": "bar"}], # type: ignore
+ )
# NOTE: attributes cannot be None at this point
assert dict(test_span.attributes) == { # type: ignore
"0.x": 7,
@@ -84,7 +91,7 @@ def test_multiple_nestings(test_span: Span):
[
{"x": 7, "y": "foo"},
[{"z": "bar"}, {"a": 42}],
- ],
+ ], # type: ignore
"key",
)
# NOTE: attributes cannot be None at this point
diff --git a/tests/utils/assets/models/__init__.py b/tests/utils/assets/models/__init__.py
index 3a1c852e..2cf01263 100644
--- a/tests/utils/assets/models/__init__.py
+++ b/tests/utils/assets/models/__init__.py
@@ -5,7 +5,7 @@
from .circle import CircleParams
from .object_with_defaults import ObjectWithDefaultsParams
from .object_with_optional_field import ObjectWithOptionalFieldParams
-from .shape import ShapeParams, Shape_CircleParams, Shape_SquareParams
+from .shape import Shape_CircleParams, Shape_SquareParams, ShapeParams
from .square import SquareParams
from .undiscriminated_shape import UndiscriminatedShapeParams
diff --git a/tests/utils/assets/models/circle.py b/tests/utils/assets/models/circle.py
index 3395545e..759fe3eb 100644
--- a/tests/utils/assets/models/circle.py
+++ b/tests/utils/assets/models/circle.py
@@ -2,7 +2,6 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
import typing_extensions
from humanloop.core.serialization import FieldMetadata
diff --git a/tests/utils/assets/models/object_with_defaults.py b/tests/utils/assets/models/object_with_defaults.py
index ef14f7b2..a977b1d2 100644
--- a/tests/utils/assets/models/object_with_defaults.py
+++ b/tests/utils/assets/models/object_with_defaults.py
@@ -3,7 +3,6 @@
# This file was auto-generated by Fern from our API Definition.
import typing_extensions
-import typing_extensions
class ObjectWithDefaultsParams(typing_extensions.TypedDict):
diff --git a/tests/utils/assets/models/object_with_optional_field.py b/tests/utils/assets/models/object_with_optional_field.py
index d6ab74e8..d667d6b8 100644
--- a/tests/utils/assets/models/object_with_optional_field.py
+++ b/tests/utils/assets/models/object_with_optional_field.py
@@ -2,12 +2,13 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
+import datetime as dt
import typing
+import uuid
+
import typing_extensions
from humanloop.core.serialization import FieldMetadata
-import datetime as dt
-import uuid
+
from .color import Color
from .shape import ShapeParams
from .undiscriminated_shape import UndiscriminatedShapeParams
diff --git a/tests/utils/assets/models/shape.py b/tests/utils/assets/models/shape.py
index 0160cdbd..4add344e 100644
--- a/tests/utils/assets/models/shape.py
+++ b/tests/utils/assets/models/shape.py
@@ -3,9 +3,10 @@
# This file was auto-generated by Fern from our API Definition.
from __future__ import annotations
-import typing_extensions
-import typing_extensions
+
import typing
+
+import typing_extensions
from humanloop.core.serialization import FieldMetadata
diff --git a/tests/utils/assets/models/square.py b/tests/utils/assets/models/square.py
index c7d6cfaf..da4a2111 100644
--- a/tests/utils/assets/models/square.py
+++ b/tests/utils/assets/models/square.py
@@ -2,7 +2,6 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
import typing_extensions
from humanloop.core.serialization import FieldMetadata
diff --git a/tests/utils/assets/models/undiscriminated_shape.py b/tests/utils/assets/models/undiscriminated_shape.py
index 68876a23..99f12b30 100644
--- a/tests/utils/assets/models/undiscriminated_shape.py
+++ b/tests/utils/assets/models/undiscriminated_shape.py
@@ -3,6 +3,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from .circle import CircleParams
from .square import SquareParams
diff --git a/tests/utils/test_serialization.py b/tests/utils/test_serialization.py
index 56591905..2ad8e1b5 100644
--- a/tests/utils/test_serialization.py
+++ b/tests/utils/test_serialization.py
@@ -1,10 +1,10 @@
# This file was auto-generated by Fern from our API Definition.
-from typing import List, Any
+from typing import Any, List
from humanloop.core.serialization import convert_and_respect_annotation_metadata
-from .assets.models import ShapeParams, ObjectWithOptionalFieldParams
+from .assets.models import ObjectWithOptionalFieldParams, ShapeParams
UNION_TEST: ShapeParams = {"radius_measurement": 1.0, "shape_type": "circle", "id": "1"}
UNION_TEST_CONVERTED = {"shapeType": "circle", "radiusMeasurement": 1.0, "id": "1"}
From f5f90a422c9c360d52d5b408b489daaedeabaf49 Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Tue, 5 Nov 2024 10:27:45 +0000
Subject: [PATCH 34/70] Fixed mypy error
---
src/humanloop/decorators/flow.py | 28 ++++++++++++++--------------
src/humanloop/decorators/prompt.py | 16 ++++++++--------
src/humanloop/eval_utils/__init__.py | 9 +++++----
src/humanloop/eval_utils/domain.py | 13 +++++++++----
src/humanloop/otel/__init__.py | 7 ++++---
src/humanloop/otel/exporter.py | 26 +++++++++++++++-----------
src/humanloop/otel/helpers.py | 2 +-
src/humanloop/otel/processor.py | 8 ++++----
tests/otel/test_helpers.py | 2 +-
9 files changed, 61 insertions(+), 50 deletions(-)
diff --git a/src/humanloop/decorators/flow.py b/src/humanloop/decorators/flow.py
index 1644778d..32c6a46e 100644
--- a/src/humanloop/decorators/flow.py
+++ b/src/humanloop/decorators/flow.py
@@ -4,6 +4,7 @@
from opentelemetry.sdk.trace import Span
from opentelemetry.trace import Tracer
+from opentelemetry.util.types import AttributeValue
from humanloop.decorators.helpers import args_to_inputs
from humanloop.eval_utils import File
@@ -16,10 +17,11 @@
def flow(
opentelemetry_tracer: Tracer,
path: Optional[str] = None,
- attributes: Optional[dict[str, Any]] = None,
+ attributes: Optional[dict[str, AttributeValue]] = None,
):
if attributes is None:
attributes = {}
+ attributes = {k: v for k, v in attributes.items() if v is not None}
def decorator(func: Callable):
@wraps(func)
@@ -29,15 +31,14 @@ def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any:
span_id = span.get_span_context().span_id
if span.parent:
span_parent_id = span.parent.span_id
- else:
- span_parent_id = None
- parent_trace_metadata = TRACE_FLOW_CONTEXT.get(span_parent_id)
- if parent_trace_metadata:
- TRACE_FLOW_CONTEXT[span_id] = FlowContext(
- trace_id=span_id,
- trace_parent_id=span_parent_id,
- is_flow_log=True,
- )
+ parent_trace_metadata = TRACE_FLOW_CONTEXT.get(span_parent_id)
+ if parent_trace_metadata:
+ TRACE_FLOW_CONTEXT[span_id] = FlowContext(
+ trace_id=span_id,
+ trace_parent_id=span_parent_id,
+ is_flow_log=True,
+ )
+
else:
# The Flow Log is not nested under another Flow Log
# Set the trace_id to the current span_id
@@ -50,11 +51,10 @@ def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any:
span.set_attribute(HL_PATH_KEY, path if path else func.__name__)
span.set_attribute(HL_FILE_TYPE_KEY, "flow")
if attributes:
- print("HOWDIE", attributes)
write_to_opentelemetry_span(
span=span,
key=f"{HL_FILE_KEY}.flow.attributes",
- value=attributes,
+ value=attributes, # type: ignore
)
# Call the decorated function
@@ -71,7 +71,7 @@ def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any:
write_to_opentelemetry_span(
span=span,
key=HL_LOG_KEY,
- value=flow_log,
+ value=flow_log, # type: ignore
)
# Return the output of the decorated function
@@ -80,7 +80,7 @@ def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any:
func.file = File( # type: ignore
path=path if path else func.__name__,
type="flow",
- version=FlowDict(attributes=attributes),
+ version=FlowDict(attributes=attributes), # type: ignore
is_decorated=True,
callable=wrapper,
)
diff --git a/src/humanloop/decorators/prompt.py b/src/humanloop/decorators/prompt.py
index 7e9c63aa..e45556ea 100644
--- a/src/humanloop/decorators/prompt.py
+++ b/src/humanloop/decorators/prompt.py
@@ -80,13 +80,13 @@ def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any:
span_id = span.get_span_context().span_id
if span.parent:
span_parent_id = span.parent.span_id
- else:
- span_parent_id = None
- parent_trace_metadata = TRACE_FLOW_CONTEXT.get(span_parent_id, {})
- if parent_trace_metadata:
- TRACE_FLOW_CONTEXT[span_id] = FlowContext(
- trace_id=parent_trace_metadata["trace_id"], trace_parent_id=span_parent_id, is_flow_log=False
- )
+ parent_trace_metadata = TRACE_FLOW_CONTEXT.get(span_parent_id, {})
+ if parent_trace_metadata:
+ TRACE_FLOW_CONTEXT[span_id] = FlowContext(
+ trace_id=parent_trace_metadata["trace_id"],
+ trace_parent_id=span_parent_id,
+ is_flow_log=False,
+ )
span.set_attribute(HL_PATH_KEY, path if path else func.__name__)
span.set_attribute(HL_FILE_TYPE_KEY, "prompt")
@@ -94,7 +94,7 @@ def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any:
write_to_opentelemetry_span(
span=span,
key=f"{HL_FILE_KEY}.prompt",
- value=prompt_kernel,
+ value=prompt_kernel, # type: ignore
)
# Call the decorated function
diff --git a/src/humanloop/eval_utils/__init__.py b/src/humanloop/eval_utils/__init__.py
index a2a7d750..1dceae85 100644
--- a/src/humanloop/eval_utils/__init__.py
+++ b/src/humanloop/eval_utils/__init__.py
@@ -198,7 +198,7 @@ def _run_eval(
attributes={"code": inspect.getsource(eval_function)},
evaluator_type="external",
)
- _ = client.evaluators.upsert(
+ client.evaluators.upsert(
id=evaluator.get("id"),
path=evaluator.get("path"),
spec=spec,
@@ -226,7 +226,8 @@ def _run_eval(
try:
evaluation = client.evaluations.create(
name=name,
- evaluators=[{"path": e["path"]} for e in evaluators],
+ dataset={"file_id": hl_dataset.id},
+ evaluators=[{"path": e["path"]} for e in evaluators], # type: ignore
file={"id": hl_file.id},
)
except ApiError as error_:
@@ -234,7 +235,7 @@ def _run_eval(
if error_.status_code == 409:
evals = client.evaluations.list(file_id=hl_file.id, size=50)
for page in evals.iter_pages():
- evaluation = next((e for e in page.items if e.name == name), None)
+ evaluation = next((e for e in page.items if e.name == name), None) # type: ignore
else:
raise error_
if not evaluation:
@@ -428,7 +429,7 @@ def get_score_from_evaluator_stat(
if stat.total_logs:
score = round(stat.num_true / stat.total_logs, 2)
elif isinstance(stat, NumericStats):
- score = round(stat.mean, 2)
+ score = round(stat.mean, 2) # type: ignore
else:
raise ValueError(f"Unsupported Evaluator Stat type: {type(stat)}")
return score # type: ignore
diff --git a/src/humanloop/eval_utils/domain.py b/src/humanloop/eval_utils/domain.py
index 8aaab9f0..102cf5fa 100644
--- a/src/humanloop/eval_utils/domain.py
+++ b/src/humanloop/eval_utils/domain.py
@@ -72,14 +72,19 @@ class Dataset(Identifiers):
class Evaluator(Identifiers):
"""The Evaluator to provide judgments for this Evaluation."""
+
     args_type: NotRequired[EvaluatorArgumentsType]
     """The type of arguments the Evaluator expects - only required for local Evaluators."""
+
     return_type: NotRequired[EvaluatorReturnTypeEnum]
     """The type of return value the Evaluator produces - only required for local Evaluators."""
+
     callable: NotRequired[Callable]
     """The function to run on the logs to produce the judgment - only required for local Evaluators."""
+
     custom_logger: NotRequired[Callable]
     """optional function that logs the output judgment from your Evaluator to Humanloop, if provided, it will be called as follows:
```
judgment = callable(log_dict)
log = custom_logger(client, judgment)
diff --git a/src/humanloop/otel/__init__.py b/src/humanloop/otel/__init__.py
index 4b461f10..86420f06 100644
--- a/src/humanloop/otel/__init__.py
+++ b/src/humanloop/otel/__init__.py
@@ -3,6 +3,7 @@
from mypy.build import TypedDict
from opentelemetry.context import Context
from opentelemetry.sdk.trace import TracerProvider
+from typing_extensions import NotRequired
from humanloop.otel.helpers import module_is_installed
@@ -57,9 +58,9 @@ def instrument_provider(provider: TracerProvider):
class FlowContext(TypedDict):
- trace_id: str
- trace_parent_id: Optional[str]
- is_flow_log: bool
+ trace_id: NotRequired[str]
+ trace_parent_id: NotRequired[Optional[int]]
+ is_flow_log: NotRequired[bool]
TRACE_FLOW_CONTEXT: dict[int, FlowContext] = {}
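
Making every `FlowContext` key `NotRequired` lets call sites build up trace metadata incrementally without tripping the type checker; note `trace_parent_id` also changes from `str` to `int`, matching OTel's integer span IDs. A self-contained illustration of the semantics:

```python
from typing import Optional
from typing_extensions import NotRequired, TypedDict

class FlowContext(TypedDict):
    trace_id: NotRequired[str]
    trace_parent_id: NotRequired[Optional[int]]
    is_flow_log: NotRequired[bool]

ctx: FlowContext = {}            # valid: all keys are optional
ctx["trace_parent_id"] = 0x1F    # filled in once the parent span is known
ctx["is_flow_log"] = True
```
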
diff --git a/src/humanloop/otel/exporter.py b/src/humanloop/otel/exporter.py
index 8671b64f..c5e03880 100644
--- a/src/humanloop/otel/exporter.py
+++ b/src/humanloop/otel/exporter.py
@@ -12,7 +12,7 @@
from humanloop.core.request_options import RequestOptions
from humanloop.eval_utils import EVALUATION_CONTEXT, EvaluationContext
-from humanloop.otel import TRACE_FLOW_CONTEXT
+from humanloop.otel import TRACE_FLOW_CONTEXT, FlowContext
from humanloop.otel.constants import HL_FILE_KEY, HL_FILE_TYPE_KEY, HL_LOG_KEY
from humanloop.otel.helpers import is_humanloop_span, read_from_opentelemetry_span
from humanloop.requests.flow_kernel_request import FlowKernelRequestParams
@@ -44,10 +44,11 @@ def __init__(
"""
super().__init__()
self._client = client
- self._span_id_to_uploaded_log_id: dict[
- str, str
- ] = {} # Uploaded spans translate to a Log on Humanloop. The IDs are required to link Logs in a Flow Trace
- self._upload_queue: Queue = Queue() # Work queue for the threads uploading the spans
+ # Uploaded spans translate to a Log on Humanloop. The IDs are required to link Logs in a Flow Trace
+ self._span_id_to_uploaded_log_id: dict[int, str] = {}
+ # Work queue for the threads uploading the spans
+ self._upload_queue: Queue = Queue()
+ # Worker threads to export the spans
self._threads: list[Thread] = [
Thread(target=self._do_work, daemon=True) for _ in range(worker_threads or self.DEFAULT_NUMBER_THREADS)
]
@@ -125,7 +126,7 @@ def _do_work(self):
def _export_span_dispatch(self, span: ReadableSpan, evaluation_context: EvaluationContext) -> None:
hl_file = read_from_opentelemetry_span(span, key=HL_FILE_KEY)
- file_type = span.attributes.get(HL_FILE_TYPE_KEY)
+ file_type = span._attributes.get(HL_FILE_TYPE_KEY) # type: ignore
if file_type == "prompt":
export_func = self._export_prompt
@@ -159,7 +160,7 @@ def _export_prompt(self, span: ReadableSpan, evaluation_context: EvaluationConte
else:
log_object["messages"] = list(log_object["messages"].values())
trace_metadata = TRACE_FLOW_CONTEXT.get(span.get_span_context().span_id)
- if trace_metadata and "trace_parent_id" in trace_metadata:
+ if trace_metadata and "trace_parent_id" in trace_metadata and trace_metadata["trace_parent_id"]:
trace_parent_id = self._span_id_to_uploaded_log_id[trace_metadata["trace_parent_id"]]
else:
trace_parent_id = None
@@ -188,8 +189,8 @@ def _export_prompt(self, span: ReadableSpan, evaluation_context: EvaluationConte
def _export_tool(self, span: ReadableSpan, evaluation_context: EvaluationContext) -> None:
file_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HL_FILE_KEY)
log_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HL_LOG_KEY)
- trace_metadata = TRACE_FLOW_CONTEXT.get(span.get_span_context().span_id, {})
- if "trace_parent_id" in trace_metadata:
+ trace_metadata: FlowContext = TRACE_FLOW_CONTEXT.get(span.get_span_context().span_id, {})
+ if "trace_parent_id" in trace_metadata and trace_metadata["trace_parent_id"]:
trace_parent_id = self._span_id_to_uploaded_log_id.get(
trace_metadata["trace_parent_id"],
)
@@ -218,10 +219,13 @@ def _export_tool(self, span: ReadableSpan, evaluation_context: EvaluationContext
def _export_flow(self, span: ReadableSpan, evaluation_context: EvaluationContext) -> None:
file_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HL_FILE_KEY)
log_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HL_LOG_KEY)
- trace_metadata = TRACE_FLOW_CONTEXT.get(span.get_span_context().span_id, {})
+ trace_metadata: FlowContext = TRACE_FLOW_CONTEXT.get(
+ span.get_span_context().span_id,
+ {},
+ )
if "trace_parent_id" in trace_metadata:
trace_parent_id = self._span_id_to_uploaded_log_id.get(
- trace_metadata["trace_parent_id"],
+ trace_metadata["trace_parent_id"], # type: ignore
)
# Cannot write falsy values except None in OTel Span attributes
# If a None write is attempted then the attribute is removed
diff --git a/src/humanloop/otel/helpers.py b/src/humanloop/otel/helpers.py
index c920f7a4..9c7b8577 100644
--- a/src/humanloop/otel/helpers.py
+++ b/src/humanloop/otel/helpers.py
@@ -64,7 +64,7 @@ def write_to_opentelemetry_span(
if isinstance(value, list):
to_write_copy = _list_to_ott(value)
else:
- to_write_copy = dict(value)
+ to_write_copy = dict(value) # type: ignore
linearised_attributes: dict[str, AttributeValue] = {}
work_stack: list[tuple[str, Union[AttributeValue, NestedDict]]] = [(key, to_write_copy)]
"""
diff --git a/src/humanloop/otel/processor.py b/src/humanloop/otel/processor.py
index f818d578..5ff112e5 100644
--- a/src/humanloop/otel/processor.py
+++ b/src/humanloop/otel/processor.py
@@ -71,14 +71,14 @@ def _is_instrumentor_span(span: ReadableSpan) -> bool:
def _process_span_dispatch(span: ReadableSpan, children_spans: list[ReadableSpan]):
- file_type = span.attributes[HL_FILE_TYPE_KEY]
+ file_type = span.attributes[HL_FILE_TYPE_KEY] # type: ignore
# Processing common to all Humanloop File types
if span.start_time:
- span._attributes[f"{HL_LOG_KEY}.start_time"] = span.start_time / 1e9
+ span._attributes[f"{HL_LOG_KEY}.start_time"] = int(span.start_time / 1e9) # type: ignore
if span.end_time:
- span._attributes[f"{HL_LOG_KEY}.end_time"] = span.end_time / 1e9
- span._attributes[f"{HL_LOG_KEY}.created_at"] = span.end_time / 1e9
+ span._attributes[f"{HL_LOG_KEY}.end_time"] = int(span.end_time / 1e9) # type: ignore
+ span._attributes[f"{HL_LOG_KEY}.created_at"] = int(span.end_time / 1e9) # type: ignore
# Processing specific to each Humanloop File type
if file_type == "prompt":
diff --git a/tests/otel/test_helpers.py b/tests/otel/test_helpers.py
index d3aa467e..635030de 100644
--- a/tests/otel/test_helpers.py
+++ b/tests/otel/test_helpers.py
@@ -53,7 +53,7 @@ def test_nested_object(test_span: Span):
def test_list(test_span: Span):
write_to_opentelemetry_span(
test_span,
- [{"x": 7, "y": "foo"}, {"z": "bar"}],
+ [{"x": 7, "y": "foo"}, {"z": "bar"}], # type: ignore
"key",
) # type: ignore
# NOTE: attributes cannot be None at this point
From 02f45a67aefd6f2e334bf340921ca638a378ca56 Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Tue, 5 Nov 2024 14:15:09 +0000
Subject: [PATCH 35/70] Fixed warning from OTel
---
src/humanloop/client.py | 16 ++-
src/humanloop/eval_utils/__init__.py | 113 ++++++++++++++++--
src/humanloop/eval_utils/context.py | 17 +++
src/humanloop/eval_utils/shared.py | 50 --------
.../eval_utils/{domain.py => types.py} | 0
src/humanloop/otel/__init__.py | 20 +---
src/humanloop/otel/exporter.py | 33 +++--
src/humanloop/otel/helpers.py | 3 +-
8 files changed, 159 insertions(+), 93 deletions(-)
delete mode 100644 src/humanloop/eval_utils/shared.py
rename src/humanloop/eval_utils/{domain.py => types.py} (100%)
diff --git a/src/humanloop/client.py b/src/humanloop/client.py
index bded949e..773e6570 100644
--- a/src/humanloop/client.py
+++ b/src/humanloop/client.py
@@ -18,8 +18,8 @@
from .decorators.prompt import prompt as prompt_decorator_factory
from .decorators.tool import tool as tool_decorator_factory
from .environment import HumanloopEnvironment
-from humanloop.eval_utils.domain import Dataset, Evaluator, EvaluatorCheck, File
-from humanloop.eval_utils import _run_eval
+from humanloop.eval_utils.types import Dataset, Evaluator, EvaluatorCheck, File
+from humanloop.eval_utils import run_eval
from .evaluations.client import EvaluationsClient
from .otel import instrument_provider
from .otel.exporter import HumanloopSpanExporter
@@ -52,7 +52,7 @@ def run(
if self.client is None:
raise ValueError("Need Humanloop client defined to run evals")
- return _run_eval(
+ return run_eval(
client=self.client,
file=file,
name=name,
@@ -217,7 +217,7 @@ def call_llm(messages):
:param path: The path where the Prompt is created. If not
provided, the function name is used as the path and the File
- is created in the root of your Humanloop's organization workspace.
+ is created in the root of your Humanloop organization workspace.
:param model: Name of the model used by the Prompt.
@@ -258,6 +258,10 @@ def call_llm(messages):
:param response_format: The format of the response.
Only `{"type": "json_object"}` is currently supported
for chat.
+
+ :param attributes: Additional fields to describe the Prompt. Helpful to
+ separate Prompt versions from each other with details on how they
+ were created or used.
"""
return prompt_decorator_factory(
opentelemetry_tracer=self._opentelemetry_tracer,
@@ -342,7 +346,7 @@ def calculator(a: int, b: Optional[int]) -> int:
:param path: The path to the Tool. If not provided, the function name
will be used as the path and the File will be created in the root
- of your Humanloop's organization workspace.
+       of your Humanloop organization workspace.
:param setup_values: Values needed to setup the Tool, defined in
JSON Schema format: https://json-schema.org/
@@ -404,7 +408,7 @@ def entrypoint():
:param path: The path to the Flow. If not provided, the function name
will be used as the path and the File will be created in the root
- of your Humanloop's organization workspace.
+       of your Humanloop organization workspace.
:param attributes: A key-value object identifying the Flow Version.
"""
diff --git a/src/humanloop/eval_utils/__init__.py b/src/humanloop/eval_utils/__init__.py
index 1dceae85..6a61d2ea 100644
--- a/src/humanloop/eval_utils/__init__.py
+++ b/src/humanloop/eval_utils/__init__.py
@@ -30,10 +30,9 @@
from humanloop.client import BaseHumanloop
from humanloop.core.api_error import ApiError
from humanloop.eval_utils.context import EVALUATION_CONTEXT, EvaluationContext
-from humanloop.eval_utils.domain import Dataset, Evaluator, EvaluatorCheck, File
+from humanloop.eval_utils.types import Dataset, Evaluator, EvaluatorCheck, File
# We use TypedDicts for requests, which is consistent with the rest of the SDK
-from humanloop.eval_utils.shared import add_log_to_evaluation
from humanloop.requests import CodeEvaluatorRequestParams as CodeEvaluatorDict
from humanloop.requests import ExternalEvaluatorRequestParams as ExternalEvaluator
from humanloop.requests import FlowKernelRequestParams as FlowDict
@@ -44,6 +43,7 @@
from humanloop.types import BooleanEvaluatorStatsResponse as BooleanStats
from humanloop.types import DatapointResponse as Datapoint
from humanloop.types import EvaluationResponse, EvaluationStats, VersionStatsResponse
+from humanloop.types.datapoint_response_target_value import DatapointResponseTargetValue
# Responses are Pydantic models and we leverage them for improved request validation
from humanloop.types import FlowKernelRequest as Flow
@@ -80,7 +80,53 @@
RESET = "\033[0m"
-def _run_eval(
+class _SimpleProgressBar:
+ """Thread-safe progress bar for the console."""
+
+ def __init__(self, total: int):
+ if total <= 0:
+ self._total = 1
+ else:
+ self._total = total
+ self._progress = 0
+ self._lock = threading.Lock()
+ self._start_time = None
+
+ def increment(self):
+ """Increment the progress bar by one finished task."""
+ with self._lock:
+ self._progress += 1
+ if self._start_time is None:
+ self._start_time = time.time()
+
+ bar_length = 40
+ block = int(round(bar_length * self._progress / self._total))
+ bar = "#" * block + "-" * (bar_length - block)
+
+ percentage = (self._progress / self._total) * 100
+ elapsed_time = time.time() - self._start_time
+ time_per_item = elapsed_time / self._progress if self._progress > 0 else 0
+ eta = (self._total - self._progress) * time_per_item
+
+ progress_display = f"\r[{bar}] {self._progress}/{self._total}"
+ progress_display += f" ({percentage:.2f}%)"
+
+ if self._progress < self._total:
+ progress_display += f" | ETA: {int(eta)}s"
+ else:
+ progress_display += " | DONE"
+
+ sys.stderr.write(progress_display)
+
+ if self._progress >= self._total:
+ sys.stderr.write("\n")
+
+
+# Module-level so it can be shared by threads.
+_PROGRESS_BAR: Optional[_SimpleProgressBar] = None
+
+
+def run_eval(
client: BaseHumanloop,
file: Union[File, Callable],
name: Optional[str],
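
`_PROGRESS_BAR` lives at module level so every worker thread shares a single instance, and the lock inside `increment` keeps concurrent updates consistent. Hypothetical usage, assuming the class above is in scope:

```python
from concurrent.futures import ThreadPoolExecutor

bar = _SimpleProgressBar(total=100)
with ThreadPoolExecutor(max_workers=8) as pool:
    for _ in range(100):
        # each finished datapoint advances the shared bar by one
        pool.submit(bar.increment)
```
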
@@ -205,6 +251,16 @@ def _run_eval(
)
function_ = typing.cast(Callable, function_)
+ # Validate signature of the called function
+ function_signature = inspect.signature(function_)
+ parameter_names = list(function_signature.parameters.keys())
+ if parameter_names != ["inputs", "messages"] and parameter_names != ["inputs"]:
+ raise ValueError(
+ f"Your {type_}'s `callable` must have the signature `def "
+ "function(inputs: dict, messages: Optional[dict] = None):` "
+ "or `def function(inputs: dict):`"
+ )
+
# Validate upfront that the local Evaluators and Dataset fit
requires_target = False
for local_evaluator in local_evaluators:
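
The new upfront check inspects only the parameter names, so any callable whose signature is exactly `(inputs)` or `(inputs, messages)` passes. For example (the body is invented for illustration):

```python
from typing import Optional

def answer_question(inputs: dict, messages: Optional[list] = None) -> str:
    # names must match exactly; annotations and defaults are not validated
    return f"Answer: {inputs.get('question', '')}"
```
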
@@ -280,11 +336,9 @@ def process_datapoint(dp: Datapoint, evaluated_file_id: str, run_id: str):
messages=datapoint_dict["messages"],
)
else:
- # function_ is not None at this point
output = function_(**datapoint_dict["inputs"]) # type: ignore
if custom_logger:
- # function_ is not None at this point
- log = function_(client=client, output=output) # type: ignore
+ log = custom_logger(client=client, output=output) # type: ignore
else:
if not isinstance(output, str):
raise ValueError(
@@ -307,7 +361,7 @@ def process_datapoint(dp: Datapoint, evaluated_file_id: str, run_id: str):
)
logger.warning(msg=f"\nYour {type_}'s `callable` failed for Datapoint: {dp.id}. \n Error: {str(e)}")
- add_log_to_evaluation(
+ _add_log_to_evaluation(
client=client,
log=log,
datapoint_target=dp.target,
@@ -491,7 +545,7 @@ def get_evaluator_stats_by_path(
return evaluator_stats_by_path # type: ignore
-def check_evaluation_threshold(
+def _check_evaluation_threshold(
evaluation: EvaluationResponse,
stats: EvaluationStats,
evaluator_path: str,
@@ -521,7 +575,7 @@ def check_evaluation_threshold(
raise ValueError(f"Evaluator {evaluator_path} not found in the stats.")
-def check_evaluation_improvement(
+def _check_evaluation_improvement(
evaluation: EvaluationResponse,
evaluator_path: str,
stats: EvaluationStats,
@@ -562,3 +616,44 @@ def check_evaluation_improvement(
return False, latest_score, diff
else:
raise ValueError(f"Evaluator {evaluator_path} not found in the stats.")
+
+
+def _add_log_to_evaluation(
+ client: BaseHumanloop,
+ log: dict,
+ datapoint_target: typing.Optional[typing.Dict[str, DatapointResponseTargetValue]],
+ local_evaluators: list[Evaluator],
+):
+ for local_evaluator in local_evaluators:
+ start_time = datetime.now()
+ try:
+ eval_function = local_evaluator["callable"]
+ if local_evaluator["args_type"] == "target_required":
+ judgement = eval_function(
+ log,
+ datapoint_target,
+ )
+ else:
+ judgement = eval_function(log)
+
+ if local_evaluator.get("custom_logger", None):
+ local_evaluator["custom_logger"](judgement, start_time, datetime.now())
+ else:
+ _ = client.evaluators.log(
+ parent_id=log["id"],
+ judgment=judgement,
+ id=local_evaluator.get("id"),
+ path=local_evaluator.get("path"),
+ start_time=start_time,
+ end_time=datetime.now(),
+ )
+ except Exception as e:
+ _ = client.evaluators.log(
+ parent_id=log["id"],
+ path=local_evaluator.get("path"),
+ id=local_evaluator.get("id"),
+ error=str(e),
+ start_time=start_time,
+ end_time=datetime.now(),
+ )
+ logger.warning(f"\nEvaluator {local_evaluator['path']} failed with error {str(e)}")
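For orientation, here is a hedged sketch of invoking the now-public `run_eval` with the signature enforced above. The paths, IDs, and datapoints are illustrative only, and the module docstring still recommends going through the Humanloop client rather than calling this directly:

```python
from typing import Optional

from humanloop import Humanloop
from humanloop.eval_utils import run_eval

client = Humanloop(api_key="...")  # placeholder key

# Must match the validated signature: (inputs) or (inputs, messages).
def answer(inputs: dict, messages: Optional[list] = None) -> str:
    return f"Stub answer to: {inputs['question']}"

# Local Evaluators are called as callable(log) or, when
# args_type == "target_required", as callable(log, target).
def exact_match(log: dict, target: dict) -> bool:
    return log["output"] == target.get("answer")

checks = run_eval(
    client=client,
    file={"path": "demo/qa-flow", "type": "flow", "callable": answer},
    name="signature-demo",
    dataset={
        "path": "demo/qa-dataset",
        "datapoints": [
            {"inputs": {"question": "What is 2 + 2?"}, "target": {"answer": "4"}},
        ],
    },
    evaluators=[
        {
            "path": "demo/exact-match",
            "callable": exact_match,
            "args_type": "target_required",
            "return_type": "boolean",
        },
    ],
)
```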
diff --git a/src/humanloop/eval_utils/context.py b/src/humanloop/eval_utils/context.py
index 4f949fe5..89082742 100644
--- a/src/humanloop/eval_utils/context.py
+++ b/src/humanloop/eval_utils/context.py
@@ -4,9 +4,26 @@
class EvaluationContext(TypedDict):
+ """Context required by the Exporter when uploading a Log to Humanloop.
+
+ When using the evaluation run utility on decorated functions, the utility
+    does not control the Log upload - the Exporter does. This context class
+ propagates the required information to the exporter and allows it to notify
+ the utility via a callback.
+ """
+
+ """Required for uploading the Log in the Exporter."""
source_datapoint_id: str
+
+ """Exporter calls this so the eval_utils are notified to evaluate an uploaded Log."""
upload_callback: Callable[[dict], None]
+
+ """Logs of multiple Files can be uploaded by the Exporter while
+ evaluating a single one of them. This identifies the File that
+ owns Logs that are part of the Evaluation."""
evaluated_file_id: str
+
+ """Required for uploading the Log in the Exporter."""
run_id: str
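A minimal sketch of how the run utility might populate this context at this point in the series (field values are placeholders; note that `evaluated_file_id` is renamed in a later patch):

```python
from humanloop.eval_utils.context import EvaluationContext

def on_upload(log: dict) -> None:
    # Stand-in for the real callback, which would run the local
    # Evaluators against the freshly uploaded Log.
    print(f"uploaded Log {log.get('id')}")

context: EvaluationContext = {
    "source_datapoint_id": "dp_123",  # placeholder IDs throughout
    "upload_callback": on_upload,
    "evaluated_file_id": "fl_456",
    "run_id": "run_789",
}
```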
diff --git a/src/humanloop/eval_utils/shared.py b/src/humanloop/eval_utils/shared.py
deleted file mode 100644
index 6468d2cc..00000000
--- a/src/humanloop/eval_utils/shared.py
+++ /dev/null
@@ -1,50 +0,0 @@
-import logging
-import typing
-from datetime import datetime
-
-from humanloop.base_client import BaseHumanloop
-from humanloop.eval_utils.domain import Evaluator
-from humanloop.types.datapoint_response_target_value import DatapointResponseTargetValue
-
-logger = logging.getLogger("humanloop.sdk")
-
-
-def add_log_to_evaluation(
- client: BaseHumanloop,
- log: dict,
- datapoint_target: typing.Optional[typing.Dict[str, DatapointResponseTargetValue]],
- local_evaluators: list[Evaluator],
-):
- for local_evaluator in local_evaluators:
- start_time = datetime.now()
- try:
- eval_function = local_evaluator["callable"]
- if local_evaluator["args_type"] == "target_required":
- judgement = eval_function(
- log,
- datapoint_target,
- )
- else:
- judgement = eval_function(log)
-
- if local_evaluator.get("custom_logger", None):
- local_evaluator["custom_logger"](judgement, start_time, datetime.now())
- else:
- _ = client.evaluators.log(
- parent_id=log['id'],
- judgment=judgement,
- id=local_evaluator.get("id"),
- path=local_evaluator.get("path"),
- start_time=start_time,
- end_time=datetime.now(),
- )
- except Exception as e:
- _ = client.evaluators.log(
- parent_id=log['id'],
- path=local_evaluator.get("path"),
- id=local_evaluator.get("id"),
- error=str(e),
- start_time=start_time,
- end_time=datetime.now(),
- )
- logger.warning(f"\nEvaluator {local_evaluator['path']} failed with error {str(e)}")
diff --git a/src/humanloop/eval_utils/domain.py b/src/humanloop/eval_utils/types.py
similarity index 100%
rename from src/humanloop/eval_utils/domain.py
rename to src/humanloop/eval_utils/types.py
diff --git a/src/humanloop/otel/__init__.py b/src/humanloop/otel/__init__.py
index 86420f06..99e5dde3 100644
--- a/src/humanloop/otel/__init__.py
+++ b/src/humanloop/otel/__init__.py
@@ -1,29 +1,11 @@
from typing import Optional
-from mypy.build import TypedDict
-from opentelemetry.context import Context
+from typing import TypedDict
from opentelemetry.sdk.trace import TracerProvider
from typing_extensions import NotRequired
from humanloop.otel.helpers import module_is_installed
-"""
-Humanloop SDK uses the Baggage concept from OTel
-to store the Trace metadata. Read more here:
-https://opentelemetry.io/docs/concepts/signals/baggage/
-
-The top of the stack contains the Trace information of
-the parent Span.
-
-When a Span is created by a decorator, the metadata of
-that Span is pushed to the stack so the children can
-peek at it and determine its parent in a Flow Trace.
-
-When the parent Span is completed, the context is popped
-off the stack.
-"""
-_BAGGAGE_CONTEXT_STACK: list[Context] = [Context()]
-
def instrument_provider(provider: TracerProvider):
"""Add Instrumentors to the TracerProvider.
diff --git a/src/humanloop/otel/exporter.py b/src/humanloop/otel/exporter.py
index c5e03880..f6892e9d 100644
--- a/src/humanloop/otel/exporter.py
+++ b/src/humanloop/otel/exporter.py
@@ -124,7 +124,11 @@ def _do_work(self):
self._upload_queue.put((span_to_export, evaluation_context))
self._upload_queue.task_done()
- def _export_span_dispatch(self, span: ReadableSpan, evaluation_context: EvaluationContext) -> None:
+ def _export_span_dispatch(
+ self,
+ span: ReadableSpan,
+ evaluation_context: EvaluationContext,
+ ) -> None:
hl_file = read_from_opentelemetry_span(span, key=HL_FILE_KEY)
file_type = span._attributes.get(HL_FILE_TYPE_KEY) # type: ignore
@@ -138,7 +142,11 @@ def _export_span_dispatch(self, span: ReadableSpan, evaluation_context: Evaluati
raise NotImplementedError(f"Unknown span type: {hl_file}")
export_func(span=span, evaluation_context=evaluation_context)
- def _export_prompt(self, span: ReadableSpan, evaluation_context: EvaluationContext) -> None:
+ def _export_prompt(
+ self,
+ span: ReadableSpan,
+ evaluation_context: EvaluationContext,
+ ) -> None:
file_object: dict[str, Any] = read_from_opentelemetry_span(
span,
key=HL_FILE_KEY,
@@ -182,11 +190,16 @@ def _export_prompt(self, span: ReadableSpan, evaluation_context: EvaluationConte
request_options=RequestOptions(max_retries=3),
)
if evaluation_context and log_response.prompt_id == evaluation_context["evaluated_file_id"]:
+ # Multiple Logs could be triggered by the Evaluation of a single File
log_object["id"] = log_response.id
evaluation_context["upload_callback"](log_object)
self._span_id_to_uploaded_log_id[span.context.span_id] = log_response.id
- def _export_tool(self, span: ReadableSpan, evaluation_context: EvaluationContext) -> None:
+ def _export_tool(
+ self,
+ span: ReadableSpan,
+ evaluation_context: EvaluationContext,
+ ) -> None:
file_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HL_FILE_KEY)
log_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HL_LOG_KEY)
trace_metadata: FlowContext = TRACE_FLOW_CONTEXT.get(span.get_span_context().span_id, {})
@@ -194,6 +207,8 @@ def _export_tool(self, span: ReadableSpan, evaluation_context: EvaluationContext
trace_parent_id = self._span_id_to_uploaded_log_id.get(
trace_metadata["trace_parent_id"],
)
+ else:
+ trace_parent_id = None
tool = file_object["tool"]
if not tool.get("attributes"):
tool["attributes"] = {}
@@ -212,11 +227,16 @@ def _export_tool(self, span: ReadableSpan, evaluation_context: EvaluationContext
request_options=RequestOptions(max_retries=3),
)
if evaluation_context and log_response.tool_id == evaluation_context["evaluated_file_id"]:
+ # Multiple Logs could be triggered by the Evaluation of a single File
log_object["id"] = log_response.id
evaluation_context["upload_callback"](log_object)
self._span_id_to_uploaded_log_id[span.context.span_id] = log_response.id
- def _export_flow(self, span: ReadableSpan, evaluation_context: EvaluationContext) -> None:
+ def _export_flow(
+ self,
+ span: ReadableSpan,
+ evaluation_context: EvaluationContext,
+ ) -> None:
file_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HL_FILE_KEY)
log_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HL_LOG_KEY)
trace_metadata: FlowContext = TRACE_FLOW_CONTEXT.get(
@@ -227,10 +247,6 @@ def _export_flow(self, span: ReadableSpan, evaluation_context: EvaluationContext
trace_parent_id = self._span_id_to_uploaded_log_id.get(
trace_metadata["trace_parent_id"], # type: ignore
)
- # Cannot write falsy values except None in OTel Span attributes
- # If a None write is attempted then the attribute is removed
- # making it impossible to distinguish between a Flow Span and
- # Spans not created by Humanloop (see humanloop.otel.helpers.is_humanloop_span)
flow: FlowKernelRequestParams
if not file_object.get("flow"):
flow = {"attributes": {}}
@@ -251,5 +267,6 @@ def _export_flow(self, span: ReadableSpan, evaluation_context: EvaluationContext
request_options=RequestOptions(max_retries=3),
)
if evaluation_context and log_response.flow_id == evaluation_context["evaluated_file_id"]:
+ # Multiple Logs could be triggered by the Evaluation of a single File
evaluation_context["upload_callback"](log_object)
self._span_id_to_uploaded_log_id[span.get_span_context().span_id] = log_response.id
diff --git a/src/humanloop/otel/helpers.py b/src/humanloop/otel/helpers.py
index 9c7b8577..89fc85ed 100644
--- a/src/humanloop/otel/helpers.py
+++ b/src/humanloop/otel/helpers.py
@@ -105,7 +105,8 @@ def write_to_opentelemetry_span(
else:
linearised_attributes[key] = value # type: ignore
for final_key, final_value in linearised_attributes.items():
- span._attributes[final_key] = final_value # type: ignore
+ if final_value is not None:
+ span._attributes[final_key] = final_value # type: ignore
def read_from_opentelemetry_span(span: ReadableSpan, key: str = "") -> NestedDict:
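The guard matters because OTel span attributes cannot store None: attempting the write drops the attribute, which previously made Humanloop spans indistinguishable from foreign ones. A standalone sketch of the filtering rule (the real helper's flattening details may differ):

```python
# Nested values are flattened to dotted keys; None values are skipped
# because OTel span attributes cannot store null.
def linearise(value: dict, prefix: str = "") -> dict:
    flat: dict = {}
    for key, item in value.items():
        dotted = f"{prefix}.{key}" if prefix else key
        if isinstance(item, dict):
            flat.update(linearise(item, dotted))
        elif item is not None:  # mirrors the `final_value is not None` guard
            flat[dotted] = item
    return flat

print(linearise({"log": {"output": "ok", "error": None}}))
# {'log.output': 'ok'}
```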
From 0a7507abb2241e3b41000bbd795a89ab3d6c229b Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Tue, 5 Nov 2024 15:08:04 +0000
Subject: [PATCH 36/70] Added error handling at function and exporter levels
---
src/humanloop/client.py | 8 +++
src/humanloop/decorators/flow.py | 21 +++++-
src/humanloop/decorators/prompt.py | 41 +++++++----
src/humanloop/decorators/tool.py | 15 +++-
src/humanloop/otel/__init__.py | 3 +-
src/humanloop/otel/exporter.py | 109 ++++++++++++++++++-----------
6 files changed, 136 insertions(+), 61 deletions(-)
diff --git a/src/humanloop/client.py b/src/humanloop/client.py
index 773e6570..a73e9b12 100644
--- a/src/humanloop/client.py
+++ b/src/humanloop/client.py
@@ -13,6 +13,9 @@
from humanloop.types.prompt_kernel_request_template import PromptKernelRequestTemplate
from humanloop.types.response_format import ResponseFormat
+if typing.TYPE_CHECKING:
+ from . import ToolFunctionParams
+
from .base_client import AsyncBaseHumanloop, BaseHumanloop
from .decorators.flow import flow as flow_decorator_factory
from .decorators.prompt import prompt as prompt_decorator_factory
@@ -153,6 +156,7 @@ def prompt(
other: Optional[dict[str, Optional[Any]]] = None,
seed: Optional[int] = None,
response_format: Optional[ResponseFormat] = None,
+ tools: Optional[Sequence["ToolFunctionParams"]] = None,
):
"""Decorator for declaring a (Prompt)[https://humanloop.com/docs/explanation/prompts] in code.
@@ -262,6 +266,9 @@ def call_llm(messages):
:param attributes: Additional fields to describe the Prompt. Helpful to
separate Prompt versions from each other with details on how they
were created or used.
+
+    :param tools: The Tool specifications that the model can choose to call,
+        if Tool calling is supported.
"""
return prompt_decorator_factory(
opentelemetry_tracer=self._opentelemetry_tracer,
@@ -280,6 +287,7 @@ def call_llm(messages):
other=other,
seed=seed,
response_format=response_format,
+ tools=tools,
)
def tool(
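A hedged sketch of the new `tools` parameter in use. The decorator arguments shown (`path`, `model`) and the OpenAI-style function schema assumed for `ToolFunctionParams` are illustrative, not confirmed by this patch:

```python
from humanloop import Humanloop

client = Humanloop(api_key="...")  # placeholder

# Assumed OpenAI-style function schema: name, description,
# JSON-schema parameters.
weather_tool = {
    "name": "get_weather",
    "description": "Look up the current weather for a city.",
    "parameters": {
        "type": "object",
        "properties": {"city": {"type": "string"}},
        "required": ["city"],
    },
}

@client.prompt(path="demo/weather", model="gpt-4o", tools=[weather_tool])
def ask_weather(city: str) -> str:
    # Call your LLM provider here; the tool spec above is versioned
    # with the Prompt on Humanloop.
    return f"(stub) weather for {city}"
```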
diff --git a/src/humanloop/decorators/flow.py b/src/humanloop/decorators/flow.py
index 32c6a46e..c4507da5 100644
--- a/src/humanloop/decorators/flow.py
+++ b/src/humanloop/decorators/flow.py
@@ -1,3 +1,4 @@
+import logging
import uuid
from functools import wraps
from typing import Any, Callable, Mapping, Optional, Sequence
@@ -13,6 +14,8 @@
from humanloop.otel.helpers import write_to_opentelemetry_span
from humanloop.requests import FlowKernelRequestParams as FlowDict
+logger = logging.getLogger("humanloop.sdk")
+
def flow(
opentelemetry_tracer: Tracer,
@@ -57,10 +60,22 @@ def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any:
value=attributes, # type: ignore
)
- # Call the decorated function
- output = func(*args, **kwargs)
inputs = args_to_inputs(func, args, kwargs)
- flow_log = {}
+
+ # Call the decorated function
+ try:
+ output = func(*args, **kwargs)
+ error = None
+ except Exception as e:
+ logger.error(str(e))
+ output = None
+ error = str(e)
+
+ flow_log = {
+ "inputs": inputs,
+ "output": output,
+ "error": error,
+ }
if inputs:
flow_log["inputs"] = inputs
if output:
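The same try/except shape now appears in the flow, prompt, and tool decorators. Distilled into a standalone sketch:

```python
import logging
from functools import wraps
from typing import Any, Callable

logger = logging.getLogger("humanloop.sdk")

def capture_errors(func: Callable) -> Callable:
    """Distilled version of the pattern above: never let the user's
    exception escape; record it on the Log instead."""
    @wraps(func)
    def wrapper(*args: Any, **kwargs: Any) -> Any:
        try:
            output = func(*args, **kwargs)
            error = None
        except Exception as e:  # mirror the decorators: catch everything
            logger.error(f"{func.__name__}: {e}")
            output, error = None, str(e)
        log = {"output": output, "error": error}
        # ...write `log` to the span here...
        return output
    return wrapper
```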
diff --git a/src/humanloop/decorators/prompt.py b/src/humanloop/decorators/prompt.py
index e45556ea..2611c22b 100644
--- a/src/humanloop/decorators/prompt.py
+++ b/src/humanloop/decorators/prompt.py
@@ -1,3 +1,5 @@
+import logging
+import typing
import uuid
from functools import wraps
from typing import Any, Callable, Mapping, Optional, Sequence
@@ -5,6 +7,8 @@
from opentelemetry.sdk.trace import Span
from opentelemetry.trace import Tracer
+if typing.TYPE_CHECKING:
+ from humanloop import ToolFunctionParams
from humanloop.eval_utils import File
from humanloop.otel import TRACE_FLOW_CONTEXT, FlowContext
from humanloop.otel.constants import HL_FILE_KEY, HL_FILE_TYPE_KEY, HL_LOG_KEY, HL_PATH_KEY
@@ -15,6 +19,8 @@
from humanloop.types.prompt_kernel_request_template import PromptKernelRequestTemplate
from humanloop.types.response_format import ResponseFormat
+logger = logging.getLogger("humanloop.sdk")
+
def prompt(
opentelemetry_tracer: Tracer,
@@ -34,6 +40,7 @@ def prompt(
other: Optional[dict[str, Optional[Any]]] = None,
seed: Optional[int] = None,
response_format: Optional[ResponseFormat] = None,
+ tools: Optional[Sequence["ToolFunctionParams"]] = None,
):
def decorator(func: Callable):
prompt_kernel = {}
@@ -68,10 +75,10 @@ def decorator(func: Callable):
"other": other,
"seed": seed,
"response_format": response_format,
- "attributes": attributes if attributes != {} else None,
+ "attributes": attributes or None,
+ "tools": tools or None,
}.items():
- if attr_value is not None:
- prompt_kernel[attr_name] = attr_value # type: ignore
+ prompt_kernel[attr_name] = attr_value # type: ignore
@wraps(func)
def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any:
@@ -90,6 +97,8 @@ def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any:
span.set_attribute(HL_PATH_KEY, path if path else func.__name__)
span.set_attribute(HL_FILE_TYPE_KEY, "prompt")
+
+            # Avoid writing falsy values to OTel, otherwise the attribute is silently dropped from the Span
if prompt_kernel:
write_to_opentelemetry_span(
span=span,
@@ -98,15 +107,23 @@ def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any:
)
# Call the decorated function
- output = func(*args, **kwargs)
-
- if output:
- prompt_log = {"output": output}
- write_to_opentelemetry_span(
- span=span,
- key=HL_LOG_KEY,
- value=prompt_log,
- )
+ try:
+ output = func(*args, **kwargs)
+ error = None
+ except Exception as e:
+ logger.error(str(e))
+ output = None
+ error = str(e)
+
+ prompt_log = {
+ "output": output,
+ "error": error,
+ }
+ write_to_opentelemetry_span(
+ span=span,
+ key=HL_LOG_KEY,
+ value=prompt_log,
+ )
# Return the output of the decorated function
return output
diff --git a/src/humanloop/decorators/tool.py b/src/humanloop/decorators/tool.py
index fa28d55c..f6a6cd85 100644
--- a/src/humanloop/decorators/tool.py
+++ b/src/humanloop/decorators/tool.py
@@ -1,5 +1,6 @@
import builtins
import inspect
+import logging
import textwrap
import typing
import uuid
@@ -18,6 +19,8 @@
from .helpers import args_to_inputs
+logger = logging.getLogger("humanloop.sdk")
+
def tool(
opentelemetry_tracer: Tracer,
@@ -64,14 +67,20 @@ def wrapper(*args, **kwargs):
)
# Call the decorated function
- output = func(*args, **kwargs)
+ try:
+ output = func(*args, **kwargs)
+ error = None
+ except Exception as e:
+ logger.error(str(e))
+ output = None
+ error = str(e)
# Populate known Tool Log attributes
tool_log = {
"inputs": args_to_inputs(func, args, kwargs),
+ "output": output,
+ "error": error,
}
- if output:
- tool_log["output"] = output
# Write the Tool Log to the Span on HL_LOG_OT_KEY
if tool_log:
diff --git a/src/humanloop/otel/__init__.py b/src/humanloop/otel/__init__.py
index 99e5dde3..f0c4cb1d 100644
--- a/src/humanloop/otel/__init__.py
+++ b/src/humanloop/otel/__init__.py
@@ -1,6 +1,5 @@
-from typing import Optional
+from typing import Optional, TypedDict
-from typing import TypedDict
from opentelemetry.sdk.trace import TracerProvider
from typing_extensions import NotRequired
diff --git a/src/humanloop/otel/exporter.py b/src/humanloop/otel/exporter.py
index f6892e9d..68be5c31 100644
--- a/src/humanloop/otel/exporter.py
+++ b/src/humanloop/otel/exporter.py
@@ -10,10 +10,11 @@
from opentelemetry.sdk.trace import ReadableSpan
from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult
+from humanloop.core import ApiError as HumanloopApiError
from humanloop.core.request_options import RequestOptions
from humanloop.eval_utils import EVALUATION_CONTEXT, EvaluationContext
from humanloop.otel import TRACE_FLOW_CONTEXT, FlowContext
-from humanloop.otel.constants import HL_FILE_KEY, HL_FILE_TYPE_KEY, HL_LOG_KEY
+from humanloop.otel.constants import HL_FILE_KEY, HL_FILE_TYPE_KEY, HL_LOG_KEY, HL_PATH_KEY
from humanloop.otel.helpers import is_humanloop_span, read_from_opentelemetry_span
from humanloop.requests.flow_kernel_request import FlowKernelRequestParams
from humanloop.requests.prompt_kernel_request import PromptKernelRequestParams
@@ -45,7 +46,7 @@ def __init__(
super().__init__()
self._client = client
# Uploaded spans translate to a Log on Humanloop. The IDs are required to link Logs in a Flow Trace
- self._span_id_to_uploaded_log_id: dict[int, str] = {}
+ self._span_id_to_uploaded_log_id: dict[int, Optional[str]] = {}
# Work queue for the threads uploading the spans
self._upload_queue: Queue = Queue()
# Worker threads to export the spans
@@ -170,6 +171,10 @@ def _export_prompt(
trace_metadata = TRACE_FLOW_CONTEXT.get(span.get_span_context().span_id)
if trace_metadata and "trace_parent_id" in trace_metadata and trace_metadata["trace_parent_id"]:
trace_parent_id = self._span_id_to_uploaded_log_id[trace_metadata["trace_parent_id"]]
+ if trace_parent_id is None:
+ # Parent Log in Trace upload failed
+ file_path = read_from_opentelemetry_span(span, key=HL_PATH_KEY)
+ logger.error(f"Skipping log for {file_path}: parent Log upload failed")
else:
trace_parent_id = None
prompt: PromptKernelRequestParams = file_object["prompt"]
@@ -180,20 +185,24 @@ def _export_prompt(
log_object["output"] = json.dumps(log_object["output"])
if "attributes" not in prompt or not prompt["attributes"]:
prompt["attributes"] = {}
- log_response = self._client.prompts.log(
- path=path,
- prompt=prompt,
- **log_object,
- trace_parent_id=trace_parent_id,
- source_datapoint_id=evaluation_context.get("source_datapoint_id"),
- run_id=evaluation_context.get("run_id"),
- request_options=RequestOptions(max_retries=3),
- )
- if evaluation_context and log_response.prompt_id == evaluation_context["evaluated_file_id"]:
- # Multiple Logs could be triggered by the Evaluation of a single File
- log_object["id"] = log_response.id
- evaluation_context["upload_callback"](log_object)
- self._span_id_to_uploaded_log_id[span.context.span_id] = log_response.id
+ try:
+ log_response = self._client.prompts.log(
+ path=path,
+ prompt=prompt,
+ **log_object,
+ trace_parent_id=trace_parent_id,
+ source_datapoint_id=evaluation_context.get("source_datapoint_id"),
+ run_id=evaluation_context.get("run_id"),
+ request_options=RequestOptions(max_retries=3),
+ )
+ if evaluation_context and log_response.prompt_id == evaluation_context["evaluated_file_id"]:
+ # Multiple Logs could be triggered by the Evaluation of a single File
+ log_object["id"] = log_response.id
+ evaluation_context["upload_callback"](log_object)
+ self._span_id_to_uploaded_log_id[span.context.span_id] = log_response.id
+ except HumanloopApiError as e:
+ logger.error(str(e))
+ self._span_id_to_uploaded_log_id[span.context.span_id] = None
def _export_tool(
self,
@@ -207,6 +216,10 @@ def _export_tool(
trace_parent_id = self._span_id_to_uploaded_log_id.get(
trace_metadata["trace_parent_id"],
)
+ if trace_parent_id is None:
+ # Parent Log in Trace upload failed
+ file_path = read_from_opentelemetry_span(span, key=HL_PATH_KEY)
+ logger.error(f"Skipping log for {file_path}: parent Log upload failed")
else:
trace_parent_id = None
tool = file_object["tool"]
@@ -219,18 +232,22 @@ def _export_tool(
# Output expected to be a string, if decorated function
# does not return one, jsonify it
log_object["output"] = json.dumps(log_object["output"])
- log_response = self._client.tools.log(
- path=path,
- tool=tool,
- **log_object,
- trace_parent_id=trace_parent_id,
- request_options=RequestOptions(max_retries=3),
- )
- if evaluation_context and log_response.tool_id == evaluation_context["evaluated_file_id"]:
- # Multiple Logs could be triggered by the Evaluation of a single File
- log_object["id"] = log_response.id
- evaluation_context["upload_callback"](log_object)
- self._span_id_to_uploaded_log_id[span.context.span_id] = log_response.id
+ try:
+ log_response = self._client.tools.log(
+ path=path,
+ tool=tool,
+ **log_object,
+ trace_parent_id=trace_parent_id,
+ request_options=RequestOptions(max_retries=3),
+ )
+ if evaluation_context and log_response.tool_id == evaluation_context["evaluated_file_id"]:
+ # Multiple Logs could be triggered by the Evaluation of a single File
+ log_object["id"] = log_response.id
+ evaluation_context["upload_callback"](log_object)
+ self._span_id_to_uploaded_log_id[span.context.span_id] = log_response.id
+ except HumanloopApiError as e:
+ logger.error(str(e))
+ self._span_id_to_uploaded_log_id[span.context.span_id] = None
def _export_flow(
self,
@@ -247,6 +264,12 @@ def _export_flow(
trace_parent_id = self._span_id_to_uploaded_log_id.get(
trace_metadata["trace_parent_id"], # type: ignore
)
+ if trace_parent_id is None:
+ # Parent Log in Trace upload failed
+ file_path = read_from_opentelemetry_span(span, key=HL_PATH_KEY)
+ logger.error(f"Skipping log for {file_path}: parent Log upload failed")
+ else:
+ trace_parent_id = None
flow: FlowKernelRequestParams
if not file_object.get("flow"):
flow = {"attributes": {}}
@@ -257,16 +280,20 @@ def _export_flow(
# Output expected to be a string, if decorated function
# does not return one, jsonify it
log_object["output"] = json.dumps(log_object["output"])
- log_response = self._client.flows.log(
- path=path,
- flow=flow,
- **log_object,
- trace_parent_id=trace_parent_id,
- source_datapoint_id=evaluation_context.get("source_datapoint_id"),
- run_id=evaluation_context.get("run_id"),
- request_options=RequestOptions(max_retries=3),
- )
- if evaluation_context and log_response.flow_id == evaluation_context["evaluated_file_id"]:
- # Multiple Logs could be triggered by the Evaluation of a single File
- evaluation_context["upload_callback"](log_object)
- self._span_id_to_uploaded_log_id[span.get_span_context().span_id] = log_response.id
+ try:
+ log_response = self._client.flows.log(
+ path=path,
+ flow=flow,
+ **log_object,
+ trace_parent_id=trace_parent_id,
+ source_datapoint_id=evaluation_context.get("source_datapoint_id"),
+ run_id=evaluation_context.get("run_id"),
+ request_options=RequestOptions(max_retries=3),
+ )
+ if evaluation_context and log_response.flow_id == evaluation_context["evaluated_file_id"]:
+ # Multiple Logs could be triggered by the Evaluation of a single File
+ evaluation_context["upload_callback"](log_object)
+ self._span_id_to_uploaded_log_id[span.get_span_context().span_id] = log_response.id
+ except HumanloopApiError as e:
+ logger.error(str(e))
+ self._span_id_to_uploaded_log_id[span.context.span_id] = None
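A distilled sketch of the exporter's new bookkeeping: a span id maps to the uploaded Log id on success and to None on failure, so children in a Trace can detect a failed parent:

```python
from typing import Optional

span_id_to_log_id: dict[int, Optional[str]] = {}

def record(span_id: int, log_id: Optional[str]) -> None:
    # None is the sentinel for "upload failed".
    span_id_to_log_id[span_id] = log_id

def parent_upload_failed(parent_span_id: int) -> bool:
    # A missing entry and a failed upload both yield None here;
    # the real exporter distinguishes them via the trace metadata.
    return span_id_to_log_id.get(parent_span_id) is None

record(1, "log_abc")   # parent uploaded fine
record(2, None)        # parent upload failed
assert not parent_upload_failed(1)
assert parent_upload_failed(2)
```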
From 888688000396f9940e752f9930156f2cf38cc2e0 Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Tue, 5 Nov 2024 15:16:15 +0000
Subject: [PATCH 37/70] mypy nit
---
src/humanloop/eval_utils/__init__.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/humanloop/eval_utils/__init__.py b/src/humanloop/eval_utils/__init__.py
index 6a61d2ea..5cfbf778 100644
--- a/src/humanloop/eval_utils/__init__.py
+++ b/src/humanloop/eval_utils/__init__.py
@@ -367,7 +367,7 @@ def process_datapoint(dp: Datapoint, evaluated_file_id: str, run_id: str):
datapoint_target=dp.target,
local_evaluators=local_evaluators,
)
- _PROGRESS_BAR.increment()
+ _PROGRESS_BAR.increment() # type: ignore
# Execute the function and send the logs to Humanloop in parallel
logger.info(f"\n{CYAN}Navigate to your Evaluation:{RESET}\n{evaluation.url}\n")
From 51b518f12990597379000ff2a7dd852efe8cbb35 Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Tue, 5 Nov 2024 15:26:02 +0000
Subject: [PATCH 38/70] Early exit for logs whose parent in trace failed
---
src/humanloop/otel/exporter.py | 3 +++
1 file changed, 3 insertions(+)
diff --git a/src/humanloop/otel/exporter.py b/src/humanloop/otel/exporter.py
index 68be5c31..739f6489 100644
--- a/src/humanloop/otel/exporter.py
+++ b/src/humanloop/otel/exporter.py
@@ -175,6 +175,7 @@ def _export_prompt(
# Parent Log in Trace upload failed
file_path = read_from_opentelemetry_span(span, key=HL_PATH_KEY)
logger.error(f"Skipping log for {file_path}: parent Log upload failed")
+ return
else:
trace_parent_id = None
prompt: PromptKernelRequestParams = file_object["prompt"]
@@ -220,6 +221,7 @@ def _export_tool(
# Parent Log in Trace upload failed
file_path = read_from_opentelemetry_span(span, key=HL_PATH_KEY)
logger.error(f"Skipping log for {file_path}: parent Log upload failed")
+ return
else:
trace_parent_id = None
tool = file_object["tool"]
@@ -268,6 +270,7 @@ def _export_flow(
# Parent Log in Trace upload failed
file_path = read_from_opentelemetry_span(span, key=HL_PATH_KEY)
logger.error(f"Skipping log for {file_path}: parent Log upload failed")
+ return
else:
trace_parent_id = None
flow: FlowKernelRequestParams
From 3b314113ecfc2f6b73d2e9654245a415902dc97a Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Tue, 5 Nov 2024 15:44:18 +0000
Subject: [PATCH 39/70] Added more docstrings
---
src/humanloop/eval_utils/__init__.py | 3 +++
1 file changed, 3 insertions(+)
diff --git a/src/humanloop/eval_utils/__init__.py b/src/humanloop/eval_utils/__init__.py
index 5cfbf778..58dbcde6 100644
--- a/src/humanloop/eval_utils/__init__.py
+++ b/src/humanloop/eval_utils/__init__.py
@@ -314,6 +314,9 @@ def process_datapoint(datapoint: Datapoint):
if "messages" in datapoint_dict and datapoint_dict["messages"] is not None:
output = function_(**datapoint_dict["inputs"], messages=datapoint_dict["messages"])
else:
+ # function_ is decorated by Humanloop, the OTel Exporter will
+ # handle the logging, which will call the upload_callback
+ # function above when it's done
function_(datapoint_dict["inputs"]) # type: ignore
else:
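A hedged sketch of the handshake this comment describes: the run utility registers `upload_callback`, and the Exporter, uploading in a background thread, invokes it once the Log is persisted:

```python
import threading

uploaded = threading.Event()

def upload_callback(log: dict) -> None:
    # Invoked by the Exporter once the Log exists on Humanloop;
    # the run utility would evaluate it with local Evaluators here.
    uploaded.set()

# ...the decorated function runs, its span is exported in the
# background, and the utility can wait for the notification:
# uploaded.wait(timeout=60)
```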
From 86ef9a440eddd984f6a77212ae2ec6b8cb5e07bb Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Thu, 7 Nov 2024 11:28:32 +0000
Subject: [PATCH 40/70] Refactored evaluation context
---
src/humanloop/__init__.py | 5 -
src/humanloop/client.py | 24 +-
src/humanloop/decorators/flow.py | 4 +-
src/humanloop/decorators/prompt.py | 12 +-
src/humanloop/decorators/tool.py | 2 +-
src/humanloop/eval_utils/__init__.py | 664 +-----------------------
src/humanloop/eval_utils/context.py | 24 +-
src/humanloop/eval_utils/run.py | 737 +++++++++++++++++++++++++++
src/humanloop/otel/exporter.py | 102 ++--
src/humanloop/otel/helpers.py | 77 ++-
src/humanloop/otel/processor.py | 6 +-
tests/conftest.py | 5 +-
tests/otel/test_helpers.py | 31 +-
13 files changed, 898 insertions(+), 795 deletions(-)
create mode 100644 src/humanloop/eval_utils/run.py
diff --git a/src/humanloop/__init__.py b/src/humanloop/__init__.py
index 6134f370..cba542c9 100644
--- a/src/humanloop/__init__.py
+++ b/src/humanloop/__init__.py
@@ -27,11 +27,9 @@
DirectoryWithParentsAndChildrenResponseFilesItem,
EnvironmentResponse,
EnvironmentTag,
- EvaluatedVersionResponse,
EvaluateeRequest,
EvaluateeResponse,
EvaluationEvaluatorResponse,
- EvaluationReportLogResponse,
EvaluationResponse,
EvaluationStats,
EvaluationStatus,
@@ -86,7 +84,6 @@
NumericEvaluatorStatsResponse,
ObservabilityStatus,
OverallStats,
- PaginatedDataEvaluationReportLogResponse,
PaginatedDataEvaluatorResponse,
PaginatedDataFlowResponse,
PaginatedDataLogResponse,
@@ -197,8 +194,6 @@
EvaluationLogResponseParams,
EvaluationResponseParams,
EvaluationStatsParams,
- EvaluationsDatasetRequestParams,
- EvaluationsRequestParams,
EvaluatorActivationDeactivationRequestActivateItemParams,
EvaluatorActivationDeactivationRequestDeactivateItemParams,
EvaluatorActivationDeactivationRequestParams,
diff --git a/src/humanloop/client.py b/src/humanloop/client.py
index a73e9b12..eb2f3204 100644
--- a/src/humanloop/client.py
+++ b/src/humanloop/client.py
@@ -1,3 +1,4 @@
+import logging
import os
import typing
from typing import Any, Callable, List, Optional, Sequence, Union
@@ -16,13 +17,14 @@
if typing.TYPE_CHECKING:
from . import ToolFunctionParams
+from humanloop.eval_utils import log_with_evaluation_context, run_eval
+from humanloop.eval_utils.types import Dataset, Evaluator, EvaluatorCheck, File
+
from .base_client import AsyncBaseHumanloop, BaseHumanloop
from .decorators.flow import flow as flow_decorator_factory
from .decorators.prompt import prompt as prompt_decorator_factory
from .decorators.tool import tool as tool_decorator_factory
from .environment import HumanloopEnvironment
-from humanloop.eval_utils.types import Dataset, Evaluator, EvaluatorCheck, File
-from humanloop.eval_utils import run_eval
from .evaluations.client import EvaluationsClient
from .otel import instrument_provider
from .otel.exporter import HumanloopSpanExporter
@@ -109,6 +111,19 @@ def __init__(
httpx_client=httpx_client,
)
+ eval_client = ExtendedEvalsClient(client_wrapper=self._client_wrapper)
+ eval_client.client = self
+ self.evaluations = eval_client
+ self.prompts = ExtendedPromptsClient(client_wrapper=self._client_wrapper)
+
+ # Overload the .log method of the clients to be aware of Evaluation Context
+ # TODO: Overload the log for Evaluators and Tools once run_id is added
+ # to them.
+ self.prompts = log_with_evaluation_context(client=self.prompts)
+ # self.evaluators = log_with_evaluation_context(client=self.evaluators)
+ # self.tools = log_with_evaluation_context(client=self.tools)
+ self.flows = log_with_evaluation_context(client=self.flows)
+
if opentelemetry_tracer_provider is not None:
self._tracer_provider = opentelemetry_tracer_provider
else:
@@ -133,11 +148,6 @@ def __init__(
else:
self._opentelemetry_tracer = opentelemetry_tracer
- eval_client = ExtendedEvalsClient(client_wrapper=self._client_wrapper)
- eval_client.client = self
- self.evaluations = eval_client
- self.prompts = ExtendedPromptsClient(client_wrapper=self._client_wrapper)
-
def prompt(
self,
*,
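`log_with_evaluation_context` itself is not shown in this series, so the following is only a plausible sketch of what such a wrapper could do, assuming it reads the EVALUATION_CONTEXT ContextVar and stamps the Run and Datapoint onto outgoing Logs:

```python
from humanloop.eval_utils.context import EVALUATION_CONTEXT

def log_with_evaluation_context_sketch(client):
    """Hypothetical stand-in; the real wrapper may differ."""
    original_log = client.log

    def log(*args, **kwargs):
        context = EVALUATION_CONTEXT.get(None)
        if context is not None:
            # Associate the Log with the active Evaluation Run.
            kwargs.setdefault("run_id", context["run_id"])
            kwargs.setdefault("source_datapoint_id", context["source_datapoint_id"])
        return original_log(*args, **kwargs)

    client.log = log
    return client
```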
diff --git a/src/humanloop/decorators/flow.py b/src/humanloop/decorators/flow.py
index c4507da5..d019efe8 100644
--- a/src/humanloop/decorators/flow.py
+++ b/src/humanloop/decorators/flow.py
@@ -8,7 +8,7 @@
from opentelemetry.util.types import AttributeValue
from humanloop.decorators.helpers import args_to_inputs
-from humanloop.eval_utils import File
+from humanloop.eval_utils.types import File
from humanloop.otel import TRACE_FLOW_CONTEXT, FlowContext
from humanloop.otel.constants import HL_FILE_KEY, HL_FILE_TYPE_KEY, HL_LOG_KEY, HL_PATH_KEY
from humanloop.otel.helpers import write_to_opentelemetry_span
@@ -67,7 +67,7 @@ def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any:
output = func(*args, **kwargs)
error = None
except Exception as e:
- logger.error(str(e))
+ logger.error(f"{func.__name__}: {str(e)}")
output = None
error = str(e)
diff --git a/src/humanloop/decorators/prompt.py b/src/humanloop/decorators/prompt.py
index 2611c22b..b066f8d3 100644
--- a/src/humanloop/decorators/prompt.py
+++ b/src/humanloop/decorators/prompt.py
@@ -76,7 +76,6 @@ def decorator(func: Callable):
"seed": seed,
"response_format": response_format,
"attributes": attributes or None,
- "tools": tools or None,
}.items():
prompt_kernel[attr_name] = attr_value # type: ignore
@@ -98,7 +97,6 @@ def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any:
span.set_attribute(HL_PATH_KEY, path if path else func.__name__)
span.set_attribute(HL_FILE_TYPE_KEY, "prompt")
-            # Avoid writing falsy values to OTel, otherwise the attribute is silently dropped from the Span
if prompt_kernel:
write_to_opentelemetry_span(
span=span,
@@ -111,7 +109,7 @@ def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any:
output = func(*args, **kwargs)
error = None
except Exception as e:
- logger.error(str(e))
+ logger.error(f"{func.__name__}: {e}")
output = None
error = str(e)
@@ -128,10 +126,16 @@ def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any:
# Return the output of the decorated function
return output
+ prompt_kernel_file = {**prompt_kernel}
+ if prompt_kernel_file.get("provider") is None:
+ prompt_kernel_file["provider"] = "openai" # type: ignore
+ if prompt_kernel_file.get("endpoint") is None:
+ prompt_kernel_file["endpoint"] = "chat" # type: ignore
+
wrapper.file = File( # type: ignore
path=path if path else func.__name__,
type="prompt",
- version=prompt_kernel, # type: ignore
+ version={**prompt_kernel_file}, # type: ignore
is_decorated=True,
callable=wrapper,
)
diff --git a/src/humanloop/decorators/tool.py b/src/humanloop/decorators/tool.py
index f6a6cd85..6285af80 100644
--- a/src/humanloop/decorators/tool.py
+++ b/src/humanloop/decorators/tool.py
@@ -71,7 +71,7 @@ def wrapper(*args, **kwargs):
output = func(*args, **kwargs)
error = None
except Exception as e:
- logger.error(str(e))
+ logger.error(f"{func.__name__}: {e}")
output = None
error = str(e)
diff --git a/src/humanloop/eval_utils/__init__.py b/src/humanloop/eval_utils/__init__.py
index 58dbcde6..abd63528 100644
--- a/src/humanloop/eval_utils/__init__.py
+++ b/src/humanloop/eval_utils/__init__.py
@@ -1,662 +1,4 @@
-"""
-Evaluation utils for the Humanloop SDK.
+from .run import run_eval, log_with_evaluation_context
+from .types import File
-This module provides a set of utilities to aid running Eval workflows on Humanloop
-where you are managing the runtime of your application in your code.
-
-Functions in this module should be accessed via the Humanloop client. They should
-not be called directly.
-"""
-
-import inspect
-import logging
-import sys
-import threading
-import time
-import typing
-from concurrent.futures import ThreadPoolExecutor
-from datetime import datetime
-from functools import partial
-from logging import INFO
-from typing import Callable, Dict, List, Literal, Optional, Sequence, Tuple, Union
-
-from pydantic import ValidationError
-from typing import Callable, Sequence, Literal, Union, Optional, List, Dict, Tuple
-import time
-import sys
-
-
-from humanloop import EvaluatorResponse, FlowResponse, PromptResponse, ToolResponse
-from humanloop.client import BaseHumanloop
-from humanloop.core.api_error import ApiError
-from humanloop.eval_utils.context import EVALUATION_CONTEXT, EvaluationContext
-from humanloop.eval_utils.types import Dataset, Evaluator, EvaluatorCheck, File
-
-# We use TypedDicts for requests, which is consistent with the rest of the SDK
-from humanloop.requests import CodeEvaluatorRequestParams as CodeEvaluatorDict
-from humanloop.requests import ExternalEvaluatorRequestParams as ExternalEvaluator
-from humanloop.requests import FlowKernelRequestParams as FlowDict
-from humanloop.requests import HumanEvaluatorRequestParams as HumanEvaluatorDict
-from humanloop.requests import LlmEvaluatorRequestParams as LLMEvaluatorDict
-from humanloop.requests import PromptKernelRequestParams as PromptDict
-from humanloop.requests import ToolKernelRequestParams as ToolDict
-from humanloop.types import BooleanEvaluatorStatsResponse as BooleanStats
-from humanloop.types import DatapointResponse as Datapoint
-from humanloop.types import EvaluationResponse, EvaluationStats, VersionStatsResponse
-from humanloop.types.datapoint_response_target_value import DatapointResponseTargetValue
-
-# Responses are Pydantic models and we leverage them for improved request validation
-from humanloop.types import FlowKernelRequest as Flow
-from humanloop.types import NumericEvaluatorStatsResponse as NumericStats
-from humanloop.types import PromptKernelRequest as Prompt
-from humanloop.types import ToolKernelRequest as Tool
-from humanloop.types import BooleanEvaluatorStatsResponse as BooleanStats
-from humanloop.types import NumericEvaluatorStatsResponse as NumericStats
-from humanloop.types import DatapointResponse as Datapoint
-from humanloop.types import EvaluationStats, EvaluationResponse
-from humanloop.types.evaluation_run_response import EvaluationRunResponse
-from humanloop.types.run_stats_response import RunStatsResponse
-
-# Setup logging
-logger = logging.getLogger(__name__)
-logger.setLevel(level=INFO)
-console_handler = logging.StreamHandler()
-logger.setLevel(INFO)
-formatter = logging.Formatter("%(message)s")
-console_handler.setFormatter(formatter)
-if not logger.hasHandlers():
- logger.addHandler(console_handler)
-
-EvaluatorDict = Union[CodeEvaluatorDict, LLMEvaluatorDict, HumanEvaluatorDict, ExternalEvaluator]
-Version = Union[FlowDict, PromptDict, ToolDict, EvaluatorDict]
-FileType = Literal["flow", "prompt", "tool", "evaluator"]
-
-
-# ANSI escape codes for logging colors
-YELLOW = "\033[93m"
-CYAN = "\033[96m"
-GREEN = "\033[92m"
-RED = "\033[91m"
-RESET = "\033[0m"
-
-
-class _SimpleProgressBar:
- """Thread-safe progress bar for the console."""
-
- def __init__(self, total: int):
- if total <= 0:
- self._total = 1
- else:
- self._total = total
- self._progress = 0
- self._lock = threading.Lock()
- self._start_time = None
-
- def increment(self):
- """Increment the progress bar by one finished task."""
- with self._lock:
- self._progress += 1
- if self._start_time is None:
- self._start_time = time.time()
-
- bar_length = 40
- block = int(round(bar_length * self._progress / self._total))
- bar = "#" * block + "-" * (bar_length - block)
-
- percentage = (self._progress / self._total) * 100
- elapsed_time = time.time() - self._start_time
- time_per_item = elapsed_time / self._progress if self._progress > 0 else 0
- eta = (self._total - self._progress) * time_per_item
-
- progress_display = f"\r[{bar}] {self._progress}/{self._total}"
- progress_display += f" ({percentage:.2f}%)"
-
- if self._progress < self._total:
- progress_display += f" | ETA: {int(eta)}s"
- else:
- progress_display += " | DONE"
-
- sys.stderr.write(progress_display)
-
- if self._progress >= self._total:
- sys.stderr.write("\n")
-
-
-# Module-level so it can be shared by threads.
-_PROGRESS_BAR: Optional[_SimpleProgressBar] = None
-
-
-def run_eval(
- client: BaseHumanloop,
- file: Union[File, Callable],
- name: Optional[str],
- dataset: Dataset,
- evaluators: Optional[Sequence[Evaluator]] = None,
- # logs: typing.Sequence[dict] | None = None,
- workers: int = 4,
-) -> List[EvaluatorCheck]:
- """
- Evaluate your function for a given `Dataset` and set of `Evaluators`.
-
- :param client: the Humanloop API client.
- :param file: the Humanloop file being evaluated, including a function to run over the dataset.
- :param name: the name of the Evaluation to run. If it does not exist, a new Evaluation will be created under your File.
- :param dataset: the dataset to map your function over to produce the outputs required by the Evaluation.
- :param evaluators: define how judgments are provided for this Evaluation.
- :param workers: the number of threads to process datapoints using your function concurrently.
- :return: per Evaluator checks.
- """
- global _PROGRESS_BAR
-
- if isinstance(file, Callable): # type: ignore
- # Decorated function
- file_: File = file.file # type: ignore
- else:
- file_ = file # type: ignore
-
- is_decorated = file_.pop("is_decorated", False)
-
- # Get or create the file on Humanloop
- version = file_.pop("version", {})
-
- # Raise error if one of path or id not provided
- if not file_.get("path") and not file_.get("id"):
- raise ValueError("You must provide a path or id in your `file`.")
-
- # Determine the `type` of the `file` to Evaluate - if not `type` provided, default to `flow`
- try:
- type_ = typing.cast(FileType, file_.pop("type"))
- logger.info(
- f"{CYAN}Evaluating your {type_} function corresponding to `{file_['path']}` on Humanloop{RESET} \n\n"
- )
- except KeyError as _:
- type_ = "flow"
- logger.warning("No `file` type specified, defaulting to flow.")
-
- # If a `callable` is provided, Logs will be generated locally, otherwise Logs will be generated on Humanloop.
- function_ = typing.cast(Optional[Callable], file_.pop("callable", None))
- if function_ is None:
- if type_ == "flow":
- raise ValueError("You must provide a `callable` for your Flow `file` to run a local eval.")
- else:
- logger.info(f"No `callable` provided for your {type_} file - will attempt to generate logs on Humanloop.")
-
- custom_logger = file_.pop("custom_logger", None)
- file_dict = {**file_, **version}
- hl_file: Union[PromptResponse, FlowResponse, ToolResponse, EvaluatorResponse]
-
- if type_ == "flow":
- # Be more lenient with Flow versions as they are arbitrary json
- try:
- Flow.model_validate(version)
- except ValidationError:
- flow_version = {"attributes": version}
- file_dict = {**file_, **flow_version}
- hl_file = client.flows.upsert(**file_dict) # type: ignore
-
- elif type_ == "prompt":
- try:
- Prompt.model_validate(version)
- except ValidationError as error_:
- logger.error(msg="Invalid Prompt `version` in your `file` request. \n\nValidation error: \n)")
- raise error_
- hl_file = client.prompts.upsert(**file_dict) # type: ignore
-
- elif type_ == "tool":
- try:
- Tool.model_validate(version)
- except ValidationError as error_:
- logger.error(msg="Invalid Tool `version` in your `file` request. \n\nValidation error: \n)")
- raise error_
- hl_file = client.tools.upsert(**file_dict) # type: ignore
-
- elif type_ == "evaluator":
- hl_file = client.evaluators.upsert(**file_dict) # type: ignore
-
- else:
- raise NotImplementedError(f"Unsupported File type: {type_}")
-
- # Upsert the Dataset
- action = dataset.get("action", "set") # set is the server default - None not allowed.
- if "datapoints" not in dataset:
- dataset["datapoints"] = []
- # Use `upsert` to get existing dataset ID if no datapoints provided, given we can't `get` on path.
- action = "add"
- hl_dataset = client.datasets.upsert(**dataset, action=action)
- hl_dataset = client.datasets.get(id=hl_dataset.id, version_id=hl_dataset.version_id, include_datapoints=True)
-
- # Upsert the local Evaluators; other Evaluators are just referenced by `path` or `id`
- local_evaluators: List[Evaluator] = []
- if evaluators:
- for evaluator in evaluators:
- # If a callable is provided for an Evaluator, we treat it as External
- eval_function = evaluator.get("callable")
- if eval_function is not None:
- # TODO: support the case where `file` logs generated on Humanloop but Evaluator logs generated locally
- if function_ is None:
- raise ValueError(
- f"Local Evaluators are only supported when generating Logs locally using your {type_}'s `callable`. Please provide a `callable` for your file in order to run Evaluators locally."
- )
- local_evaluators.append(evaluator)
- spec = ExternalEvaluator(
- arguments_type=evaluator["args_type"],
- return_type=evaluator["return_type"],
- attributes={"code": inspect.getsource(eval_function)},
- evaluator_type="external",
- )
- client.evaluators.upsert(
- id=evaluator.get("id"),
- path=evaluator.get("path"),
- spec=spec,
- )
- function_ = typing.cast(Callable, function_)
-
-    # Validate the signature of the provided callable
- function_signature = inspect.signature(function_)
- parameter_names = list(function_signature.parameters.keys())
- if parameter_names != ["inputs", "messages"] and parameter_names != ["inputs"]:
- raise ValueError(
- f"Your {type_}'s `callable` must have the signature `def "
- "function(inputs: dict, messages: Optional[dict] = None):` "
- "or `def function(inputs: dict):`"
- )
-
- # Validate upfront that the local Evaluators and Dataset fit
- requires_target = False
- for local_evaluator in local_evaluators:
- if local_evaluator["args_type"] == "target_required":
- requires_target = True
- break
- if requires_target:
- missing_target = 0
- for datapoint in hl_dataset.datapoints: # type: ignore
- if not datapoint.target:
- missing_target += 1
- if missing_target > 0:
- raise ValueError(
- f"{missing_target} Datapoints have no target. A target is required for the Evaluator: {local_evaluator['path']}"
- )
-
- # Get or create the Evaluation based on the name
- evaluation = None
- try:
- evaluation = client.evaluations.create(
- name=name,
- dataset={"file_id": hl_dataset.id},
- evaluators=[{"path": e["path"]} for e in evaluators], # type: ignore
- file={"id": hl_file.id},
- )
- except ApiError as error_:
- # If the name exists, go and get it # TODO: Update API GET to allow querying by name and file.
- if error_.status_code == 409:
- evals = client.evaluations.list(file_id=hl_file.id, size=50)
- for page in evals.iter_pages():
- evaluation = next((e for e in page.items if e.name == name), None) # type: ignore
- else:
- raise error_
- if not evaluation:
- raise ValueError(f"Evaluation with name {name} not found.")
-
- # Create a new Run
- run: EvaluationRunResponse = client.evaluations.create_run(
- id=evaluation.id,
- dataset={"version_id": hl_dataset.version_id},
- orchestrated=False,
- )
- # Every Run will generate a new batch of Logs
- run_id = run.id
-
- # Define the function to execute your function in parallel and Log to Humanloop
- def process_datapoint(datapoint: Datapoint):
- start_time = datetime.now()
- datapoint_dict = datapoint.dict()
- try:
- if "messages" in datapoint_dict and datapoint_dict["messages"] is not None:
- output = function_(**datapoint_dict["inputs"], messages=datapoint_dict["messages"])
- else:
- # function_ is decorated by Humanloop, the OTel Exporter will
- # handle the logging, which will call the upload_callback
- # function above when it's done
- function_(datapoint_dict["inputs"]) # type: ignore
-
- else:
- # Define the function to execute your function in parallel and Log to Humanloop
- def process_datapoint(dp: Datapoint, evaluated_file_id: str, run_id: str):
- log_func = _get_log_func(
- client=client,
- file_type=type_,
- file_id=hl_file.id,
- version_id=hl_file.version_id,
- run_id=run_id,
- )
-
- start_time = datetime.now()
- datapoint_dict = dp.dict()
- try:
- if "messages" in datapoint_dict:
- output = function_( # type: ignore
- **datapoint_dict["inputs"],
- messages=datapoint_dict["messages"],
- )
- else:
- output = function_(**datapoint_dict["inputs"]) # type: ignore
- if custom_logger:
- log = custom_logger(client=client, output=output) # type: ignore
- else:
- if not isinstance(output, str):
- raise ValueError(
- f"Your {type_}'s `callable` must return a string if you do not provide a custom logger."
- )
- log = log_func(
- inputs=dp.inputs,
- output=output,
- source_datapoint_id=dp.id,
- start_time=start_time,
- end_time=datetime.now(),
- )
- except Exception as e:
- log = log_func(
- inputs=dp.inputs,
- error=str(e),
- source_datapoint_id=dp.id,
- start_time=start_time,
- end_time=datetime.now(),
- )
- logger.warning(msg=f"\nYour {type_}'s `callable` failed for Datapoint: {dp.id}. \n Error: {str(e)}")
-
- _add_log_to_evaluation(
- client=client,
- log=log,
- datapoint_target=dp.target,
- local_evaluators=local_evaluators,
- )
- _PROGRESS_BAR.increment() # type: ignore
-
- # Execute the function and send the logs to Humanloop in parallel
- logger.info(f"\n{CYAN}Navigate to your Evaluation:{RESET}\n{evaluation.url}\n")
- logger.info(f"{CYAN}{type_.capitalize()} Version ID: {hl_file.version_id}{RESET}")
- logger.info(f"{CYAN}Run ID: {run_id}{RESET}")
-
- # Generate locally if a file `callable` is provided
- if function_: # type: ignore
- logger.info(
- f"{CYAN}\nRunning '{hl_file.name}' over the Dataset '{hl_dataset.name}' using {workers} workers{RESET} "
- )
- with ThreadPoolExecutor(max_workers=workers) as executor:
- for datapoint in hl_dataset.datapoints:
- executor.submit(
- process_datapoint,
- datapoint,
- hl_file.id,
- run_id,
- )
- else:
- # TODO: trigger run when updated API is available
- logger.info(f"{CYAN}\nRunning '{hl_file.name}' over the Dataset '{hl_dataset.name}'{RESET}")
-
- # Wait for the Evaluation to complete then print the results
- complete = False
-
- while not complete:
- stats = client.evaluations.get_stats(id=evaluation.id)
- logger.info(f"\r{stats.progress}")
- run_stats = next(
- (run_stats for run_stats in stats.run_stats if run_stats.run_id == run_id),
- None,
- )
- complete = run_stats is not None and run_stats.status == "completed"
- if not complete:
- time.sleep(5)
-
- # Print Evaluation results
- logger.info(stats.report)
-
- checks: List[EvaluatorCheck] = []
-
- # Skip `check_evaluation_improvement` if no thresholds were provided and there is only one run.
- # (Or the logs would not be helpful)
- if any(evaluator.get("threshold") is not None for evaluator in evaluators) or len(stats.run_stats) > 1:
- for evaluator in evaluators:
- _, score, delta = check_evaluation_improvement(
- evaluation=evaluation,
- stats=stats,
- evaluator_path=evaluator["path"],
- run_id=run_id,
- )
- threshold_check = None
- threshold = evaluator.get("threshold")
- if threshold is not None:
- threshold_check = check_evaluation_threshold(
- evaluation=evaluation,
- stats=stats,
- evaluator_path=evaluator["path"],
- threshold=threshold,
- run_id=run_id,
- )
- checks.append(
- EvaluatorCheck(
- path=evaluator["path"],
- # TODO: Add back in with number valence on Evaluators
- # improvement_check=improvement_check,
- score=score,
- delta=delta,
- threshold=threshold,
- threshold_check=threshold_check,
- evaluation_id=evaluation.id,
- )
- )
-
- logger.info(f"\n{CYAN}View your Evaluation:{RESET}\n{evaluation.url}\n")
- return checks
-
-
-def _get_log_func(
- client: BaseHumanloop,
- file_type: FileType,
- file_id: str,
- version_id: str,
- run_id: str,
-) -> Callable:
- """Returns the appropriate log function pre-filled with common parameters."""
- log_request = {
- # TODO: why does the Log `id` field refer to the file ID in the API?
- # Why are both `id` and `version_id` needed in the API?
- "id": file_id,
- "version_id": version_id,
- "run_id": run_id,
- }
- if file_type == "flow":
- return partial(client.flows.log, **log_request, trace_status="complete")
- elif file_type == "prompt":
- return partial(client.prompts.log, **log_request)
- elif file_type == "evaluator":
- return partial(client.evaluators.log, **log_request)
- elif file_type == "tool":
- return partial(client.tools.log, **log_request)
- else:
- raise NotImplementedError(f"Unsupported File version: {file_type}")
-
-
-def get_score_from_evaluator_stat(
- stat: Union[NumericStats, BooleanStats],
-) -> Union[float, None]:
- """Get the score from an Evaluator Stat."""
- score = None
- if isinstance(stat, BooleanStats):
- if stat.total_logs:
- score = round(stat.num_true / stat.total_logs, 2)
- elif isinstance(stat, NumericStats):
- score = round(stat.mean, 2) # type: ignore
- else:
- raise ValueError(f"Unsupported Evaluator Stat type: {type(stat)}")
- return score # type: ignore
-
-
-class _SimpleProgressBar:
- def __init__(self, total: int):
- if total <= 0:
- self._total = 1
- else:
- self._total = total
- self._progress = 0
- self._lock = threading.Lock()
- self._start_time = None
-
- def increment(self):
- with self._lock:
- self._progress += 1
- if self._start_time is None:
- self._start_time = time.time()
-
- bar_length = 40
- block = int(round(bar_length * self._progress / self._total))
- bar = "#" * block + "-" * (bar_length - block)
-
- percentage = (self._progress / self._total) * 100
- elapsed_time = time.time() - self._start_time
- time_per_item = elapsed_time / self._progress if self._progress > 0 else 0
- eta = (self._total - self._progress) * time_per_item
-
- progress_display = f"\r[{bar}] {self._progress}/{self._total}"
- progress_display += f" ({percentage:.2f}%)"
-
- if self._progress < self._total:
- progress_display += f" | ETA: {int(eta)}s"
- else:
- progress_display += " | DONE"
-
- sys.stderr.write(progress_display)
-
- if self._progress >= self._total:
- sys.stderr.write("\n")
-
-
-_PROGRESS_BAR = None
-
-
-def get_evaluator_stats_by_path(
- stat: RunStatsResponse,
- evaluation: EvaluationResponse,
-) -> Dict[str, Union[NumericStats, BooleanStats]]:
- """Get the Evaluator stats by path."""
- # TODO: Update the API so this is not necessary
- evaluators_by_id = {evaluator.version.version_id: evaluator for evaluator in evaluation.evaluators}
- evaluator_stats_by_path = {
- evaluators_by_id[evaluator_stat.evaluator_version_id].version.path: evaluator_stat
- for evaluator_stat in stat.evaluator_stats
- }
- return evaluator_stats_by_path # type: ignore
-
-
-def _check_evaluation_threshold(
- evaluation: EvaluationResponse,
- stats: EvaluationStats,
- evaluator_path: str,
- threshold: float,
- run_id: str,
-) -> bool:
- """Checks if the latest version has an average Evaluator result above a threshold."""
- # TODO: Update the API so this is not necessary
- evaluator_stats_by_path = get_evaluator_stats_by_path(
- stat=next((stat for stat in stats.run_stats if stat.run_id == run_id), None),
- evaluation=evaluation,
- )
- if evaluator_path in evaluator_stats_by_path:
- evaluator_stat = evaluator_stats_by_path[evaluator_path]
- score = get_score_from_evaluator_stat(stat=evaluator_stat)
- if score >= threshold:
- logger.info(
- f"{GREEN}✅ Latest eval [{score}] above threshold [{threshold}] for evaluator {evaluator_path}.{RESET}"
- )
- return True
- else:
- logger.info(
- f"{RED}❌ Latest score [{score}] below the threshold [{threshold}] for evaluator {evaluator_path}.{RESET}"
- )
- return False
- else:
- raise ValueError(f"Evaluator {evaluator_path} not found in the stats.")
-
-
-def _check_evaluation_improvement(
- evaluation: EvaluationResponse,
- evaluator_path: str,
- stats: EvaluationStats,
- run_id: str,
-) -> Tuple[bool, float, float]:
- """
-    Check whether the latest version has improved for a specific Evaluator.
-
- :returns: A tuple of (improvement, latest_score, delta since previous score)
- """
- # TODO: Update the API so this is not necessary
-
- latest_evaluator_stats_by_path = get_evaluator_stats_by_path(
- stat=next((stat for stat in stats.run_stats if stat.run_id == run_id), None),
- evaluation=evaluation,
- )
- if len(stats.run_stats) == 1:
- logger.info(f"{YELLOW}⚠️ No previous versions to compare with.{RESET}")
- return True, 0, 0
-
- previous_evaluator_stats_by_path = get_evaluator_stats_by_path(
- stat=stats.run_stats[1], # Latest Run is at index 0; previous Run is at index 1
- evaluation=evaluation,
- )
- if evaluator_path in latest_evaluator_stats_by_path and evaluator_path in previous_evaluator_stats_by_path:
- latest_evaluator_stat = latest_evaluator_stats_by_path[evaluator_path]
- previous_evaluator_stat = previous_evaluator_stats_by_path[evaluator_path]
- latest_score = get_score_from_evaluator_stat(stat=latest_evaluator_stat)
- previous_score = get_score_from_evaluator_stat(stat=previous_evaluator_stat)
- if latest_score is None or previous_score is None:
- raise ValueError(f"Could not find score for Evaluator {evaluator_path}.")
- diff = round(latest_score - previous_score, 2)
- if diff >= 0:
- logger.info(f"{CYAN}Change of [{diff}] for Evaluator {evaluator_path}{RESET}")
- return True, latest_score, diff
- else:
- logger.info(f"{CYAN}Change of [{diff}] for Evaluator {evaluator_path}{RESET}")
- return False, latest_score, diff
- else:
- raise ValueError(f"Evaluator {evaluator_path} not found in the stats.")
-
-
-def _add_log_to_evaluation(
- client: BaseHumanloop,
- log: dict,
- datapoint_target: typing.Optional[typing.Dict[str, DatapointResponseTargetValue]],
- local_evaluators: list[Evaluator],
-):
- for local_evaluator in local_evaluators:
- start_time = datetime.now()
- try:
- eval_function = local_evaluator["callable"]
- if local_evaluator["args_type"] == "target_required":
- judgement = eval_function(
- log,
- datapoint_target,
- )
- else:
- judgement = eval_function(log)
-
- if local_evaluator.get("custom_logger", None):
- local_evaluator["custom_logger"](judgement, start_time, datetime.now())
- else:
- _ = client.evaluators.log(
- parent_id=log["id"],
- judgment=judgement,
- id=local_evaluator.get("id"),
- path=local_evaluator.get("path"),
- start_time=start_time,
- end_time=datetime.now(),
- )
- except Exception as e:
- _ = client.evaluators.log(
- parent_id=log["id"],
- path=local_evaluator.get("path"),
- id=local_evaluator.get("id"),
- error=str(e),
- start_time=start_time,
- end_time=datetime.now(),
- )
- logger.warning(f"\nEvaluator {local_evaluator['path']} failed with error {str(e)}")
+__all__ = ["run_eval", "log_with_evaluation_context", "File"]
diff --git a/src/humanloop/eval_utils/context.py b/src/humanloop/eval_utils/context.py
index 89082742..a055faee 100644
--- a/src/humanloop/eval_utils/context.py
+++ b/src/humanloop/eval_utils/context.py
@@ -4,27 +4,27 @@
class EvaluationContext(TypedDict):
- """Context required by the Exporter when uploading a Log to Humanloop.
+    """Context for associating a Log with an Evaluation on Humanloop.
- When using the evaluation run utility on decorated functions, the utility
- has does not control the Log upload - the Exporter does. This context class
- propagates the required information to the exporter and allows it to notify
- the utility via a callback.
+    Global state that is set when an Evaluation is run.
"""
- """Required for uploading the Log in the Exporter."""
+ """Required for associating a Log with the Evaluation Run."""
source_datapoint_id: str
"""Exporter calls this so the eval_utils are notified to evaluate an uploaded Log."""
upload_callback: Callable[[dict], None]
- """Logs of multiple Files can be uploaded by the Exporter while
- evaluating a single one of them. This identifies the File that
- owns Logs that are part of the Evaluation."""
- evaluated_file_id: str
+ """ID of the evaluated File."""
+ file_id: str
- """Required for uploading the Log in the Exporter."""
+ """Path of the evaluated File."""
+ path: str
+
+ """Required for associating a Log with the Evaluation Run."""
run_id: str
-EVALUATION_CONTEXT: ContextVar[typing.Optional[EvaluationContext]] = ContextVar("__EVALUATION_CONTEXT")
+EVALUATION_CONTEXT_VAR_NAME = "__EVALUATION_CONTEXT"
+
+EVALUATION_CONTEXT: ContextVar[typing.Optional[EvaluationContext]] = ContextVar(EVALUATION_CONTEXT_VAR_NAME)
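+
+# A minimal sketch of the intended flow (assumed usage, for illustration only):
+# each worker thread sets its own EvaluationContext before invoking the user's
+# callable, and readers treat a missing value as "not part of an Evaluation":
+#
+#     EVALUATION_CONTEXT.set(
+#         EvaluationContext(
+#             source_datapoint_id="dp_123",  # hypothetical ID
+#             upload_callback=lambda log: None,
+#             file_id="fl_456",  # hypothetical ID
+#             path="folder/file",
+#             run_id="run_789",  # hypothetical ID
+#         )
+#     )
+#     try:
+#         context = EVALUATION_CONTEXT.get()
+#     except LookupError:
+#         context = None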
diff --git a/src/humanloop/eval_utils/run.py b/src/humanloop/eval_utils/run.py
new file mode 100644
index 00000000..c537fae0
--- /dev/null
+++ b/src/humanloop/eval_utils/run.py
@@ -0,0 +1,737 @@
+"""
+Evaluation utils for the Humanloop SDK.
+
+This module provides a set of utilities to aid running Eval workflows on Humanloop
+where you are managing the runtime of your application in your code.
+
+Functions in this module should be accessed via the Humanloop client. They should
+not be called directly.
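+
+Example (a minimal sketch; `client`, `answer_question` and the paths below are
+hypothetical placeholders, and this utility is normally reached through the
+Humanloop client rather than called directly):
+
+    checks = run_eval(
+        client=client,
+        file={
+            "path": "qa/answer-question",
+            "type": "prompt",
+            "callable": answer_question,
+        },
+        name="qa-regression",
+        dataset={
+            "path": "qa/golden-dataset",
+            "datapoints": [{"inputs": {"question": "..."}}],
+        },
+        evaluators=[{"path": "qa/exact-match", "threshold": 0.8}],
+    )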
+"""
+
+import inspect
+import logging
+import sys
+import threading
+import time
+import types
+import typing
+from concurrent.futures import ThreadPoolExecutor
+from datetime import datetime
+from functools import partial
+from logging import INFO
+from typing import Callable, Dict, List, Literal, Optional, Sequence, Tuple, TypeVar, Union
+
+from pydantic import ValidationError
+
+
+from humanloop import EvaluatorResponse, FlowResponse, PromptResponse, ToolResponse
+
+from humanloop.prompts.client import PromptsClient
+from humanloop.core.api_error import ApiError
+from humanloop.eval_utils.context import EVALUATION_CONTEXT, EvaluationContext
+from humanloop.eval_utils.types import Dataset, Evaluator, EvaluatorCheck, File
+
+# We use TypedDicts for requests, which is consistent with the rest of the SDK
+from humanloop.evaluators.client import EvaluatorsClient
+from humanloop.flows.client import FlowsClient
+from humanloop.requests import CodeEvaluatorRequestParams as CodeEvaluatorDict
+from humanloop.requests import ExternalEvaluatorRequestParams as ExternalEvaluator
+from humanloop.requests import FlowKernelRequestParams as FlowDict
+from humanloop.requests import HumanEvaluatorRequestParams as HumanEvaluatorDict
+from humanloop.requests import LlmEvaluatorRequestParams as LLMEvaluatorDict
+from humanloop.requests import PromptKernelRequestParams as PromptDict
+from humanloop.requests import ToolKernelRequestParams as ToolDict
+from humanloop.tools.client import ToolsClient
+from humanloop.types import BooleanEvaluatorStatsResponse as BooleanStats
+from humanloop.types import DatapointResponse as Datapoint
+from humanloop.types import EvaluationResponse, EvaluationStats
+from humanloop.types.create_evaluator_log_response import CreateEvaluatorLogResponse
+from humanloop.types.create_flow_log_response import CreateFlowLogResponse
+from humanloop.types.create_prompt_log_response import CreatePromptLogResponse
+from humanloop.types.create_tool_log_response import CreateToolLogResponse
+from humanloop.types.datapoint_response_target_value import DatapointResponseTargetValue
+
+# Responses are Pydantic models and we leverage them for improved request validation
+from humanloop.types import FlowKernelRequest as Flow
+from humanloop.types import NumericEvaluatorStatsResponse as NumericStats
+from humanloop.types import PromptKernelRequest as Prompt
+from humanloop.types import ToolKernelRequest as Tool
+from humanloop.types.evaluation_run_response import EvaluationRunResponse
+from humanloop.types.run_stats_response import RunStatsResponse
+
+if typing.TYPE_CHECKING:
+ from humanloop.client import BaseHumanloop
+
+# Setup logging
+logger = logging.getLogger(__name__)
+logger.setLevel(INFO)
+console_handler = logging.StreamHandler()
+formatter = logging.Formatter("%(message)s")
+console_handler.setFormatter(formatter)
+if not logger.hasHandlers():
+ logger.addHandler(console_handler)
+
+EvaluatorDict = Union[CodeEvaluatorDict, LLMEvaluatorDict, HumanEvaluatorDict, ExternalEvaluator]
+Version = Union[FlowDict, PromptDict, ToolDict, EvaluatorDict]
+FileType = Literal["flow", "prompt", "tool", "evaluator"]
+
+
+# ANSI escape codes for logging colors
+YELLOW = "\033[93m"
+CYAN = "\033[96m"
+GREEN = "\033[92m"
+RED = "\033[91m"
+RESET = "\033[0m"
+
+
+CLIENT_TYPE = TypeVar("CLIENT_TYPE", PromptsClient, ToolsClient, FlowsClient, EvaluatorsClient)
+
+
+def log_with_evaluation_context(client: CLIENT_TYPE) -> CLIENT_TYPE:
+ """
+ Wrap the `log` method of the provided Humanloop client to use EVALUATION_CONTEXT.
+
+    This makes the overloaded log methods aware of whether the created Log is
+    part of an Evaluation (e.g. one started by eval_utils.run_eval).
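+
+    Example (a sketch of assumed usage; each sub-client is wrapped once):
+
+        client.prompts = log_with_evaluation_context(client.prompts)
+        client.flows = log_with_evaluation_context(client.flows)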
+ """
+
+ def _is_evaluated_file(
+ evaluation_context: EvaluationContext,
+ log_args: dict,
+ file_id_attribute: str,
+ ) -> bool:
+        """Check whether the File that the Log will be created against is part of the current Evaluation.
+
+        The user of the .log API can refer to the File that owns the Log either by
+        ID or by Path. This function matches against both fields in the EvaluationContext.
+ """
+ return evaluation_context.get("file_id") == log_args.get(file_id_attribute) or evaluation_context.get(
+ "path"
+ ) == log_args.get("path")
+
+ # Copy the original log method in a hidden attribute
+ client._log = client.log
+
+ def _overloaded_log(
+ self,
+ **kwargs,
+ ) -> Union[
+ CreatePromptLogResponse,
+ CreateToolLogResponse,
+ CreateFlowLogResponse,
+ CreateEvaluatorLogResponse,
+ ]:
+ evaluation_context: EvaluationContext
+ try:
+ evaluation_context = EVALUATION_CONTEXT.get() or {} # type: ignore
+ except LookupError:
+ evaluation_context = {} # type: ignore
+
+ if isinstance(client, PromptsClient):
+ file_id_attribute = "prompt_id"
+ elif isinstance(client, ToolsClient):
+ file_id_attribute = "tool_id"
+ elif isinstance(client, FlowsClient):
+ file_id_attribute = "flow_id"
+        elif isinstance(client, EvaluatorsClient):
+            file_id_attribute = "evaluator_id"
+        else:
+            raise NotImplementedError(f"Unsupported client type: {type(client)}")
+
+ if _is_evaluated_file(
+ evaluation_context=evaluation_context, # type: ignore
+ log_args=kwargs,
+ file_id_attribute=file_id_attribute,
+ ):
+ # If the .log API user does not provide the source_datapoint_id or run_id,
+ # override them with the values from the EvaluationContext
+ for attribute in ["source_datapoint_id", "run_id"]:
+ if attribute not in kwargs or kwargs[attribute] is None:
+ kwargs[attribute] = evaluation_context.get(attribute)
+
+ # Call the original .log method
+ response = self._log(**kwargs)
+
+ # Call the callback so the Evaluation can be updated
+ if _is_evaluated_file(
+ evaluation_context=evaluation_context, # type: ignore
+ log_args=kwargs,
+ file_id_attribute=file_id_attribute,
+ ):
+ # Notify that the Log has been added to the Evaluation
+ evaluation_context["upload_callback"](
+ {
+ "id": response.id,
+ **kwargs,
+ }
+ )
+ # Log has been added to evaluation, reset the context for current Thread
+
+ return response
+
+ # Replace the original log method with the overloaded one
+ client.log = types.MethodType(_overloaded_log, client) # type: ignore
+ # Return the client with the overloaded log method
+ return client
+
+
+class _SimpleProgressBar:
+ """Thread-safe progress bar for the console."""
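+
+    # Example (assumed usage): a single bar is shared by all worker threads;
+    # each thread calls increment() once per finished task, updating a console
+    # line like "[####------] 10/40 (25.00%) | ETA: 12s".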
+
+ def __init__(self, total: int):
+ if total <= 0:
+ self._total = 1
+ else:
+ self._total = total
+ self._progress = 0
+ self._lock = threading.Lock()
+ self._start_time = None
+
+ def increment(self):
+ """Increment the progress bar by one finished task."""
+ with self._lock:
+ self._progress += 1
+ if self._start_time is None:
+ self._start_time = time.time()
+
+ bar_length = 40
+ block = int(round(bar_length * self._progress / self._total))
+ bar = "#" * block + "-" * (bar_length - block)
+
+ percentage = (self._progress / self._total) * 100
+ elapsed_time = time.time() - self._start_time
+ time_per_item = elapsed_time / self._progress if self._progress > 0 else 0
+ eta = (self._total - self._progress) * time_per_item
+
+ progress_display = f"\r[{bar}] {self._progress}/{self._total}"
+ progress_display += f" ({percentage:.2f}%)"
+
+ if self._progress < self._total:
+ progress_display += f" | ETA: {int(eta)}s"
+ else:
+ progress_display += " | DONE"
+
+ sys.stderr.write(progress_display)
+
+ if self._progress >= self._total:
+ sys.stderr.write("\n")
+
+
+# Module-level so it can be shared by threads.
+_PROGRESS_BAR: Optional[_SimpleProgressBar] = None
+
+
+def run_eval(
+ client: "BaseHumanloop",
+ file: Union[File, Callable],
+ name: Optional[str],
+ dataset: Dataset,
+ evaluators: Optional[Sequence[Evaluator]] = None,
+ workers: int = 4,
+) -> List[EvaluatorCheck]:
+ """
+ Evaluate your function for a given `Dataset` and set of `Evaluators`.
+
+ :param client: the Humanloop API client.
+ :param file: the Humanloop file being evaluated, including a function to run over the dataset.
+ :param name: the name of the Evaluation to run. If it does not exist, a new Evaluation will be created under your File.
+ :param dataset: the dataset to map your function over to produce the outputs required by the Evaluation.
+ :param evaluators: define how judgments are provided for this Evaluation.
+ :param workers: the number of threads to process datapoints using your function concurrently.
+ :return: per Evaluator checks.
+ """
+ global _PROGRESS_BAR
+
+ if isinstance(file, Callable): # type: ignore
+ # Decorated function
+ file_: File = file.file # type: ignore
+ else:
+ file_ = file # type: ignore
+
+ is_decorated = file_.pop("is_decorated", False)
+
+    # Pull out the version details; the File itself is upserted on Humanloop below
+ version = file_.pop("version", {})
+
+    # Raise an error if neither a path nor an id is provided
+ if not file_.get("path") and not file_.get("id"):
+ raise ValueError("You must provide a path or id in your `file`.")
+
+    # Determine the `type` of the `file` to evaluate - if no `type` is provided, default to `flow`
+ try:
+ type_ = typing.cast(FileType, file_.pop("type"))
+ logger.info(
+ f"{CYAN}Evaluating your {type_} function corresponding to `{file_['path']}` on Humanloop{RESET} \n\n"
+ )
+    except KeyError:
+ type_ = "flow"
+ logger.warning("No `file` type specified, defaulting to flow.")
+
+ # If a `callable` is provided, Logs will be generated locally, otherwise Logs will be generated on Humanloop.
+ function_ = typing.cast(Optional[Callable], file_.pop("callable", None))
+ if function_ is None:
+ if type_ == "flow":
+ raise ValueError("You must provide a `callable` for your Flow `file` to run a local eval.")
+ else:
+ logger.info(f"No `callable` provided for your {type_} file - will attempt to generate logs on Humanloop.")
+
+ custom_logger = file_.pop("custom_logger", None)
+ file_dict = {**file_, **version}
+ hl_file: Union[PromptResponse, FlowResponse, ToolResponse, EvaluatorResponse]
+
+ # NOTE: This could be cleaner, use polymorphism to avoid the if-else
+ if type_ == "flow":
+ # Be more lenient with Flow versions as they are arbitrary json
+ try:
+ Flow.model_validate(version)
+ except ValidationError:
+ flow_version = {"attributes": version}
+ file_dict = {**file_, **flow_version}
+ hl_file = client.flows.upsert(**file_dict) # type: ignore
+
+ elif type_ == "prompt":
+ try:
+ Prompt.model_validate(version)
+ except ValidationError as error_:
+            logger.error(msg=f"Invalid Prompt `version` in your `file` request.\n\nValidation error:\n{error_}")
+ raise error_
+        hl_file = client.prompts.upsert(**file_dict)  # type: ignore
+
+ elif type_ == "tool":
+ try:
+ Tool.model_validate(version)
+ except ValidationError as error_:
+            logger.error(msg=f"Invalid Tool `version` in your `file` request.\n\nValidation error:\n{error_}")
+ raise error_
+ hl_file = client.tools.upsert(**file_dict) # type: ignore
+
+ elif type_ == "evaluator":
+ hl_file = client.evaluators.upsert(**file_dict) # type: ignore
+
+ else:
+ raise NotImplementedError(f"Unsupported File type: {type_}")
+
+ # Upsert the Dataset
+ hl_dataset = client.datasets.upsert(**dataset)
+ hl_dataset = client.datasets.get(id=hl_dataset.id, include_datapoints=True)
+
+ # Upsert the local Evaluators; other Evaluators are just referenced by `path` or `id`
+ local_evaluators: List[Evaluator] = []
+ if evaluators:
+ for evaluator in evaluators:
+ # If a callable is provided for an Evaluator, we treat it as External
+ eval_function = evaluator.get("callable")
+ if eval_function is not None:
+ # TODO: support the case where `file` logs generated on Humanloop but Evaluator logs generated locally
+ if function_ is None:
+ raise ValueError(
+ f"Local Evaluators are only supported when generating Logs locally using your {type_}'s `callable`. Please provide a `callable` for your file in order to run Evaluators locally."
+ )
+ local_evaluators.append(evaluator)
+ spec = ExternalEvaluator(
+ arguments_type=evaluator["args_type"],
+ return_type=evaluator["return_type"],
+ attributes={"code": inspect.getsource(eval_function)},
+ evaluator_type="external",
+ )
+ client.evaluators.upsert(
+ id=evaluator.get("id"),
+ path=evaluator.get("path"),
+ spec=spec,
+ )
+ function_ = typing.cast(Callable, function_)
+
+ # Validate signature of the called function
+ function_signature = inspect.signature(function_)
+ parameter_names = list(function_signature.parameters.keys())
+ if parameter_names != ["inputs", "messages"] and parameter_names != ["inputs"]:
+ raise ValueError(
+ f"Your {type_}'s `callable` must have the signature `def "
+ "function(inputs: dict, messages: Optional[dict] = None):` "
+ "or `def function(inputs: dict):`"
+ )
+
+ # Validate upfront that the local Evaluators and Dataset fit
+ requires_target = False
+ for local_evaluator in local_evaluators:
+ if local_evaluator["args_type"] == "target_required":
+ requires_target = True
+ break
+ if requires_target:
+ missing_target = 0
+ for datapoint in hl_dataset.datapoints: # type: ignore
+ if not datapoint.target:
+ missing_target += 1
+ if missing_target > 0:
+ raise ValueError(
+ f"{missing_target} Datapoints have no target. A target is required for the Evaluator: {local_evaluator['path']}"
+ )
+
+ # Get or create the Evaluation based on the name
+ evaluation = None
+ try:
+ evaluation = client.evaluations.create(
+ name=name,
+ evaluators=[{"path": e["path"]} for e in evaluators], # type: ignore
+ file={"id": hl_file.id},
+ )
+ except ApiError as error_:
+ # If the name exists, go and get it # TODO: Update API GET to allow querying by name and file.
+ if error_.status_code == 409:
+ evals = client.evaluations.list(file_id=hl_file.id, size=50)
+ for page in evals.iter_pages():
+ evaluation = next((e for e in page.items if e.name == name), None) # type: ignore
+ else:
+ raise error_
+ if not evaluation:
+ raise ValueError(f"Evaluation with name {name} not found.")
+
+ # Create a new Run
+ run: EvaluationRunResponse = client.evaluations.create_run(
+ id=evaluation.id,
+ dataset={"version_id": hl_dataset.version_id},
+ orchestrated=False,
+ )
+ # Every Run will generate a new batch of Logs
+ run_id = run.id
+
+ _PROGRESS_BAR = _SimpleProgressBar(len(hl_dataset.datapoints)) # type: ignore
+
+ if is_decorated:
+
+ def process_datapoint(dp: Datapoint, file_id: str, file_path: str, run_id: str):
+ def upload_callback(log: dict):
+ # OTel exporter will call this after the Log is uploaded
+ _add_log_to_evaluation(
+ client=client,
+ log=log,
+ datapoint_target=dp.target,
+ local_evaluators=local_evaluators,
+ )
+ _PROGRESS_BAR.increment() # type: ignore
+
+ datapoint_dict = dp.dict()
+ # Set the Evaluation Context for the Exporter
+ # Each thread will have its own context
+ EVALUATION_CONTEXT.set(
+ EvaluationContext(
+ source_datapoint_id=dp.id,
+ upload_callback=upload_callback,
+ file_id=file_id,
+ run_id=run_id,
+ path=file_path,
+ )
+ )
+ if datapoint_dict.get("messages"):
+ # function_ is decorated by Humanloop, the OTel Exporter will
+ # handle the logging, which will call the upload_callback
+ # function above when it's done
+ function_( # type: ignore
+ datapoint_dict["inputs"],
+ messages=datapoint_dict["messages"],
+ )
+ else:
+ # function_ is decorated by Humanloop, the OTel Exporter will
+ # handle the logging, which will call the upload_callback
+ # function above when it's done
+ function_(datapoint_dict["inputs"]) # type: ignore
+
+ else:
+        # Define the function that runs your callable on each Datapoint and Logs the result to Humanloop
+ def process_datapoint(dp: Datapoint, file_id: str, file_path: str, run_id: str):
+ log_func = _get_log_func(
+ client=client,
+ file_type=type_,
+ file_id=hl_file.id,
+ version_id=hl_file.version_id,
+ run_id=run_id,
+ )
+
+ start_time = datetime.now()
+ datapoint_dict = dp.dict()
+ try:
+ if "messages" in datapoint_dict:
+ output = function_( # type: ignore
+ **datapoint_dict["inputs"],
+ messages=datapoint_dict["messages"],
+ )
+ else:
+ output = function_(**datapoint_dict["inputs"]) # type: ignore
+ if custom_logger:
+ log = custom_logger(client=client, output=output) # type: ignore
+ else:
+ if not isinstance(output, str):
+ raise ValueError(
+ f"Your {type_}'s `callable` must return a string if you do not provide a custom logger."
+ )
+ log = log_func(
+ inputs=dp.inputs,
+ output=output,
+ source_datapoint_id=dp.id,
+ start_time=start_time,
+ end_time=datetime.now(),
+ )
+ except Exception as e:
+ log = log_func(
+ inputs=dp.inputs,
+ error=str(e),
+ source_datapoint_id=dp.id,
+ start_time=start_time,
+ end_time=datetime.now(),
+ )
+ logger.warning(msg=f"\nYour {type_}'s `callable` failed for Datapoint: {dp.id}. \n Error: {str(e)}")
+
+ _add_log_to_evaluation(
+ client=client,
+ log=log,
+ datapoint_target=dp.target,
+ local_evaluators=local_evaluators,
+ )
+ _PROGRESS_BAR.increment() # type: ignore
+
+ # Execute the function and send the logs to Humanloop in parallel
+ logger.info(f"\n{CYAN}Navigate to your Evaluation:{RESET}\n{evaluation.url}\n")
+ logger.info(f"{CYAN}{type_.capitalize()} Version ID: {hl_file.version_id}{RESET}")
+ logger.info(f"{CYAN}Run ID: {run_id}{RESET}")
+
+ # Generate locally if a file `callable` is provided
+ if function_: # type: ignore
+ logger.info(
+ f"{CYAN}\nRunning '{hl_file.name}' over the Dataset '{hl_dataset.name}' using {workers} workers{RESET} "
+ )
+ with ThreadPoolExecutor(max_workers=workers) as executor:
+ for datapoint in hl_dataset.datapoints: # type: ignore
+ executor.submit(
+ process_datapoint,
+ datapoint,
+ hl_file.id,
+ hl_file.path,
+ run_id,
+ )
+ else:
+ # TODO: trigger run when updated API is available
+ logger.info(f"{CYAN}\nRunning '{hl_file.name}' over the Dataset '{hl_dataset.name}'{RESET}")
+
+ # Wait for the Evaluation to complete then print the results
+ complete = False
+
+ while not complete:
+ stats = client.evaluations.get_stats(id=evaluation.id)
+ logger.info(f"\r{stats.progress}")
+ complete = stats.status == "completed"
+ if not complete:
+ time.sleep(5)
+
+ # Print Evaluation results
+ logger.info(stats.report)
+
+ checks: List[EvaluatorCheck] = []
+
+ # Skip `check_evaluation_improvement` if no thresholds were provided and there is only one run.
+    # (Otherwise the log output would not be helpful.)
+ if any(evaluator.get("threshold") is not None for evaluator in evaluators) or len(stats.run_stats) > 1: # type: ignore
+ for evaluator in evaluators: # type: ignore
+ score, delta = _check_evaluation_improvement(
+ evaluation=evaluation,
+ stats=stats,
+ evaluator_path=evaluator["path"],
+ run_id=run_id,
+ )[1:]
+ threshold_check = None
+ threshold = evaluator.get("threshold")
+ if threshold is not None:
+ threshold_check = _check_evaluation_threshold(
+ evaluation=evaluation,
+ stats=stats,
+ evaluator_path=evaluator["path"],
+ threshold=threshold,
+ run_id=run_id,
+ )
+ checks.append(
+ EvaluatorCheck(
+ path=evaluator["path"],
+ # TODO: Add back in with number valence on Evaluators
+ # improvement_check=improvement_check,
+ score=score,
+ delta=delta,
+ threshold=threshold,
+ threshold_check=threshold_check,
+ evaluation_id=evaluation.id,
+ )
+ )
+
+ logger.info(f"\n{CYAN}View your Evaluation:{RESET}\n{evaluation.url}\n")
+ return checks
+
+
+def _get_log_func(
+ client: "BaseHumanloop",
+ file_type: FileType,
+ file_id: str,
+ version_id: str,
+ run_id: str,
+) -> Callable:
+ """Returns the appropriate log function pre-filled with common parameters."""
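+    # For example (a sketch), for a Prompt the returned callable behaves
+    # roughly like:
+    #     lambda **kwargs: client.prompts.log(id=file_id, version_id=version_id, run_id=run_id, **kwargs)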
+ log_request = {
+ # TODO: why does the Log `id` field refer to the file ID in the API?
+ # Why are both `id` and `version_id` needed in the API?
+ "id": file_id,
+ "version_id": version_id,
+ "run_id": run_id,
+ }
+ if file_type == "flow":
+ return partial(client.flows.log, **log_request, trace_status="complete")
+ elif file_type == "prompt":
+ return partial(client.prompts.log, **log_request)
+ elif file_type == "evaluator":
+ return partial(client.evaluators.log, **log_request)
+ elif file_type == "tool":
+ return partial(client.tools.log, **log_request)
+ else:
+        raise NotImplementedError(f"Unsupported File type: {file_type}")
+
+
+def _get_score_from_evaluator_stat(
+ stat: Union[NumericStats, BooleanStats],
+) -> Union[float, None]:
+ """Get the score from an Evaluator Stat."""
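+    # Worked example (a sketch): BooleanStats with num_true=8 and total_logs=10
+    # gives round(8 / 10, 2) == 0.8; NumericStats with mean=0.8456 gives 0.85.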
+ score = None
+ if isinstance(stat, BooleanStats):
+ if stat.total_logs:
+ score = round(stat.num_true / stat.total_logs, 2)
+ elif isinstance(stat, NumericStats):
+ score = round(stat.mean, 2) # type: ignore
+ else:
+ raise ValueError(f"Unsupported Evaluator Stat type: {type(stat)}")
+ return score # type: ignore
+
+
+def _get_evaluator_stats_by_path(
+ stat: RunStatsResponse,
+ evaluation: EvaluationResponse,
+) -> Dict[str, Union[NumericStats, BooleanStats]]:
+ """Get the Evaluator stats by path."""
+ # TODO: Update the API so this is not necessary
+ evaluators_by_id = {evaluator.version.version_id: evaluator for evaluator in evaluation.evaluators}
+ evaluator_stats_by_path = {
+ evaluators_by_id[evaluator_stat.evaluator_version_id].version.path: evaluator_stat
+ for evaluator_stat in stat.evaluator_stats
+ }
+ return evaluator_stats_by_path # type: ignore
+
+
+def _check_evaluation_threshold(
+ evaluation: EvaluationResponse,
+ stats: EvaluationStats,
+ evaluator_path: str,
+ threshold: float,
+ run_id: str,
+) -> bool:
+    """Check whether the latest Run has an average Evaluator result above the threshold."""
+ # TODO: Update the API so this is not necessary
+ evaluator_stats_by_path = _get_evaluator_stats_by_path(
+ stat=next(
+ (stat for stat in stats.run_stats if stat.run_id == run_id),
+ None, # type: ignore
+ ),
+ evaluation=evaluation,
+ )
+ if evaluator_path in evaluator_stats_by_path:
+ evaluator_stat = evaluator_stats_by_path[evaluator_path]
+ score = _get_score_from_evaluator_stat(stat=evaluator_stat)
+ if score >= threshold: # type: ignore
+ logger.info(
+ f"{GREEN}✅ Latest eval [{score}] above threshold [{threshold}] for evaluator {evaluator_path}.{RESET}"
+ )
+ return True
+ else:
+ logger.info(
+ f"{RED}❌ Latest score [{score}] below the threshold [{threshold}] for evaluator {evaluator_path}.{RESET}"
+ )
+ return False
+ else:
+ raise ValueError(f"Evaluator {evaluator_path} not found in the stats.")
+
+
+def _check_evaluation_improvement(
+ evaluation: EvaluationResponse,
+ evaluator_path: str,
+ stats: EvaluationStats,
+ run_id: str,
+) -> Tuple[bool, float, float]:
+ """
+    Check whether the latest Run has improved for a specific Evaluator.
+
+ :returns: A tuple of (improvement, latest_score, delta since previous score)
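+
+    For example (a sketch): if the latest Run scores 0.85 and the previous Run
+    scored 0.80 for this Evaluator, the function returns (True, 0.85, 0.05).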
+ """
+ # TODO: Update the API so this is not necessary
+
+ latest_evaluator_stats_by_path = _get_evaluator_stats_by_path(
+ stat=next(
+ (stat for stat in stats.run_stats if stat.run_id == run_id),
+ None, # type: ignore
+ ),
+ evaluation=evaluation,
+ )
+ if len(stats.run_stats) == 1:
+ logger.info(f"{YELLOW}⚠️ No previous versions to compare with.{RESET}")
+ return True, 0, 0
+
+ previous_evaluator_stats_by_path = _get_evaluator_stats_by_path(stat=stats.run_stats[-2], evaluation=evaluation)
+ if evaluator_path in latest_evaluator_stats_by_path and evaluator_path in previous_evaluator_stats_by_path:
+ latest_evaluator_stat = latest_evaluator_stats_by_path[evaluator_path]
+ previous_evaluator_stat = previous_evaluator_stats_by_path[evaluator_path]
+ latest_score = _get_score_from_evaluator_stat(stat=latest_evaluator_stat)
+ previous_score = _get_score_from_evaluator_stat(stat=previous_evaluator_stat)
+ diff = round(latest_score - previous_score, 2) # type: ignore
+ if diff >= 0:
+ logger.info(f"{CYAN}Change of [{diff}] for Evaluator {evaluator_path}{RESET}")
+ return True, latest_score, diff # type: ignore
+ else:
+ logger.info(f"{CYAN}Change of [{diff}] for Evaluator {evaluator_path}{RESET}")
+ return False, latest_score, diff # type: ignore
+ else:
+ raise ValueError(f"Evaluator {evaluator_path} not found in the stats.")
+
+
+def _add_log_to_evaluation(
+ client: "BaseHumanloop",
+ log: dict,
+ datapoint_target: typing.Optional[typing.Dict[str, DatapointResponseTargetValue]],
+ local_evaluators: list[Evaluator],
+):
+ for local_evaluator in local_evaluators:
+ start_time = datetime.now()
+ try:
+ eval_function = local_evaluator["callable"]
+ if local_evaluator["args_type"] == "target_required":
+ judgement = eval_function(
+ log,
+ datapoint_target,
+ )
+ else:
+ judgement = eval_function(log)
+
+ if local_evaluator.get("custom_logger", None):
+ local_evaluator["custom_logger"](judgement, start_time, datetime.now())
+ else:
+ _ = client.evaluators.log(
+ parent_id=log["id"],
+ judgment=judgement,
+ id=local_evaluator.get("id"),
+ path=local_evaluator.get("path"),
+ start_time=start_time,
+ end_time=datetime.now(),
+ )
+ except Exception as e:
+ _ = client.evaluators.log(
+ parent_id=log["id"],
+ path=local_evaluator.get("path"),
+ id=local_evaluator.get("id"),
+ error=str(e),
+ start_time=start_time,
+ end_time=datetime.now(),
+ )
+ logger.warning(f"\nEvaluator {local_evaluator['path']} failed with error {str(e)}")
diff --git a/src/humanloop/otel/exporter.py b/src/humanloop/otel/exporter.py
index 739f6489..ad501d58 100644
--- a/src/humanloop/otel/exporter.py
+++ b/src/humanloop/otel/exporter.py
@@ -11,8 +11,7 @@
from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult
from humanloop.core import ApiError as HumanloopApiError
-from humanloop.core.request_options import RequestOptions
-from humanloop.eval_utils import EVALUATION_CONTEXT, EvaluationContext
+from humanloop.eval_utils.context import EVALUATION_CONTEXT, EvaluationContext
from humanloop.otel import TRACE_FLOW_CONTEXT, FlowContext
from humanloop.otel.constants import HL_FILE_KEY, HL_FILE_TYPE_KEY, HL_LOG_KEY, HL_PATH_KEY
from humanloop.otel.helpers import is_humanloop_span, read_from_opentelemetry_span
@@ -51,23 +50,30 @@ def __init__(
self._upload_queue: Queue = Queue()
# Worker threads to export the spans
self._threads: list[Thread] = [
- Thread(target=self._do_work, daemon=True) for _ in range(worker_threads or self.DEFAULT_NUMBER_THREADS)
+ Thread(
+ target=self._do_work,
+ daemon=True,
+ )
+ for _ in range(worker_threads or self.DEFAULT_NUMBER_THREADS)
]
- self._shutdown: bool = (
- False # Signals threads no more work will arrive and they should wind down if the queue is empty
- )
+ # Signals threads no more work will arrive and
+ # they should wind down if the queue is empty
+ self._shutdown: bool = False
for thread in self._threads:
thread.start()
def export(self, spans: trace.Sequence[ReadableSpan]) -> SpanExportResult:
if not self._shutdown:
+ try:
+ evaluation_context = EVALUATION_CONTEXT.get()
+ except LookupError:
+ evaluation_context = None
for span in spans:
if is_humanloop_span(span):
- try:
- evaluation_context = EVALUATION_CONTEXT.get()
- except LookupError:
- # Decorators are not used in a client.evaluations.run() context
- evaluation_context = {} # type: ignore
+                    # The thread doing the logging is different from the
+                    # thread spawned by eval_utils.run.run_eval, so we pass
+                    # the EvaluationContext along with the span to the
+                    # logging thread
self._upload_queue.put((span, evaluation_context))
return SpanExportResult.SUCCESS
else:
@@ -106,30 +112,29 @@ def _do_work(self):
while self._upload_queue.qsize() > 0 or not self._shutdown:
try:
# Don't block or the thread will never be notified of the shutdown
- thread_args: tuple[ReadableSpan, EvaluationContext] = self._upload_queue.get(block=False)
+ thread_args: tuple[ReadableSpan, EvaluationContext] = self._upload_queue.get(block=False) # type: ignore
span_to_export, evaluation_context = thread_args
+ # Set the EvaluationContext for the thread so the .log action
+ # works as expected
+ EVALUATION_CONTEXT.set(evaluation_context)
except EmptyQueue:
continue
trace_metadata = TRACE_FLOW_CONTEXT.get(span_to_export.get_span_context().span_id)
if trace_metadata is None:
# Span is not part of a Flow Log
- self._export_span_dispatch(span_to_export, evaluation_context)
+ self._export_span_dispatch(span_to_export)
elif trace_metadata["trace_parent_id"] is None:
# Span is the head of a Flow Trace
- self._export_span_dispatch(span_to_export, evaluation_context)
+ self._export_span_dispatch(span_to_export)
elif trace_metadata["trace_parent_id"] in self._span_id_to_uploaded_log_id:
# Span is part of a Flow and its parent has been uploaded
- self._export_span_dispatch(span_to_export, evaluation_context)
+ self._export_span_dispatch(span_to_export)
else:
# Requeue the Span to be uploaded later
self._upload_queue.put((span_to_export, evaluation_context))
self._upload_queue.task_done()
- def _export_span_dispatch(
- self,
- span: ReadableSpan,
- evaluation_context: EvaluationContext,
- ) -> None:
+ def _export_span_dispatch(self, span: ReadableSpan) -> None:
hl_file = read_from_opentelemetry_span(span, key=HL_FILE_KEY)
file_type = span._attributes.get(HL_FILE_TYPE_KEY) # type: ignore
@@ -141,13 +146,9 @@ def _export_span_dispatch(
export_func = self._export_flow
else:
raise NotImplementedError(f"Unknown span type: {hl_file}")
- export_func(span=span, evaluation_context=evaluation_context)
+ export_func(span=span)
- def _export_prompt(
- self,
- span: ReadableSpan,
- evaluation_context: EvaluationContext,
- ) -> None:
+ def _export_prompt(self, span: ReadableSpan) -> None:
file_object: dict[str, Any] = read_from_opentelemetry_span(
span,
key=HL_FILE_KEY,
@@ -160,14 +161,11 @@ def _export_prompt(
# If not present, instantiate as empty dictionary
if "inputs" not in log_object:
log_object["inputs"] = {}
- # NOTE: Due to OTel conventions, lists are read as dictionaries
- # E.g. ["a", "b"] -> {"0": "a", "1": "b"}
- # We must convert the dictionary back to a list
- # See humanloop.otel.helpers._list_to_ott
if "messages" not in log_object:
log_object["messages"] = []
- else:
- log_object["messages"] = list(log_object["messages"].values())
+        # Default `tools` to an empty list, as with `messages` above
+ if "tools" not in file_object["prompt"]:
+ file_object["prompt"]["tools"] = []
trace_metadata = TRACE_FLOW_CONTEXT.get(span.get_span_context().span_id)
if trace_metadata and "trace_parent_id" in trace_metadata and trace_metadata["trace_parent_id"]:
trace_parent_id = self._span_id_to_uploaded_log_id[trace_metadata["trace_parent_id"]]
@@ -192,24 +190,13 @@ def _export_prompt(
prompt=prompt,
**log_object,
trace_parent_id=trace_parent_id,
- source_datapoint_id=evaluation_context.get("source_datapoint_id"),
- run_id=evaluation_context.get("run_id"),
- request_options=RequestOptions(max_retries=3),
)
- if evaluation_context and log_response.prompt_id == evaluation_context["evaluated_file_id"]:
- # Multiple Logs could be triggered by the Evaluation of a single File
- log_object["id"] = log_response.id
- evaluation_context["upload_callback"](log_object)
self._span_id_to_uploaded_log_id[span.context.span_id] = log_response.id
except HumanloopApiError as e:
- logger.error(str(e))
+            logger.error(f"Failed to upload Prompt Log: {e}")
self._span_id_to_uploaded_log_id[span.context.span_id] = None
- def _export_tool(
- self,
- span: ReadableSpan,
- evaluation_context: EvaluationContext,
- ) -> None:
+ def _export_tool(self, span: ReadableSpan) -> None:
file_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HL_FILE_KEY)
log_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HL_LOG_KEY)
trace_metadata: FlowContext = TRACE_FLOW_CONTEXT.get(span.get_span_context().span_id, {})
@@ -240,22 +227,13 @@ def _export_tool(
tool=tool,
**log_object,
trace_parent_id=trace_parent_id,
- request_options=RequestOptions(max_retries=3),
)
- if evaluation_context and log_response.tool_id == evaluation_context["evaluated_file_id"]:
- # Multiple Logs could be triggered by the Evaluation of a single File
- log_object["id"] = log_response.id
- evaluation_context["upload_callback"](log_object)
self._span_id_to_uploaded_log_id[span.context.span_id] = log_response.id
except HumanloopApiError as e:
- logger.error(str(e))
+            logger.error(f"Failed to upload Tool Log: {e}")
self._span_id_to_uploaded_log_id[span.context.span_id] = None
- def _export_flow(
- self,
- span: ReadableSpan,
- evaluation_context: EvaluationContext,
- ) -> None:
+ def _export_flow(self, span: ReadableSpan) -> None:
file_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HL_FILE_KEY)
log_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HL_LOG_KEY)
trace_metadata: FlowContext = TRACE_FLOW_CONTEXT.get(
@@ -266,8 +244,10 @@ def _export_flow(
trace_parent_id = self._span_id_to_uploaded_log_id.get(
trace_metadata["trace_parent_id"], # type: ignore
)
- if trace_parent_id is None:
+ if trace_parent_id is None and trace_metadata["trace_id"] != span.get_span_context().span_id:
# Parent Log in Trace upload failed
+ # NOTE: Check if the trace_id metadata field points to the
+ # span itself. This signifies the span is the head of the Trace
file_path = read_from_opentelemetry_span(span, key=HL_PATH_KEY)
logger.error(f"Skipping log for {file_path}: parent Log upload failed")
return
@@ -289,14 +269,8 @@ def _export_flow(
flow=flow,
**log_object,
trace_parent_id=trace_parent_id,
- source_datapoint_id=evaluation_context.get("source_datapoint_id"),
- run_id=evaluation_context.get("run_id"),
- request_options=RequestOptions(max_retries=3),
)
- if evaluation_context and log_response.flow_id == evaluation_context["evaluated_file_id"]:
- # Multiple Logs could be triggered by the Evaluation of a single File
- evaluation_context["upload_callback"](log_object)
self._span_id_to_uploaded_log_id[span.get_span_context().span_id] = log_response.id
except HumanloopApiError as e:
- logger.error(str(e))
+            logger.error(f"Failed to upload Flow Log: {e}")
self._span_id_to_uploaded_log_id[span.context.span_id] = None
diff --git a/src/humanloop/otel/helpers.py b/src/humanloop/otel/helpers.py
index 89fc85ed..23f11d75 100644
--- a/src/humanloop/otel/helpers.py
+++ b/src/humanloop/otel/helpers.py
@@ -54,22 +54,6 @@ def write_to_opentelemetry_span(
to None will be silently dropped. Consider adding a placeholder value if the key should
be present in the span attributes.
- :param span: OpenTelemetry span to write values to
-
- :param value: Python object to write to the span attributes. Can also be a primitive value.
-
- :param key: Key prefix to write to the span attributes. The path to the values does not need to exist in the span attributes.
- """
- to_write_copy: Union[dict, AttributeValue]
- if isinstance(value, list):
- to_write_copy = _list_to_ott(value)
- else:
- to_write_copy = dict(value) # type: ignore
- linearised_attributes: dict[str, AttributeValue] = {}
- work_stack: list[tuple[str, Union[AttributeValue, NestedDict]]] = [(key, to_write_copy)]
- """
- Recurse through the dictionary value, building the OTel format keys in a DFS manner.
-
Example:
```python
{
@@ -90,8 +74,23 @@ def write_to_opentelemetry_span(
'baz.0': 42,
'baz.1': 43
}
+    ```
+
+    :param span: OpenTelemetry span to write values to
+
+    :param value: Python object to write to the span attributes. Can also be a primitive value.
+
+    :param key: Key prefix to write to the span attributes. The path to the values does not need to exist in the span attributes.
-    ```
"""
+
+ to_write_copy: Union[dict, AttributeValue]
+ if isinstance(value, list):
+ to_write_copy = _list_to_ott(value)
+ else:
+ to_write_copy = dict(value) # type: ignore
+ linearised_attributes: dict[str, AttributeValue] = {}
+ work_stack: list[tuple[str, Union[AttributeValue, NestedDict]]] = [(key, to_write_copy)]
+
# Remove all keys with the prefix to avoid duplicates
for attribute_key in span._attributes.keys(): # type: ignore
if attribute_key.startswith(key):
@@ -102,6 +101,11 @@ def write_to_opentelemetry_span(
if isinstance(value, dict):
for sub_key, sub_value in value.items():
work_stack.append((f"{key}.{sub_key}" if key else sub_key, sub_value))
+ elif isinstance(value, list):
+ # OTel does not allow lists of complex objects, so we linearise them
+ # by mapping each dict to an index key and recursing into the dict
+ for idx, list_value in enumerate(value):
+ work_stack.append((f"{key}.{idx}" if key else idx, list_value))
else:
linearised_attributes[key] = value # type: ignore
for final_key, final_value in linearised_attributes.items():
@@ -184,14 +188,55 @@ def read_from_opentelemetry_span(span: ReadableSpan, key: str = "") -> NestedDic
len_parts = len(parts)
sub_result: dict[str, Union[dict, AttributeValue]] = result
for idx, part in enumerate(parts):
+            # For a key formatted like 'foo.bar.baz', walk the parts and
+            # assign the span value at the innermost dict
if idx == len_parts - 1:
+ # Final part of the key
sub_result[part] = span_value
else:
if part not in sub_result:
+                    # Create a new nested dict for this part of the key
sub_result[part] = {}
sub_result = sub_result[part] # type: ignore
+ def pseudo_to_list(sub_dict):
+ """Convert pseudo-dictionary to list if all keys are numeric.
+
+ Conversion happens bottom up.
+
+ Example:
+ ```python
+ {
+ '0': 'a',
+ '1': 'b',
+ '2': 'c'
+ }
+
+ ->
+
+ ['a', 'b', 'c']
+ ```
+ """
+ if not isinstance(sub_dict, dict):
+ # Primitive value
+ return sub_dict
+        for key, value in sub_dict.items():
+            # Recurse into nested values first: conversion happens bottom-up
+            sub_dict[key] = pseudo_to_list(value)
+        if all(str.isnumeric(key) for key in sub_dict.keys()):
+            # If all keys are numeric, convert to list
+            return list(sub_dict.values())
+        return sub_dict
+
+ result = pseudo_to_list(result)
+ if "" in result:
+ # User read the root of attributes
+ return result[""]
+
for part in key.split("."):
+ if str.isnumeric(part):
+ part = int(part)
result = result[part] # type: ignore
return result
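+
+
+# Worked example (a sketch): attributes {"key.0.x": 7, "key.0.y": "foo"} read
+# with key="key" are first rebuilt as {"0": {"x": 7, "y": "foo"}} and then
+# converted by pseudo_to_list into [{"x": 7, "y": "foo"}].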
diff --git a/src/humanloop/otel/processor.py b/src/humanloop/otel/processor.py
index 5ff112e5..c9cffc8e 100644
--- a/src/humanloop/otel/processor.py
+++ b/src/humanloop/otel/processor.py
@@ -127,6 +127,7 @@ def _enrich_prompt_kernel(prompt_span: ReadableSpan, llm_provider_call_span: Rea
prompt["max_tokens"] = prompt.get("max_tokens") or gen_ai_object.get("request", {}).get("max_tokens", None)
prompt["presence_penalty"] = prompt.get("presence_penalty") or llm_object.get("presence_penalty", None)
prompt["frequency_penalty"] = prompt.get("frequency_penalty") or llm_object.get("frequency_penalty", None)
+ prompt["tools"] = list(prompt.get("tools", {}).values())
try:
# Validate the Prompt Kernel
@@ -157,13 +158,12 @@ def _enrich_prompt_log(prompt_span: ReadableSpan, llm_provider_call_span: Readab
if "output_tokens" not in hl_log:
hl_log["output_tokens"] = gen_ai_object.get("usage", {}).get("completion_tokens")
if len(gen_ai_object.get("completion", [])) > 0:
- hl_log["finish_reason"] = gen_ai_object.get("completion", {}).get("0", {}).get("finish_reason")
- # Note: read_from_opentelemetry_span returns the list as a dict due to Otel conventions
+ hl_log["finish_reason"] = gen_ai_object["completion"][0].get("finish_reason")
hl_log["messages"] = gen_ai_object.get("prompt")
try:
inputs = {}
- system_message = gen_ai_object["prompt"]["0"]["content"]
+ system_message = gen_ai_object["prompt"][0]["content"]
template = hl_file["prompt"]["template"]
parsed = parse.parse(template, system_message)
for key, value in parsed.named.items():
diff --git a/tests/conftest.py b/tests/conftest.py
index c3c396b7..5e626b39 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -1,10 +1,7 @@
-from typing import Callable, Generator
+from typing import Generator
from unittest.mock import MagicMock
import pytest
-from humanloop.decorators.flow import flow
-from humanloop.decorators.prompt import prompt
-from humanloop.decorators.tool import tool
from humanloop.otel.exporter import HumanloopSpanExporter
from humanloop.otel.processor import HumanloopSpanProcessor
from openai.types.chat.chat_completion_message_param import ChatCompletionMessageParam
diff --git a/tests/otel/test_helpers.py b/tests/otel/test_helpers.py
index 635030de..c409640e 100644
--- a/tests/otel/test_helpers.py
+++ b/tests/otel/test_helpers.py
@@ -62,10 +62,10 @@ def test_list(test_span: Span):
"key.0.y": "foo",
"key.1.z": "bar",
}
- assert read_from_opentelemetry_span(test_span, "key") == {
- "0": {"x": 7, "y": "foo"},
- "1": {"z": "bar"},
- }
+ assert read_from_opentelemetry_span(test_span, "key") == [
+ {"z": "bar"},
+ {"x": 7, "y": "foo"},
+ ]
def test_list_no_prefix(test_span: Span):
@@ -79,10 +79,10 @@ def test_list_no_prefix(test_span: Span):
"0.y": "foo",
"1.z": "bar",
}
- assert read_from_opentelemetry_span(test_span) == {
- "0": {"x": 7, "y": "foo"},
- "1": {"z": "bar"},
- }
+ assert read_from_opentelemetry_span(test_span) == [
+ {"z": "bar"},
+ {"x": 7, "y": "foo"},
+ ]
def test_multiple_nestings(test_span: Span):
@@ -94,20 +94,19 @@ def test_multiple_nestings(test_span: Span):
], # type: ignore
"key",
)
- # NOTE: attributes cannot be None at this point
assert dict(test_span.attributes) == { # type: ignore
"key.0.x": 7,
"key.0.y": "foo",
"key.1.0.z": "bar",
"key.1.1.a": 42,
}
- assert read_from_opentelemetry_span(test_span, "key") == {
- "0": {"x": 7, "y": "foo"},
- "1": {
- "0": {"z": "bar"},
- "1": {"a": 42},
- },
- }
+ assert read_from_opentelemetry_span(test_span, "key") == [
+ [
+ {"a": 42},
+ {"z": "bar"},
+ ],
+ {"x": 7, "y": "foo"},
+ ]
def test_read_mixed_numeric_string_keys(test_span: Span):
From 510439253c5ad8bf1c64868f978a9e0bb55931db Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Thu, 7 Nov 2024 11:40:17 +0000
Subject: [PATCH 41/70] Rebase fixes
---
src/humanloop/eval_utils/run.py | 4 +++-
src/humanloop/evaluations/client.py | 6 ------
src/humanloop/otel/helpers.py | 9 +++++----
3 files changed, 8 insertions(+), 11 deletions(-)
diff --git a/src/humanloop/eval_utils/run.py b/src/humanloop/eval_utils/run.py
index c537fae0..665726dd 100644
--- a/src/humanloop/eval_utils/run.py
+++ b/src/humanloop/eval_utils/run.py
@@ -106,12 +106,14 @@ def _is_evaluated_file(
        The user of the .log API can refer to the File that owns the Log either by
        ID or by Path. This function matches against both fields in the EvaluationContext.
"""
+ if evaluation_context == {}:
+ return False
return evaluation_context.get("file_id") == log_args.get(file_id_attribute) or evaluation_context.get(
"path"
) == log_args.get("path")
# Copy the original log method in a hidden attribute
- client._log = client.log
+ client._log = client.log # type: ignore
def _overloaded_log(
self,
diff --git a/src/humanloop/evaluations/client.py b/src/humanloop/evaluations/client.py
index 5f696145..efd4e908 100644
--- a/src/humanloop/evaluations/client.py
+++ b/src/humanloop/evaluations/client.py
@@ -14,7 +14,6 @@
from .requests.create_evaluation_request_evaluators_item import (
CreateEvaluationRequestEvaluatorsItemParams,
)
-from ..requests.evaluations_request import EvaluationsRequestParams
from ..requests.file_request import FileRequestParams
from ..core.serialization import convert_and_respect_annotation_metadata
from .requests.add_evaluators_request_evaluators_item import (
@@ -26,11 +25,6 @@
from .requests.create_run_request_version import CreateRunRequestVersionParams
from ..types.evaluation_run_response import EvaluationRunResponse
from ..types.evaluation_status import EvaluationStatus
-from ..requests.evaluations_dataset_request import EvaluationsDatasetRequestParams
-from ..requests.version_specification import VersionSpecificationParams
-from ..types.logs_association_type import LogsAssociationType
-from ..types.evaluation_run_response import EvaluationRunResponse
-from ..types.evaluation_status import EvaluationStatus
from ..types.evaluation_stats import EvaluationStats
from ..types.paginated_data_evaluation_log_response import (
PaginatedDataEvaluationLogResponse,
diff --git a/src/humanloop/otel/helpers.py b/src/humanloop/otel/helpers.py
index 23f11d75..22591ebc 100644
--- a/src/humanloop/otel/helpers.py
+++ b/src/humanloop/otel/helpers.py
@@ -105,7 +105,7 @@ def write_to_opentelemetry_span(
# OTel does not allow lists of complex objects, so we linearise them
# by mapping each dict to an index key and recursing into the dict
for idx, list_value in enumerate(value):
- work_stack.append((f"{key}.{idx}" if key else idx, list_value))
+ work_stack.append((f"{key}.{idx}" if key else idx, list_value)) # type: ignore
else:
linearised_attributes[key] = value # type: ignore
for final_key, final_value in linearised_attributes.items():
@@ -232,12 +232,13 @@ def pseudo_to_list(sub_dict):
result = pseudo_to_list(result)
if "" in result:
# User read the root of attributes
- return result[""]
+ return result[""] # type: ignore
for part in key.split("."):
if str.isnumeric(part):
- part = int(part)
- result = result[part] # type: ignore
+ result = result[int(part)] # type: ignore
+ else:
+ result = result[part] # type: ignore
return result
From 7052cf8b901e11dc2a28d62fe0c50939ae8b8460 Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Thu, 7 Nov 2024 14:39:35 +0000
Subject: [PATCH 42/70] Added missing logic from #28 and #29
---
src/humanloop/eval_utils/run.py | 99 +++++++++++++++++--------------
src/humanloop/eval_utils/types.py | 36 ++---------
2 files changed, 62 insertions(+), 73 deletions(-)
diff --git a/src/humanloop/eval_utils/run.py b/src/humanloop/eval_utils/run.py
index 665726dd..3c1149df 100644
--- a/src/humanloop/eval_utils/run.py
+++ b/src/humanloop/eval_utils/run.py
@@ -9,7 +9,9 @@
"""
import inspect
+import json
import logging
+import copy
import sys
import threading
import time
@@ -245,7 +247,7 @@ def run_eval(
if isinstance(file, Callable): # type: ignore
# Decorated function
- file_: File = file.file # type: ignore
+ file_: File = copy.deepcopy(file.file) # type: ignore
else:
file_ = file # type: ignore
@@ -276,7 +278,6 @@ def run_eval(
else:
logger.info(f"No `callable` provided for your {type_} file - will attempt to generate logs on Humanloop.")
- custom_logger = file_.pop("custom_logger", None)
file_dict = {**file_, **version}
hl_file: Union[PromptResponse, FlowResponse, ToolResponse, EvaluatorResponse]
@@ -316,8 +317,20 @@ def run_eval(
raise NotImplementedError(f"Unsupported File type: {type_}")
# Upsert the Dataset
- hl_dataset = client.datasets.upsert(**dataset)
- hl_dataset = client.datasets.get(id=hl_dataset.id, include_datapoints=True)
+ if "action" not in dataset:
+ dataset["action"] = "set"
+ if "datapoints" not in dataset:
+ dataset["datapoints"] = []
+ # Use `upsert` to get existing dataset ID if no datapoints provided, given we can't `get` on path.
+ dataset["action"] = "add"
+ hl_dataset = client.datasets.upsert(
+ **dataset,
+ )
+ hl_dataset = client.datasets.get(
+ id=hl_dataset.id,
+ version_id=hl_dataset.version_id,
+ include_datapoints=True,
+ )
# Upsert the local Evaluators; other Evaluators are just referenced by `path` or `id`
local_evaluators: List[Evaluator] = []
@@ -329,7 +342,9 @@ def run_eval(
# TODO: support the case where `file` logs generated on Humanloop but Evaluator logs generated locally
if function_ is None:
raise ValueError(
- f"Local Evaluators are only supported when generating Logs locally using your {type_}'s `callable`. Please provide a `callable` for your file in order to run Evaluators locally."
+ "Local Evaluators are only supported when generating Logs locally using your "
+ f"{type_}'s `callable`. Please provide a `callable` for your file in order "
+ "to run Evaluators locally."
)
local_evaluators.append(evaluator)
spec = ExternalEvaluator(
@@ -345,16 +360,6 @@ def run_eval(
)
function_ = typing.cast(Callable, function_)
- # Validate signature of the called function
- function_signature = inspect.signature(function_)
- parameter_names = list(function_signature.parameters.keys())
- if parameter_names != ["inputs", "messages"] and parameter_names != ["inputs"]:
- raise ValueError(
- f"Your {type_}'s `callable` must have the signature `def "
- "function(inputs: dict, messages: Optional[dict] = None):` "
- "or `def function(inputs: dict):`"
- )
-
# Validate upfront that the local Evaluators and Dataset fit
requires_target = False
for local_evaluator in local_evaluators:
@@ -368,7 +373,8 @@ def run_eval(
missing_target += 1
if missing_target > 0:
raise ValueError(
- f"{missing_target} Datapoints have no target. A target is required for the Evaluator: {local_evaluator['path']}"
+ f"{missing_target} Datapoints have no target. A target "
+ f"is required for the Evaluator: {local_evaluator['path']}"
)
# Get or create the Evaluation based on the name
@@ -431,14 +437,14 @@ def upload_callback(log: dict):
# handle the logging, which will call the upload_callback
# function above when it's done
function_( # type: ignore
- datapoint_dict["inputs"],
+ **datapoint_dict["inputs"],
messages=datapoint_dict["messages"],
)
else:
# function_ is decorated by Humanloop, the OTel Exporter will
# handle the logging, which will call the upload_callback
# function above when it's done
- function_(datapoint_dict["inputs"]) # type: ignore
+ function_(**datapoint_dict["inputs"]) # type: ignore
else:
# Define the function to execute your function in parallel and Log to Humanloop
@@ -461,20 +467,21 @@ def process_datapoint(dp: Datapoint, file_id: str, file_path: str, run_id: str):
)
else:
output = function_(**datapoint_dict["inputs"]) # type: ignore
- if custom_logger:
- log = custom_logger(client=client, output=output) # type: ignore
- else:
- if not isinstance(output, str):
+ if not isinstance(output, str):
+ try:
+ output = json.dumps(output)
+ # throw error if it fails to serialize
+ except Exception as _:
raise ValueError(
- f"Your {type_}'s `callable` must return a string if you do not provide a custom logger."
+ f"Your {type_}'s `callable` must return a string or a JSON serializable object."
)
- log = log_func(
- inputs=dp.inputs,
- output=output,
- source_datapoint_id=dp.id,
- start_time=start_time,
- end_time=datetime.now(),
- )
+                log = log_func(
+                    inputs=dp.inputs,
+                    output=output,
+                    source_datapoint_id=dp.id,
+                    start_time=start_time,
+                    end_time=datetime.now(),
+                )
except Exception as e:
log = log_func(
inputs=dp.inputs,
@@ -522,7 +529,11 @@ def process_datapoint(dp: Datapoint, file_id: str, file_path: str, run_id: str):
while not complete:
stats = client.evaluations.get_stats(id=evaluation.id)
logger.info(f"\r{stats.progress}")
- complete = stats.status == "completed"
+ run_stats = next(
+ (run_stats for run_stats in stats.run_stats if run_stats.run_id == run_id),
+ None,
+ )
+ complete = run_stats is not None and run_stats.status == "completed"
if not complete:
time.sleep(5)
@@ -681,12 +692,17 @@ def _check_evaluation_improvement(
logger.info(f"{YELLOW}⚠️ No previous versions to compare with.{RESET}")
return True, 0, 0
- previous_evaluator_stats_by_path = _get_evaluator_stats_by_path(stat=stats.run_stats[-2], evaluation=evaluation)
+ previous_evaluator_stats_by_path = _get_evaluator_stats_by_path(
+ stat=stats.run_stats[1],
+ evaluation=evaluation,
+ )
if evaluator_path in latest_evaluator_stats_by_path and evaluator_path in previous_evaluator_stats_by_path:
latest_evaluator_stat = latest_evaluator_stats_by_path[evaluator_path]
previous_evaluator_stat = previous_evaluator_stats_by_path[evaluator_path]
latest_score = _get_score_from_evaluator_stat(stat=latest_evaluator_stat)
previous_score = _get_score_from_evaluator_stat(stat=previous_evaluator_stat)
+ if latest_score is None or previous_score is None:
+ raise ValueError(f"Could not find score for Evaluator {evaluator_path}.")
diff = round(latest_score - previous_score, 2) # type: ignore
if diff >= 0:
logger.info(f"{CYAN}Change of [{diff}] for Evaluator {evaluator_path}{RESET}")
@@ -716,17 +732,14 @@ def _add_log_to_evaluation(
else:
judgement = eval_function(log)
- if local_evaluator.get("custom_logger", None):
- local_evaluator["custom_logger"](judgement, start_time, datetime.now())
- else:
- _ = client.evaluators.log(
- parent_id=log["id"],
- judgment=judgement,
- id=local_evaluator.get("id"),
- path=local_evaluator.get("path"),
- start_time=start_time,
- end_time=datetime.now(),
- )
+ _ = client.evaluators.log(
+ parent_id=log["id"],
+ judgment=judgement,
+ id=local_evaluator.get("id"),
+ path=local_evaluator.get("path"),
+ start_time=start_time,
+ end_time=datetime.now(),
+ )
except Exception as e:
_ = client.evaluators.log(
parent_id=log["id"],
diff --git a/src/humanloop/eval_utils/types.py b/src/humanloop/eval_utils/types.py
index 102cf5fa..8db5832d 100644
--- a/src/humanloop/eval_utils/types.py
+++ b/src/humanloop/eval_utils/types.py
@@ -46,23 +46,13 @@ class File(Identifiers):
"""The function being evaluated.
It will be called using your Dataset `inputs` as follows: `output = callable(**datapoint.inputs)`.
If `messages` are defined in your Dataset, then `output = callable(**datapoint.inputs, messages=datapoint.messages)`.
- It should return a single string output. If not, you must provide a `custom_logger`.
+    It should return a string or a JSON-serializable output.
"""
- custom_logger: NotRequired[Callable]
- """function that logs the output of your function to Humanloop, replacing the default logging.
- If provided, it will be called as follows:
- ```
- output = callable(**datapoint.inputs).
- log = custom_logger(client, output)
- ```
- Inside the custom_logger, you can use the Humanloop `client` to log the output of your function.
- If not provided your pipeline must return a single string.
- """
- is_decorated: NotRequired[Literal[True]]
+ is_decorated: NotRequired[bool]
class Dataset(Identifiers):
- datapoints: Sequence[DatapointDict]
+ datapoints: NotRequired[Sequence[DatapointDict]]
"""The datapoints to map your function over to produce the outputs required by the evaluation."""
action: NotRequired[UpdateDatasetAction]
"""How to update the Dataset given the provided Datapoints;
@@ -72,26 +62,12 @@ class Dataset(Identifiers):
class Evaluator(Identifiers):
"""The Evaluator to provide judgments for this Evaluation."""
- custom_logger: NotRequired[Callable]
-
- """The type of arguments the Evaluator expects - only required for local Evaluators."""
args_type: NotRequired[EvaluatorArgumentsType]
-
- """The type of return value the Evaluator produces - only required for local Evaluators."""
+ """The type of arguments the Evaluator expects - only required for local Evaluators."""
return_type: NotRequired[EvaluatorReturnTypeEnum]
-
- """The function to run on the logs to produce the judgment - only required for local Evaluators."""
+ """The type of return value the Evaluator produces - only required for local Evaluators."""
callable: NotRequired[Callable]
-
- """optional function that logs the output judgment from your Evaluator to Humanloop, if provided, it will be called as follows:
- custom_logger: NotRequired[Callable]
- ```
- judgment = callable(log_dict)
- log = custom_logger(client, judgment)
- ```
- Inside the custom_logger, you can use the Humanloop `client` to log the judgment to Humanloop.
- If not provided your function must return a single string and by default the code will be used to inform the version of the external Evaluator on Humanloop.
- """
+ """The function to run on the logs to produce the judgment - only required for local Evaluators."""
threshold: NotRequired[float]
"""The threshold to check the Evaluator against. If the aggregate value of the Evaluator is below this threshold, the check will fail."""
From 6e79d83857ab2880669aacbabdeba207f72b5919 Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Thu, 7 Nov 2024 14:48:00 +0000
Subject: [PATCH 43/70] PR feedback
---
src/humanloop/eval_utils/run.py | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/src/humanloop/eval_utils/run.py b/src/humanloop/eval_utils/run.py
index 3c1149df..a8419601 100644
--- a/src/humanloop/eval_utils/run.py
+++ b/src/humanloop/eval_utils/run.py
@@ -168,7 +168,6 @@ def _overloaded_log(
**kwargs,
}
)
- # Log has been added to evaluation, reset the context for current Thread
return response
@@ -412,7 +411,7 @@ def run_eval(
def process_datapoint(dp: Datapoint, file_id: str, file_path: str, run_id: str):
def upload_callback(log: dict):
# OTel exporter will call this after the Log is uploaded
- _add_log_to_evaluation(
+ _run_local_evaluators(
client=client,
log=log,
datapoint_target=dp.target,
@@ -492,7 +491,7 @@ def process_datapoint(dp: Datapoint, file_id: str, file_path: str, run_id: str):
)
logger.warning(msg=f"\nYour {type_}'s `callable` failed for Datapoint: {dp.id}. \n Error: {str(e)}")
- _add_log_to_evaluation(
+ _run_local_evaluators(
client=client,
log=log,
datapoint_target=dp.target,
@@ -714,7 +713,7 @@ def _check_evaluation_improvement(
raise ValueError(f"Evaluator {evaluator_path} not found in the stats.")
-def _add_log_to_evaluation(
+def _run_local_evaluators(
client: "BaseHumanloop",
log: dict,
datapoint_target: typing.Optional[typing.Dict[str, DatapointResponseTargetValue]],
From 15a05963dc8ea13ae2a251f126e0fa209db34b31 Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Thu, 7 Nov 2024 18:04:07 +0000
Subject: [PATCH 44/70] Support chat template + parsing log inputs from
function call args
---
.fernignore | 4 ++--
src/humanloop/client.py | 8 ++++----
src/humanloop/decorators/flow.py | 2 +-
src/humanloop/decorators/prompt.py | 4 ++++
src/humanloop/decorators/tool.py | 11 +++++------
src/humanloop/otel/exporter.py | 6 ++----
src/humanloop/otel/processor.py | 20 --------------------
7 files changed, 18 insertions(+), 37 deletions(-)
diff --git a/.fernignore b/.fernignore
index 08310ad3..0dd0039a 100644
--- a/.fernignore
+++ b/.fernignore
@@ -8,5 +8,5 @@ README.md
# Directories used by SDK decorators
-src/humanloop/decorators/_
-src/humanloop/otel/_
+src/humanloop/decorators/*
+src/humanloop/otel/*
diff --git a/src/humanloop/client.py b/src/humanloop/client.py
index eb2f3204..2f5f8700 100644
--- a/src/humanloop/client.py
+++ b/src/humanloop/client.py
@@ -181,7 +181,7 @@ def prompt(
    Here's an example of declaring a [Prompt](https://humanloop.com/docs/explanation/prompts) in code:
```python
- @prompt(template="You are an assistant on the following topics: {topics}.")
+ @prompt(template="You are an assistant on the following topics: {{topics}}.")
def call_llm(messages):
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
return client.chat.completions.create(
@@ -199,7 +199,7 @@ def call_llm(messages):
{
model: "gpt-4o",
endpoint: "chat",
- template: "You are an assistant on the following topics: {topics}.",
+ template: "You are an assistant on the following topics: {{topics}}.",
provider: "openai",
max_tokens: 200,
temperature: 0.8,
@@ -240,7 +240,7 @@ def call_llm(messages):
:param template: The template for the Prompt. This is the text of
the system message used to set the LLM prompt. The template
- accepts template slots using the format `{slot_name}`.
+ accepts template slots using the format `{{slot_name}}`.
:param provider: The company providing the underlying model service.
@@ -394,7 +394,7 @@ def flow(
Here's an example of declaring a [Flow](https://humanloop.com/docs/explanation/flows) in code:
```python
- @prompt(template="You are an assistant on the following topics: {topics}.")
+ @prompt(template="You are an assistant on the following topics: {{topics}}.")
def call_llm(messages):
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
return client.chat.completions.create(
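The docstring templates above switch slot syntax from `{slot_name}` to `{{slot_name}}`, presumably to avoid colliding with Python's own brace interpolation and to match common chat-template conventions. A toy renderer for the double-brace format (the SDK's own helper is `populate_template` from `humanloop.prompt_utils`; this stand-in only illustrates the syntax):

```python
import re

def render(template: str, inputs: dict) -> str:
    # Replace each {{slot}} with the matching input value
    return re.sub(r"\{\{(\w+)\}\}", lambda m: str(inputs[m.group(1)]), template)

template = "You are an assistant on the following topics: {{topics}}."
print(render(template, {"topics": "astronomy"}))
# -> You are an assistant on the following topics: astronomy.
```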
diff --git a/src/humanloop/decorators/flow.py b/src/humanloop/decorators/flow.py
index d019efe8..a916f1de 100644
--- a/src/humanloop/decorators/flow.py
+++ b/src/humanloop/decorators/flow.py
@@ -67,7 +67,7 @@ def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any:
output = func(*args, **kwargs)
error = None
except Exception as e:
- logger.error(f"{func.__name__}: {str(e)}")
+ logger.error(f"{func.__name__}: {e}")
output = None
error = str(e)
diff --git a/src/humanloop/decorators/prompt.py b/src/humanloop/decorators/prompt.py
index b066f8d3..51438fb6 100644
--- a/src/humanloop/decorators/prompt.py
+++ b/src/humanloop/decorators/prompt.py
@@ -9,6 +9,7 @@
if typing.TYPE_CHECKING:
from humanloop import ToolFunctionParams
+from humanloop.decorators.helpers import args_to_inputs
from humanloop.eval_utils import File
from humanloop.otel import TRACE_FLOW_CONTEXT, FlowContext
from humanloop.otel.constants import HL_FILE_KEY, HL_FILE_TYPE_KEY, HL_LOG_KEY, HL_PATH_KEY
@@ -75,7 +76,9 @@ def decorator(func: Callable):
"other": other,
"seed": seed,
"response_format": response_format,
+ # {} -> None
"attributes": attributes or None,
+ "tools": tools,
}.items():
prompt_kernel[attr_name] = attr_value # type: ignore
@@ -114,6 +117,7 @@ def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any:
error = str(e)
prompt_log = {
+ "inputs": args_to_inputs(func, args, kwargs),
"output": output,
"error": error,
}
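The Prompt Log now derives `inputs` from the decorated function's call arguments via `args_to_inputs`, rather than by parsing the rendered system message (the processor.py hunk below deletes that parsing). A sketch of how such a helper can bind arguments to parameter names; the real implementation in `humanloop.decorators.helpers` may differ:

```python
import inspect
from typing import Any, Callable, Mapping, Sequence

def args_to_inputs_sketch(
    func: Callable,
    args: Sequence[Any],
    kwargs: Mapping[str, Any],
) -> dict:
    # Bind positional and keyword arguments onto the signature's
    # parameter names, including parameters left at their defaults
    bound = inspect.signature(func).bind(*args, **kwargs)
    bound.apply_defaults()
    return dict(bound.arguments)

def greet(name: str, punctuation: str = "!") -> str:
    return f"Hello, {name}{punctuation}"

assert args_to_inputs_sketch(greet, ("Ada",), {}) == {"name": "Ada", "punctuation": "!"}
```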
diff --git a/src/humanloop/decorators/tool.py b/src/humanloop/decorators/tool.py
index 6285af80..74107fb3 100644
--- a/src/humanloop/decorators/tool.py
+++ b/src/humanloop/decorators/tool.py
@@ -83,12 +83,11 @@ def wrapper(*args, **kwargs):
}
# Write the Tool Log to the Span on HL_LOG_OT_KEY
- if tool_log:
- write_to_opentelemetry_span(
- span=span,
- key=HL_LOG_KEY,
- value=tool_log,
- )
+ write_to_opentelemetry_span(
+ span=span,
+ key=HL_LOG_KEY,
+ value=tool_log,
+ )
# Return the output of the decorated function
return output
diff --git a/src/humanloop/otel/exporter.py b/src/humanloop/otel/exporter.py
index ad501d58..e9cea094 100644
--- a/src/humanloop/otel/exporter.py
+++ b/src/humanloop/otel/exporter.py
@@ -31,7 +31,7 @@ class HumanloopSpanExporter(SpanExporter):
Spans not created by Humanloop SDK decorators will be ignored.
"""
- DEFAULT_NUMBER_THREADS = 4
+ DEFAULT_NUMBER_THREADS = 1
def __init__(
self,
@@ -193,7 +193,6 @@ def _export_prompt(self, span: ReadableSpan) -> None:
)
self._span_id_to_uploaded_log_id[span.context.span_id] = log_response.id
except HumanloopApiError as e:
- logger.error(f"TOO {e}")
self._span_id_to_uploaded_log_id[span.context.span_id] = None
def _export_tool(self, span: ReadableSpan) -> None:
@@ -230,7 +229,6 @@ def _export_tool(self, span: ReadableSpan) -> None:
)
self._span_id_to_uploaded_log_id[span.context.span_id] = log_response.id
except HumanloopApiError as e:
- logger.error(f"FOO {e}")
self._span_id_to_uploaded_log_id[span.context.span_id] = None
def _export_flow(self, span: ReadableSpan) -> None:
@@ -272,5 +270,5 @@ def _export_flow(self, span: ReadableSpan) -> None:
)
self._span_id_to_uploaded_log_id[span.get_span_context().span_id] = log_response.id
except HumanloopApiError as e:
- logger.error(f"BAZ {e}")
+ logger.error(str(e))
self._span_id_to_uploaded_log_id[span.context.span_id] = None
diff --git a/src/humanloop/otel/processor.py b/src/humanloop/otel/processor.py
index c9cffc8e..2567e41f 100644
--- a/src/humanloop/otel/processor.py
+++ b/src/humanloop/otel/processor.py
@@ -1,10 +1,8 @@
-import json
import logging
from collections import defaultdict
from typing import Any
# No typing stubs for parse
-import parse # type: ignore
from opentelemetry.sdk.trace import ReadableSpan
from opentelemetry.sdk.trace.export import SimpleSpanProcessor, SpanExporter
from pydantic import ValidationError as PydanticValidationError
@@ -146,7 +144,6 @@ def _enrich_prompt_kernel(prompt_span: ReadableSpan, llm_provider_call_span: Rea
def _enrich_prompt_log(prompt_span: ReadableSpan, llm_provider_call_span: ReadableSpan):
- hl_file: dict[str, Any] = read_from_opentelemetry_span(prompt_span, key=HL_FILE_KEY)
try:
hl_log: dict[str, Any] = read_from_opentelemetry_span(prompt_span, key=HL_LOG_KEY)
except KeyError:
@@ -161,23 +158,6 @@ def _enrich_prompt_log(prompt_span: ReadableSpan, llm_provider_call_span: Readab
hl_log["finish_reason"] = gen_ai_object["completion"][0].get("finish_reason")
hl_log["messages"] = gen_ai_object.get("prompt")
- try:
- inputs = {}
- system_message = gen_ai_object["prompt"][0]["content"]
- template = hl_file["prompt"]["template"]
- parsed = parse.parse(template, system_message)
- for key, value in parsed.named.items():
- try:
- parsed_value = json.loads(value.replace("'", '"'))
- except json.JSONDecodeError:
- parsed_value = value
- inputs[key] = parsed_value
- except Exception as e:
- logging.error(e)
- inputs = None
- finally:
- hl_log["inputs"] = inputs
-
write_to_opentelemetry_span(
span=prompt_span,
key=HL_LOG_KEY,
From 84fe73cc41d48ae9b64ffd1001d3dace615b9492 Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Thu, 7 Nov 2024 18:35:37 +0000
Subject: [PATCH 45/70] QA bugfixing
---
src/humanloop/decorators/flow.py | 4 +++-
src/humanloop/decorators/prompt.py | 2 +-
src/humanloop/decorators/tool.py | 2 +-
src/humanloop/otel/exporter.py | 8 +++-----
src/humanloop/otel/processor.py | 2 +-
5 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/src/humanloop/decorators/flow.py b/src/humanloop/decorators/flow.py
index a916f1de..35caceba 100644
--- a/src/humanloop/decorators/flow.py
+++ b/src/humanloop/decorators/flow.py
@@ -1,4 +1,5 @@
import logging
+import traceback
import uuid
from functools import wraps
from typing import Any, Callable, Mapping, Optional, Sequence
@@ -67,7 +68,8 @@ def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any:
output = func(*args, **kwargs)
error = None
except Exception as e:
- logger.error(f"{func.__name__}: {e}")
+ # print error, line of code, and function name
+ logger.error(f"Error calling {func.__name__}: {e}")
output = None
error = str(e)
diff --git a/src/humanloop/decorators/prompt.py b/src/humanloop/decorators/prompt.py
index 51438fb6..919eccbb 100644
--- a/src/humanloop/decorators/prompt.py
+++ b/src/humanloop/decorators/prompt.py
@@ -112,7 +112,7 @@ def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any:
output = func(*args, **kwargs)
error = None
except Exception as e:
- logger.error(f"{func.__name__}: {e}")
+ logger.error(f"Error calling {func.__name__}: {e}")
output = None
error = str(e)
diff --git a/src/humanloop/decorators/tool.py b/src/humanloop/decorators/tool.py
index 74107fb3..737d19f1 100644
--- a/src/humanloop/decorators/tool.py
+++ b/src/humanloop/decorators/tool.py
@@ -71,7 +71,7 @@ def wrapper(*args, **kwargs):
output = func(*args, **kwargs)
error = None
except Exception as e:
- logger.error(f"{func.__name__}: {e}")
+ logger.error(f"Error calling {func.__name__}: {e}")
output = None
error = str(e)
diff --git a/src/humanloop/otel/exporter.py b/src/humanloop/otel/exporter.py
index e9cea094..3cb294ba 100644
--- a/src/humanloop/otel/exporter.py
+++ b/src/humanloop/otel/exporter.py
@@ -31,7 +31,7 @@ class HumanloopSpanExporter(SpanExporter):
Spans not created by Humanloop SDK decorators will be ignored.
"""
- DEFAULT_NUMBER_THREADS = 1
+ DEFAULT_NUMBER_THREADS = 4
def __init__(
self,
@@ -257,10 +257,8 @@ def _export_flow(self, span: ReadableSpan) -> None:
else:
flow = file_object["flow"]
path: str = file_object["path"]
- if not isinstance(log_object["output"], str):
- # Output expected to be a string, if decorated function
- # does not return one, jsonify it
- log_object["output"] = json.dumps(log_object["output"])
+ if "output" not in log_object:
+ log_object["output"] = None
try:
log_response = self._client.flows.log(
path=path,
diff --git a/src/humanloop/otel/processor.py b/src/humanloop/otel/processor.py
index 2567e41f..24a6749c 100644
--- a/src/humanloop/otel/processor.py
+++ b/src/humanloop/otel/processor.py
@@ -125,7 +125,7 @@ def _enrich_prompt_kernel(prompt_span: ReadableSpan, llm_provider_call_span: Rea
prompt["max_tokens"] = prompt.get("max_tokens") or gen_ai_object.get("request", {}).get("max_tokens", None)
prompt["presence_penalty"] = prompt.get("presence_penalty") or llm_object.get("presence_penalty", None)
prompt["frequency_penalty"] = prompt.get("frequency_penalty") or llm_object.get("frequency_penalty", None)
- prompt["tools"] = list(prompt.get("tools", {}).values())
+ prompt["tools"] = prompt.get("tools", [])
try:
# Validate the Prompt Kernel
From a3e4783b044af05e077245ccb121588c4cc37a2c Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Mon, 11 Nov 2024 03:42:26 +0000
Subject: [PATCH 46/70] PR feedback from Harry
---
src/humanloop/client.py | 61 +++-
src/humanloop/decorators/flow.py | 22 +-
src/humanloop/decorators/prompt.py | 45 +--
src/humanloop/decorators/tool.py | 399 ++++++++++++----------
src/humanloop/eval_utils/__init__.py | 2 +-
src/humanloop/eval_utils/context.py | 7 +-
src/humanloop/eval_utils/run.py | 208 +++++------
src/humanloop/eval_utils/types.py | 1 -
src/humanloop/otel/constants.py | 8 +-
src/humanloop/otel/exporter.py | 89 +++--
src/humanloop/otel/helpers.py | 19 +-
src/humanloop/otel/processor.py | 18 +-
tests/decorators/test_flow_decorator.py | 24 +-
tests/decorators/test_prompt_decorator.py | 37 +-
tests/decorators/test_tool_decorator.py | 56 ++-
15 files changed, 551 insertions(+), 445 deletions(-)
diff --git a/src/humanloop/client.py b/src/humanloop/client.py
index 2f5f8700..50a43cfc 100644
--- a/src/humanloop/client.py
+++ b/src/humanloop/client.py
@@ -1,13 +1,15 @@
-import logging
+from contextvars import ContextVar
import os
import typing
-from typing import Any, Callable, List, Optional, Sequence, Union
+from typing import Any, List, Optional, Sequence
import httpx
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.trace import Tracer
+from humanloop.core.client_wrapper import SyncClientWrapper
+from humanloop.eval_utils.context import EVALUATION_CONTEXT_VARIABLE_NAME, EvaluationContext
from humanloop.types.model_endpoints import ModelEndpoints
from humanloop.types.model_providers import ModelProviders
from humanloop.types.prompt_kernel_request_stop import PromptKernelRequestStop
@@ -15,30 +17,39 @@
from humanloop.types.response_format import ResponseFormat
if typing.TYPE_CHECKING:
- from . import ToolFunctionParams
+ from humanloop import ToolFunctionParams
from humanloop.eval_utils import log_with_evaluation_context, run_eval
from humanloop.eval_utils.types import Dataset, Evaluator, EvaluatorCheck, File
-from .base_client import AsyncBaseHumanloop, BaseHumanloop
-from .decorators.flow import flow as flow_decorator_factory
-from .decorators.prompt import prompt as prompt_decorator_factory
-from .decorators.tool import tool as tool_decorator_factory
-from .environment import HumanloopEnvironment
-from .evaluations.client import EvaluationsClient
-from .otel import instrument_provider
-from .otel.exporter import HumanloopSpanExporter
-from .otel.processor import HumanloopSpanProcessor
-from .prompt_utils import populate_template
-from .prompts.client import PromptsClient
+from humanloop.base_client import AsyncBaseHumanloop, BaseHumanloop
+from humanloop.decorators.flow import flow as flow_decorator_factory
+from humanloop.decorators.prompt import prompt as prompt_decorator_factory
+from humanloop.decorators.tool import tool as tool_decorator_factory
+from humanloop.environment import HumanloopEnvironment
+from humanloop.evaluations.client import EvaluationsClient
+from humanloop.otel import instrument_provider
+from humanloop.otel.exporter import HumanloopSpanExporter
+from humanloop.otel.processor import HumanloopSpanProcessor
+from humanloop.prompt_utils import populate_template
+from humanloop.prompts.client import PromptsClient
class ExtendedEvalsClient(EvaluationsClient):
client: BaseHumanloop
+ def __init__(
+ self,
+ *,
+ client_wrapper: SyncClientWrapper,
+ evaluation_context_variable: ContextVar[Optional[EvaluationContext]],
+ ):
+ super().__init__(client_wrapper=client_wrapper)
+ self._evaluation_context_variable = evaluation_context_variable
+
def run(
self,
- file: Union[File, Callable],
+ file: File,
name: Optional[str],
dataset: Dataset,
evaluators: Optional[Sequence[Evaluator]] = None,
@@ -64,6 +75,7 @@ def run(
dataset=dataset,
evaluators=evaluators,
workers=workers,
+ evaluation_context_variable=self._evaluation_context_variable,
)
@@ -111,7 +123,14 @@ def __init__(
httpx_client=httpx_client,
)
- eval_client = ExtendedEvalsClient(client_wrapper=self._client_wrapper)
+ self.evaluation_context_variable: ContextVar[Optional[EvaluationContext]] = ContextVar(
+ EVALUATION_CONTEXT_VARIABLE_NAME
+ )
+
+ eval_client = ExtendedEvalsClient(
+ client_wrapper=self._client_wrapper,
+ evaluation_context_variable=self.evaluation_context_variable,
+ )
eval_client.client = self
self.evaluations = eval_client
self.prompts = ExtendedPromptsClient(client_wrapper=self._client_wrapper)
@@ -119,10 +138,16 @@ def __init__(
# Overload the .log method of the clients to be aware of Evaluation Context
# TODO: Overload the log for Evaluators and Tools once run_id is added
# to them.
- self.prompts = log_with_evaluation_context(client=self.prompts)
+ self.prompts = log_with_evaluation_context(
+ client=self.prompts,
+ evaluation_context_variable=self.evaluation_context_variable,
+ )
# self.evaluators = log_with_evaluation_context(client=self.evaluators)
# self.tools = log_with_evaluation_context(client=self.tools)
- self.flows = log_with_evaluation_context(client=self.flows)
+ self.flows = log_with_evaluation_context(
+ client=self.flows,
+ evaluation_context_variable=self.evaluation_context_variable,
+ )
if opentelemetry_tracer_provider is not None:
self._tracer_provider = opentelemetry_tracer_provider
diff --git a/src/humanloop/decorators/flow.py b/src/humanloop/decorators/flow.py
index 35caceba..3b22d8db 100644
--- a/src/humanloop/decorators/flow.py
+++ b/src/humanloop/decorators/flow.py
@@ -1,7 +1,7 @@
import logging
-import traceback
-import uuid
from functools import wraps
+import os
+import sys
from typing import Any, Callable, Mapping, Optional, Sequence
from opentelemetry.sdk.trace import Span
@@ -11,8 +11,8 @@
from humanloop.decorators.helpers import args_to_inputs
from humanloop.eval_utils.types import File
from humanloop.otel import TRACE_FLOW_CONTEXT, FlowContext
-from humanloop.otel.constants import HL_FILE_KEY, HL_FILE_TYPE_KEY, HL_LOG_KEY, HL_PATH_KEY
-from humanloop.otel.helpers import write_to_opentelemetry_span
+from humanloop.otel.constants import HUMANLOOP_FILE_KEY, HUMANLOOP_FILE_TYPE_KEY, HUMANLOOP_LOG_KEY, HUMANLOOP_PATH_KEY
+from humanloop.otel.helpers import generate_span_id, write_to_opentelemetry_span
from humanloop.requests import FlowKernelRequestParams as FlowDict
logger = logging.getLogger("humanloop.sdk")
@@ -31,7 +31,7 @@ def decorator(func: Callable):
@wraps(func)
def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any:
span: Span
- with opentelemetry_tracer.start_as_current_span(str(uuid.uuid4())) as span:
+ with opentelemetry_tracer.start_as_current_span(generate_span_id()) as span:
span_id = span.get_span_context().span_id
if span.parent:
span_parent_id = span.parent.span_id
@@ -52,12 +52,12 @@ def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any:
is_flow_log=True,
)
- span.set_attribute(HL_PATH_KEY, path if path else func.__name__)
- span.set_attribute(HL_FILE_TYPE_KEY, "flow")
+ span.set_attribute(HUMANLOOP_PATH_KEY, path if path else func.__name__)
+ span.set_attribute(HUMANLOOP_FILE_TYPE_KEY, "flow")
if attributes:
write_to_opentelemetry_span(
span=span,
- key=f"{HL_FILE_KEY}.flow.attributes",
+ key=f"{HUMANLOOP_FILE_KEY}.flow.attributes",
value=attributes, # type: ignore
)
@@ -68,7 +68,6 @@ def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any:
output = func(*args, **kwargs)
error = None
except Exception as e:
- # print error, line of code, and function name
logger.error(f"Error calling {func.__name__}: {e}")
output = None
error = str(e)
@@ -87,18 +86,17 @@ def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any:
if flow_log:
write_to_opentelemetry_span(
span=span,
- key=HL_LOG_KEY,
+ key=HUMANLOOP_LOG_KEY,
value=flow_log, # type: ignore
)
# Return the output of the decorated function
return output
- func.file = File( # type: ignore
+ wrapper.file = File( # type: ignore
path=path if path else func.__name__,
type="flow",
version=FlowDict(attributes=attributes), # type: ignore
- is_decorated=True,
callable=wrapper,
)
diff --git a/src/humanloop/decorators/prompt.py b/src/humanloop/decorators/prompt.py
index 919eccbb..d146d665 100644
--- a/src/humanloop/decorators/prompt.py
+++ b/src/humanloop/decorators/prompt.py
@@ -1,4 +1,6 @@
import logging
+import os
+import sys
import typing
import uuid
from functools import wraps
@@ -7,13 +9,15 @@
from opentelemetry.sdk.trace import Span
from opentelemetry.trace import Tracer
+from humanloop.types.prompt_kernel_request import PromptKernelRequest
+
if typing.TYPE_CHECKING:
from humanloop import ToolFunctionParams
from humanloop.decorators.helpers import args_to_inputs
from humanloop.eval_utils import File
from humanloop.otel import TRACE_FLOW_CONTEXT, FlowContext
-from humanloop.otel.constants import HL_FILE_KEY, HL_FILE_TYPE_KEY, HL_LOG_KEY, HL_PATH_KEY
-from humanloop.otel.helpers import write_to_opentelemetry_span
+from humanloop.otel.constants import HUMANLOOP_FILE_KEY, HUMANLOOP_FILE_TYPE_KEY, HUMANLOOP_LOG_KEY, HUMANLOOP_PATH_KEY
+from humanloop.otel.helpers import generate_span_id, write_to_opentelemetry_span
from humanloop.types.model_endpoints import ModelEndpoints
from humanloop.types.model_providers import ModelProviders
from humanloop.types.prompt_kernel_request_stop import PromptKernelRequestStop
@@ -45,27 +49,6 @@ def prompt(
):
def decorator(func: Callable):
prompt_kernel = {}
-
- if temperature is not None:
- if not 0 <= temperature < 1:
- raise ValueError(f"{func.__name__}: Temperature parameter must be between 0 and 1")
- prompt_kernel["temperature"] = temperature
-
- if top_p is not None:
- if not 0 <= top_p <= 1:
- raise ValueError(f"{func.__name__}: Top-p parameter must be between 0 and 1")
- prompt_kernel["top_p"] = top_p
-
- if presence_penalty is not None:
- if not -2 <= presence_penalty <= 2:
- raise ValueError(f"{func.__name__}: Presence penalty parameter must be between -2 and 2")
- prompt_kernel["presence_penalty"] = presence_penalty
-
- if frequency_penalty is not None:
- if not -2 <= frequency_penalty <= 2:
- raise ValueError(f"{func.__name__}: Frequency penalty parameter must be between -2 and 2")
- prompt_kernel["frequency_penalty"] = frequency_penalty
-
for attr_name, attr_value in {
"model": model,
"endpoint": endpoint,
@@ -76,16 +59,19 @@ def decorator(func: Callable):
"other": other,
"seed": seed,
"response_format": response_format,
- # {} -> None
"attributes": attributes or None,
"tools": tools,
+ "temperature": temperature,
+ "top_p": top_p,
+ "presence_penalty": presence_penalty,
+ "frequency_penalty": frequency_penalty,
}.items():
prompt_kernel[attr_name] = attr_value # type: ignore
@wraps(func)
def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any:
span: Span
- with opentelemetry_tracer.start_as_current_span(str(uuid.uuid4())) as span:
+ with opentelemetry_tracer.start_as_current_span(generate_span_id()) as span:
span_id = span.get_span_context().span_id
if span.parent:
span_parent_id = span.parent.span_id
@@ -97,13 +83,13 @@ def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any:
is_flow_log=False,
)
- span.set_attribute(HL_PATH_KEY, path if path else func.__name__)
- span.set_attribute(HL_FILE_TYPE_KEY, "prompt")
+ span.set_attribute(HUMANLOOP_PATH_KEY, path if path else func.__name__)
+ span.set_attribute(HUMANLOOP_FILE_TYPE_KEY, "prompt")
if prompt_kernel:
write_to_opentelemetry_span(
span=span,
- key=f"{HL_FILE_KEY}.prompt",
+ key=f"{HUMANLOOP_FILE_KEY}.prompt",
value=prompt_kernel, # type: ignore
)
@@ -123,7 +109,7 @@ def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any:
}
write_to_opentelemetry_span(
span=span,
- key=HL_LOG_KEY,
+ key=HUMANLOOP_LOG_KEY,
value=prompt_log,
)
@@ -140,7 +126,6 @@ def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any:
path=path if path else func.__name__,
type="prompt",
version={**prompt_kernel_file}, # type: ignore
- is_decorated=True,
callable=wrapper,
)
diff --git a/src/humanloop/decorators/tool.py b/src/humanloop/decorators/tool.py
index 737d19f1..ec95251c 100644
--- a/src/humanloop/decorators/tool.py
+++ b/src/humanloop/decorators/tool.py
@@ -1,9 +1,11 @@
import builtins
import inspect
import logging
+import os
+import sys
import textwrap
import typing
-import uuid
+from dataclasses import dataclass
from functools import wraps
from inspect import Parameter
from typing import Any, Callable, Literal, Mapping, Optional, Sequence, TypedDict, Union
@@ -12,8 +14,8 @@
from humanloop.eval_utils import File
from humanloop.otel import TRACE_FLOW_CONTEXT, FlowContext
-from humanloop.otel.constants import HL_FILE_KEY, HL_FILE_TYPE_KEY, HL_LOG_KEY, HL_PATH_KEY
-from humanloop.otel.helpers import write_to_opentelemetry_span
+from humanloop.otel.constants import HUMANLOOP_FILE_KEY, HUMANLOOP_FILE_TYPE_KEY, HUMANLOOP_LOG_KEY, HUMANLOOP_PATH_KEY
+from humanloop.otel.helpers import generate_span_id, write_to_opentelemetry_span
from humanloop.requests.tool_function import ToolFunctionParams
from humanloop.requests.tool_kernel_request import ToolKernelRequestParams
@@ -42,7 +44,7 @@ def decorator(func: Callable):
@wraps(func)
def wrapper(*args, **kwargs):
- with opentelemetry_tracer.start_as_current_span(str(uuid.uuid4())) as span:
+ with opentelemetry_tracer.start_as_current_span(generate_span_id()) as span:
span_id = span.get_span_context().span_id
if span.parent:
span_parent_id = span.parent.span_id
@@ -57,12 +59,12 @@ def wrapper(*args, **kwargs):
)
# Write the Tool Kernel to the Span on HL_FILE_OT_KEY
- span.set_attribute(HL_PATH_KEY, path if path else func.__name__)
- span.set_attribute(HL_FILE_TYPE_KEY, "tool")
+ span.set_attribute(HUMANLOOP_PATH_KEY, path if path else func.__name__)
+ span.set_attribute(HUMANLOOP_FILE_TYPE_KEY, "tool")
if tool_kernel:
write_to_opentelemetry_span(
span=span,
- key=f"{HL_FILE_KEY}.tool",
+ key=f"{HUMANLOOP_FILE_KEY}.tool",
value=tool_kernel,
)
@@ -85,18 +87,17 @@ def wrapper(*args, **kwargs):
# Write the Tool Log to the Span on HL_LOG_OT_KEY
write_to_opentelemetry_span(
span=span,
- key=HL_LOG_KEY,
+ key=HUMANLOOP_LOG_KEY,
value=tool_log,
)
# Return the output of the decorated function
return output
- func.file = File( # type: ignore
+ wrapper.file = File( # type: ignore
path=path if path else func.__name__,
type="tool",
version=tool_kernel,
- is_decorated=True,
callable=wrapper,
)
@@ -112,12 +113,14 @@ def _build_tool_kernel(
strict: bool,
) -> ToolKernelRequestParams:
"""Build ToolKernelRequest object from decorated function."""
+ source_code = textwrap.dedent(inspect.getsource(func))
+ # Remove decorator from source code by finding first 'def'
+ # This makes the source_code extraction idempotent whether
+ # the decorator is applied directly or used as a higher-order
+ # function
+ source_code = source_code[source_code.find("def") :]
kernel = ToolKernelRequestParams(
- source_code=textwrap.dedent(
- # Remove the tool decorator from source code
- inspect.getsource(func).split("\n", maxsplit=1)[1]
- ),
- # Note: OTel complains about falsy values in attributes, so we use OT_EMPTY_ATTRIBUTE
+ source_code=source_code,
function=_build_function_property(
func=func,
strict=strict,
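Slicing from the first `def` makes the extraction idempotent whether the decorator is applied with `@tool` or as a higher-order function, and it tolerates multi-line decorator expressions that the old drop-the-first-line approach broke on. A quick illustration (note the slice still assumes `def` never appears inside the decorator expression itself):

```python
import inspect
import textwrap

def passthrough(func):
    return func

@passthrough
def add(a: int, b: int) -> int:
    return a + b

source = textwrap.dedent(inspect.getsource(add))
source = source[source.find("def"):]  # strip the decorator line(s)
print(source)
# def add(a: int, b: int) -> int:
#     return a + b
```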
@@ -162,7 +165,7 @@ def _build_function_parameters_property(func) -> _JSONSchemaFunctionParameters:
inspect.Parameter.VAR_POSITIONAL,
inspect.Parameter.VAR_KEYWORD,
):
- raise ValueError(f"{func.__name__}: Varargs and kwargs are not supported by the @tool decorator")
+ raise ValueError(f"{func.__name__}: *args and **kwargs are not supported by the @tool decorator")
for parameter in signature.parameters.values():
try:
@@ -191,58 +194,76 @@ def _build_function_parameters_property(func) -> _JSONSchemaFunctionParameters:
)
-def _parse_annotation(annotation: typing.Type) -> Union[list, tuple]:
- """Parse constituent parts of a potentially nested type hint.
+_PRIMITIVE_TYPES = Union[
+ str,
+ int,
+ float,
+ bool,
+ Parameter.empty, # type: ignore
+]
- Custom types are not supported, only built-in types and typing module types.
+@dataclass
+class _ParsedAnnotation:
+ def no_type_hint(self) -> bool:
+ """Check if the annotation has no type hint.
- Method returns potentially nested lists, with each list describing a
- level of type nesting. For a nested type, the function recursively calls
- itself to parse the inner type.
+ Examples:
+ str -> False
+ list -> True
+ list[str] -> False
+ """
+ raise NotImplementedError
- When the annotation is optional, a tuple is returned with the inner type
- to signify that the parameter is nullable.
- For lists, a list with two elements is returned, where the first element
- is the list type and the second element is the inner type.
+@dataclass
+class _ParsedPrimitiveAnnotation(_ParsedAnnotation):
+ annotation: _PRIMITIVE_TYPES
- For dictionaries, a list with three elements is returned, where the first
- element is the dict type, the second element is the key type, and the
- third element is the value type.
+ def no_type_hint(self) -> bool:
+ return self.annotation is Parameter.empty
- For tuples, a list where the fist element is the tuple type and the rest
- describes the inner types.
- For Union types, a list with the first element being the Union type and
- the rest describing the inner types.
+@dataclass
+class _ParsedDictAnnotation(_ParsedAnnotation):
+ # Both are null if no type hint e.g. dict vs dict[str, int]
+ key_annotation: Optional[_ParsedAnnotation]
+ value_annotation: Optional[_ParsedAnnotation]
- Note that for nested types that lack inner type, e.g. list instead of
- list[str], the inner type is set to Parameter.empty. This edge case is
- handled by _annotation_parse_to_json_schema.
+ def no_type_hint(self) -> bool:
+ return self.key_annotation is None and self.value_annotation is None
- Examples:
- str -> [str]
- Optional[str] -> (str)
- str | None -> (str)
- list[str] -> [list, [str]]
- Optional[list[str]] -> (list, [str])
+@dataclass
+class _ParsedTupleAnnotation(_ParsedAnnotation):
+ # Null if no type hint e.g. tuple vs tuple[str, int]
+ annotation: Optional[list[_ParsedAnnotation]]
+
+ def no_type_hint(self) -> bool:
+ return self.annotation is None
- dict[str, int] -> [dict, [str], [int]]
- Optional[dict[str, int]] -> (dict, [str], [int])
- list[list[str]] -> [list, [list, str]]
- list[Optional[list[str]]] -> [list, (list, [str])]
+@dataclass
+class _ParsedUnionAnnotation(_ParsedAnnotation):
+ annotation: list[_ParsedAnnotation]
- dict[str, Optional[int]] -> [dict, [str], (int)]
- Union[str, int] -> [Union, [str], [int]]
+@dataclass
+class _ParsedListAnnotation(_ParsedAnnotation):
+ # Null if no type hint e.g. list vs list[str]
+ annotation: Optional[_ParsedAnnotation]
+
+
+@dataclass
+class _ParsedOptionalAnnotation(_ParsedAnnotation):
+ annotation: _ParsedAnnotation
+
+
+def _parse_annotation(annotation: typing.Type) -> _ParsedAnnotation:
+ """Parse constituent parts of a potentially nested type hint.
- tuple[str, int, list[str]] -> [tuple, [str], [int], [list, str]]
- tuple[Optional[str], int, Optional[list[str]]] -> (str, [int], (list, str))
+ Custom types are not supported, only built-in types and typing module types.
- list -> [list]
"""
origin = typing.get_origin(annotation)
if origin is None:
@@ -250,182 +271,192 @@ def _parse_annotation(annotation: typing.Type) -> Union[list, tuple]:
# Parameter.empty is used for parameters without type hints
if annotation not in (str, int, float, bool, Parameter.empty, dict, list, tuple):
raise ValueError(f"Unsupported type hint: {annotation}")
- return [annotation]
+
+ # Check if it's a complex type with no inner type
+ if annotation == builtins.dict:
+ return _ParsedDictAnnotation(
+ value_annotation=None,
+ key_annotation=None,
+ )
+ if annotation == builtins.list:
+ return _ParsedListAnnotation(
+ annotation=None,
+ )
+ if annotation == builtins.tuple:
+ return _ParsedTupleAnnotation(
+ annotation=None,
+ )
+
+ # Is a primitive type
+ return _ParsedPrimitiveAnnotation(
+ annotation=annotation,
+ )
+
if origin is list:
- inner_type = _parse_annotation(typing.get_args(annotation)[0])
- return [origin, inner_type]
+ inner_annotation = _parse_annotation(typing.get_args(annotation)[0])
+ return _ParsedListAnnotation(
+ annotation=inner_annotation,
+ )
+
if origin is dict:
key_type = _parse_annotation(typing.get_args(annotation)[0])
value_type = _parse_annotation(typing.get_args(annotation)[1])
- return [origin, key_type, value_type]
+ return _ParsedDictAnnotation(
+ key_annotation=key_type,
+ value_annotation=value_type,
+ )
+
if origin is tuple:
- return [
- origin,
- *[_parse_annotation(arg) for arg in typing.get_args(annotation)],
- ]
+ return _ParsedTupleAnnotation(
+ annotation=[_parse_annotation(arg) for arg in typing.get_args(annotation)],
+ )
+
if origin is typing.Union:
sub_types = typing.get_args(annotation)
if sub_types[-1] is type(None):
- # Union is an Optional type
+ # type(None) in sub_types indicates Optional type
if len(sub_types) == 2:
- return tuple(_parse_annotation(sub_types[0]))
- return (
- origin,
- *[_parse_annotation(sub_type) for sub_type in sub_types[:-1]],
+ # Union is an Optional type only
+ return _ParsedOptionalAnnotation(
+ annotation=_parse_annotation(sub_types[0]),
+ )
+ # Union has sub_types and is Optional
+ return _ParsedOptionalAnnotation(
+ annotation=_ParsedUnionAnnotation(
+ annotation=[_parse_annotation(sub_type) for sub_type in sub_types[:-1]],
+ )
)
- # Union type
- return [
- origin,
- *[_parse_annotation(sub_type) for sub_type in sub_types],
- ]
+ # Union type that is not Optional
+ return _ParsedUnionAnnotation(
+ annotation=[_parse_annotation(sub_type) for sub_type in sub_types],
+ )
raise ValueError(f"Unsupported origin: {origin}")
-def _annotation_parse_to_json_schema(arg: Union[list, tuple]) -> Mapping[str, Union[str, Mapping, Sequence]]:
+_JSON_SCHEMA_ANY = ["string", "integer", "number", "boolean", "object", "array", "null"]
+
+
+def _annotation_parse_to_json_schema(
+ arg: _ParsedAnnotation,
+) -> Mapping[str, Union[str, Mapping, Sequence]]:
"""
Convert parse result from _parse_annotation to JSON Schema for a parameter.
The function recursively converts the nested type hints to a JSON Schema.
Note that 'any' is not supported by JSON Schema, so we allow any type as a workaround.
-
- Examples:
- [str] -> {"type": "string"}
- (str) -> {"type": ["string", "null"]}
-
- [list, [str]] -> {"type": "array", "items": {"type": "string"}}
- (list, [str]) -> {"type": ["array", "null"], "items": {"type": "string"}}
-
- [dict, [str], [int]] ->
- {
- "type": "object",
- "properties": {
- "key": {"type": "string"},
- "value": {"type": "integer"}
- }
- }
-
- [list, [list, str]] ->
- {
- "type": "array",
- "items": {
- "type": "array",
- "items": {"type": "string"}
- }
- }
-
- tuple[str, int, list[str]] ->
- {
- type: "array",
- items: [
- {"type": "string"},
- {"type": "integer"},
- {
- "type": "array",
- "items": {"type": "string"}
- }
- ]
- }
-
- Union[str, int] ->
- {
- "anyOf": [
- {"type": "string"},
- {"type": "integer"}
- ]
- }
-
- dict[int, list] ->
- {
- "type": "object",
- "properties": {
- "key": {"type": "integer"},
- "value": {
- "type": "array",
- "items": {"type": ["string", "integer", "number", "boolean", "object", "array", "null"]}
- }
- }
- }
-
- Optional[list] ->
- {
- "type": ["array", "null"],
- "items": {"type": ["string", "integer", "number", "boolean", "object", "array", "null"]},
- }
"""
- is_nullable = isinstance(arg, tuple)
arg_type: Mapping[str, Union[str, Mapping, Sequence]]
- if arg[0] is typing.Union:
+
+ if isinstance(arg, _ParsedOptionalAnnotation):
+ is_optional = True
+ arg = arg.annotation
+ else:
+ is_optional = False
+
+ if isinstance(arg, _ParsedUnionAnnotation):
arg_type = {
- "anyOf": [_annotation_parse_to_json_schema(sub_type) for sub_type in arg[1:]],
+ "anyOf": [_annotation_parse_to_json_schema(sub_type) for sub_type in arg.annotation],
}
- if arg[0] is tuple:
- if len(arg) == 1:
+
+ elif isinstance(arg, _ParsedTupleAnnotation):
+ if arg.annotation is None:
# tuple annotation with no type hints
# This is equivalent with a list, since the
# number of elements is not specified
arg_type = {
"type": "array",
- "items": {"type": ["string", "integer", "number", "boolean", "object", "array", "null"]},
+ "items": {"type": _JSON_SCHEMA_ANY},
}
else:
arg_type = {
"type": "array",
- "items": [_annotation_parse_to_json_schema(sub_type) for sub_type in arg[1:]],
+ "items": [_annotation_parse_to_json_schema(sub_type) for sub_type in arg.annotation],
}
- if arg[0] is list:
- if len(arg) == 1:
+
+ elif isinstance(arg, _ParsedListAnnotation):
+ if arg.annotation is None:
# list annotation with no type hints
- if isinstance(arg, tuple):
- # Support Optional annotation
- arg = (list, [Parameter.empty])
+ if is_optional:
+ arg_type = {
+ "type": ["array", "null"],
+ "items": {"type": _JSON_SCHEMA_ANY},
+ }
else:
- # Support non-Optional list annotation
- arg = [list, [Parameter.empty]]
- arg_type = {
- "type": "array",
- "items": _annotation_parse_to_json_schema(arg[1]),
- }
- if arg[0] is dict:
- if len(arg) == 1:
+ arg_type = {
+ "type": "array",
+ "items": {"type": _JSON_SCHEMA_ANY},
+ }
+ else:
+ arg_type = {
+ "type": "array",
+ "items": _annotation_parse_to_json_schema(arg.annotation),
+ }
+
+ elif isinstance(arg, _ParsedDictAnnotation):
+ if arg.key_annotation is None and arg.value_annotation is None:
# dict annotation with no type hints
- if isinstance(arg, tuple):
- arg = (dict, [Parameter.empty], [Parameter.empty])
+ if is_optional:
+ arg_type = {
+ "type": ["object", "null"],
+ "properties": {
+ "key": {"type": _JSON_SCHEMA_ANY},
+ "value": {"type": _JSON_SCHEMA_ANY},
+ },
+ }
else:
- arg = [dict, [Parameter.empty], [Parameter.empty]]
- arg_type = {
- "type": "object",
- "properties": {
- "key": _annotation_parse_to_json_schema(arg[1]),
- "value": _annotation_parse_to_json_schema(arg[2]),
- },
- }
- if arg[0] is builtins.str:
- arg_type = {"type": "string"}
- if arg[0] is builtins.int:
- arg_type = {"type": "integer"}
- if arg[0] is builtins.float:
- arg_type = {"type": "number"}
- if arg[0] is builtins.bool:
- arg_type = {"type": "boolean"}
- if arg[0] is Parameter.empty:
- # JSON Schema dropped support for 'any' type, we allow any type as a workaround
- arg_type = {"type": ["string", "integer", "number", "boolean", "object", "array", "null"]}
-
- if is_nullable:
- if arg[0] is typing.Union:
- arg_type["anyOf"] = [ # type: ignore
- {**type_option, "type": [type_option["type"], "null"]} # type: ignore
- for type_option in arg_type["anyOf"] # type: ignore
- ]
+ arg_type = {
+ "type": "object",
+ "properties": {
+ "key": {"type": _JSON_SCHEMA_ANY},
+ "value": {"type": _JSON_SCHEMA_ANY},
+ },
+ }
+ else:
+ arg_type = {
+ "type": "object",
+ "properties": {
+ "key": _annotation_parse_to_json_schema(arg.key_annotation), # type: ignore
+ "value": _annotation_parse_to_json_schema(arg.value_annotation), # type: ignore
+ },
+ }
+
+ elif isinstance(arg, _ParsedPrimitiveAnnotation):
+ if arg.annotation is builtins.str:
+ arg_type = {"type": "string"}
+ if arg.annotation is builtins.int:
+ arg_type = {"type": "integer"}
+ if arg.annotation is builtins.float:
+ arg_type = {"type": "number"}
+ if arg.annotation is builtins.bool:
+ arg_type = {"type": "boolean"}
+ if arg.annotation is Parameter.empty:
+ # JSON Schema dropped support for 'any' type, we allow any type as a workaround
+ arg_type = {"type": _JSON_SCHEMA_ANY}
+
+ else:
+ raise ValueError(f"Unsupported annotation type: {arg}")
+
+ if is_optional:
+ if isinstance(arg, _ParsedUnionAnnotation):
+ for type_option in arg_type["anyOf"]:
+ if isinstance(type_option["type"], list) and "null" not in type_option["type"]: # type: ignore
+ type_option["type"] = [*type_option["type"], "null"] # type: ignore
+ elif not isinstance(type_option["type"], list): # type: ignore
+ type_option["type"] = [type_option["type"], "null"] # type: ignore
else:
- arg_type = {**arg_type, "type": [arg_type["type"], "null"]}
+ if isinstance(arg_type["type"], list) and "null" not in arg_type["type"]: # type: ignore
+ arg_type = {**arg_type, "type": [*arg_type["type"], "null"]} # type: ignore
+ elif not isinstance(arg_type["type"], list): # type: ignore
+ arg_type = {**arg_type, "type": [arg_type["type"], "null"]} # type: ignore
return arg_type
-def _parameter_is_optional(parameter: inspect.Parameter) -> bool:
+def _parameter_is_optional(
+ parameter: inspect.Parameter,
+) -> bool:
"""Check if tool parameter is mandatory.
Examples:
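The dataclass refactor preserves the hint-to-schema mapping that the deleted docstring examples used to document. Reconstructed from those examples and the new branches (so, inferred rather than quoted from the patch):

```python
# _JSON_SCHEMA_ANY is the patch's workaround for JSON Schema lacking 'any'
_JSON_SCHEMA_ANY = ["string", "integer", "number", "boolean", "object", "array", "null"]

EXPECTED_SCHEMAS = {
    "str": {"type": "string"},
    "Optional[str]": {"type": ["string", "null"]},
    "list[str]": {"type": "array", "items": {"type": "string"}},
    "Optional[list[str]]": {"type": ["array", "null"], "items": {"type": "string"}},
    "Union[str, int]": {"anyOf": [{"type": "string"}, {"type": "integer"}]},
    "Optional[Union[str, int]]": {
        "anyOf": [{"type": ["string", "null"]}, {"type": ["integer", "null"]}],
    },
    # Bare `list` carries no inner hint, so items may be any JSON type
    "list": {"type": "array", "items": {"type": _JSON_SCHEMA_ANY}},
}
```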
diff --git a/src/humanloop/eval_utils/__init__.py b/src/humanloop/eval_utils/__init__.py
index abd63528..ac5a5eba 100644
--- a/src/humanloop/eval_utils/__init__.py
+++ b/src/humanloop/eval_utils/__init__.py
@@ -1,4 +1,4 @@
-from .run import run_eval, log_with_evaluation_context
+from .run import log_with_evaluation_context, run_eval
from .types import File
__all__ = ["run_eval", "log_with_evaluation_context", "File"]
diff --git a/src/humanloop/eval_utils/context.py b/src/humanloop/eval_utils/context.py
index a055faee..93469612 100644
--- a/src/humanloop/eval_utils/context.py
+++ b/src/humanloop/eval_utils/context.py
@@ -1,5 +1,4 @@
-import typing
-from contextvars import ContextVar
+from contextvars import ContextVar, Token
from typing import Callable, TypedDict
@@ -25,6 +24,4 @@ class EvaluationContext(TypedDict):
run_id: str
-EVALUATION_CONTEXT_VAR_NAME = "__EVALUATION_CONTEXT"
-
-EVALUATION_CONTEXT: ContextVar[typing.Optional[EvaluationContext]] = ContextVar(EVALUATION_CONTEXT_VAR_NAME)
+EVALUATION_CONTEXT_VARIABLE_NAME = "__EVALUATION_CONTEXT"
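With the module-level `EVALUATION_CONTEXT` gone, each `Humanloop` client owns its own `ContextVar`, so two clients in one process can no longer clobber each other's evaluation state. The pattern in miniature (a toy model, not the real client):

```python
from contextvars import ContextVar
from typing import Optional

class ClientSketch:
    def __init__(self) -> None:
        # Per-instance variable: evaluation state is scoped to this client
        self.evaluation_context_variable: ContextVar[Optional[dict]] = ContextVar(
            "__EVALUATION_CONTEXT"
        )

    def consume_context(self) -> Optional[dict]:
        ctx = self.evaluation_context_variable.get(None)
        self.evaluation_context_variable.set(None)  # mark as consumed
        return ctx

a, b = ClientSketch(), ClientSketch()
a.evaluation_context_variable.set({"run_id": "run_1"})
assert a.consume_context() == {"run_id": "run_1"}
assert b.consume_context() is None  # b's state is unaffected by a's
```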
diff --git a/src/humanloop/eval_utils/run.py b/src/humanloop/eval_utils/run.py
index a8419601..86ee8cea 100644
--- a/src/humanloop/eval_utils/run.py
+++ b/src/humanloop/eval_utils/run.py
@@ -8,10 +8,11 @@
not be called directly.
"""
+from contextvars import ContextVar
+import copy
import inspect
import json
import logging
-import copy
import sys
import threading
import time
@@ -25,17 +26,15 @@
from pydantic import ValidationError
-
from humanloop import EvaluatorResponse, FlowResponse, PromptResponse, ToolResponse
-
-from humanloop.prompts.client import PromptsClient
from humanloop.core.api_error import ApiError
-from humanloop.eval_utils.context import EVALUATION_CONTEXT, EvaluationContext
+from humanloop.eval_utils.context import EvaluationContext
from humanloop.eval_utils.types import Dataset, Evaluator, EvaluatorCheck, File
# We use TypedDicts for requests, which is consistent with the rest of the SDK
from humanloop.evaluators.client import EvaluatorsClient
from humanloop.flows.client import FlowsClient
+from humanloop.prompts.client import PromptsClient
from humanloop.requests import CodeEvaluatorRequestParams as CodeEvaluatorDict
from humanloop.requests import ExternalEvaluatorRequestParams as ExternalEvaluator
from humanloop.requests import FlowKernelRequestParams as FlowDict
@@ -47,17 +46,17 @@
from humanloop.types import BooleanEvaluatorStatsResponse as BooleanStats
from humanloop.types import DatapointResponse as Datapoint
from humanloop.types import EvaluationResponse, EvaluationStats
-from humanloop.types.create_evaluator_log_response import CreateEvaluatorLogResponse
-from humanloop.types.create_flow_log_response import CreateFlowLogResponse
-from humanloop.types.create_prompt_log_response import CreatePromptLogResponse
-from humanloop.types.create_tool_log_response import CreateToolLogResponse
-from humanloop.types.datapoint_response_target_value import DatapointResponseTargetValue
# Responses are Pydantic models and we leverage them for improved request validation
from humanloop.types import FlowKernelRequest as Flow
from humanloop.types import NumericEvaluatorStatsResponse as NumericStats
from humanloop.types import PromptKernelRequest as Prompt
from humanloop.types import ToolKernelRequest as Tool
+from humanloop.types.create_evaluator_log_response import CreateEvaluatorLogResponse
+from humanloop.types.create_flow_log_response import CreateFlowLogResponse
+from humanloop.types.create_prompt_log_response import CreatePromptLogResponse
+from humanloop.types.create_tool_log_response import CreateToolLogResponse
+from humanloop.types.datapoint_response_target_value import DatapointResponseTargetValue
from humanloop.types.evaluation_run_response import EvaluationRunResponse
from humanloop.types.run_stats_response import RunStatsResponse
@@ -90,7 +89,10 @@
CLIENT_TYPE = TypeVar("CLIENT_TYPE", PromptsClient, ToolsClient, FlowsClient, EvaluatorsClient)
-def log_with_evaluation_context(client: CLIENT_TYPE) -> CLIENT_TYPE:
+def log_with_evaluation_context(
+ client: CLIENT_TYPE,
+ evaluation_context_variable: ContextVar[Optional[EvaluationContext]],
+) -> CLIENT_TYPE:
"""
Wrap the `log` method of the provided Humanloop client to use EVALUATION_CONTEXT.
@@ -108,7 +110,7 @@ def _is_evaluated_file(
The user of the .log API can refer to the File that owns that Log either by
ID or Path. This function matches against any of them in EvaluationContext.
"""
- if evaluation_context == {}:
+ if evaluation_context is None:
return False
return evaluation_context.get("file_id") == log_args.get(file_id_attribute) or evaluation_context.get(
"path"
@@ -126,11 +128,7 @@ def _overloaded_log(
CreateFlowLogResponse,
CreateEvaluatorLogResponse,
]:
- evaluation_context: EvaluationContext
- try:
- evaluation_context = EVALUATION_CONTEXT.get() or {} # type: ignore
- except LookupError:
- evaluation_context = {} # type: ignore
+ evaluation_context = evaluation_context_variable.get()
if isinstance(client, PromptsClient):
file_id_attribute = "prompt_id"
@@ -148,9 +146,13 @@ def _overloaded_log(
):
# If the .log API user does not provide the source_datapoint_id or run_id,
# override them with the values from the EvaluationContext
+ evaluation_context = typing.cast(
+ EvaluationContext,
+ evaluation_context,
+ )
for attribute in ["source_datapoint_id", "run_id"]:
if attribute not in kwargs or kwargs[attribute] is None:
- kwargs[attribute] = evaluation_context.get(attribute)
+ kwargs[attribute] = evaluation_context[attribute] # type: ignore
# Call the original .log method
response = self._log(**kwargs)
@@ -162,13 +164,21 @@ def _overloaded_log(
file_id_attribute=file_id_attribute,
):
# Notify that the Log has been added to the Evaluation
- evaluation_context["upload_callback"](
+ # evaluation_context cannot be None
+ evaluation_context = typing.cast(
+ EvaluationContext,
+ evaluation_context,
+ )
+ evaluation_context["upload_callback"]( # type: ignore
{
"id": response.id,
**kwargs,
}
)
+ # Mark the Evaluation Context as consumed
+ evaluation_context_variable.set(None)
+
return response
# Replace the original log method with the overloaded one
@@ -225,9 +235,10 @@ def increment(self):
def run_eval(
client: "BaseHumanloop",
- file: Union[File, Callable],
+ file: File,
name: Optional[str],
dataset: Dataset,
+ evaluation_context_variable: ContextVar[Optional[EvaluationContext]],
evaluators: Optional[Sequence[Evaluator]] = None,
workers: int = 4,
) -> List[EvaluatorCheck]:
@@ -244,13 +255,30 @@ def run_eval(
"""
global _PROGRESS_BAR
- if isinstance(file, Callable): # type: ignore
- # Decorated function
- file_: File = copy.deepcopy(file.file) # type: ignore
+ if hasattr(file["callable"], "file"):
+        # When the callable inside `file` is a decorated function,
+        # we need to validate that the other attributes of `file`
+ # match the attributes of the decorator
+ inner_file: File = file["callable"].file
+ if "path" in file and inner_file["path"] != file["path"]:
+ raise ValueError(
+ "`path` attribute specified in the `file` does not match the File path of the decorated function."
+ )
+ if "version" in file and inner_file["version"] != file["version"]:
+ raise ValueError(
+ "`version` attribute in the `file` does not match the File version of the decorated function."
+ )
+ if "type" in file and inner_file["type"] != file["type"]:
+ raise ValueError(
+ "`type` attribute of `file` argument does not match the File type of the decorated function."
+ )
+ if "id" in file:
+ raise ValueError("Do not specify an `id` attribute in `file` argument when using a decorated function.")
+        # The File object on the decorated function holds at least
+        # as much information as the `file` argument
+ file_ = copy.deepcopy(inner_file)
else:
- file_ = file # type: ignore
-
- is_decorated = file_.pop("is_decorated", False)
+ file_ = file
# Get or create the file on Humanloop
version = file_.pop("version", {})
@@ -357,6 +385,7 @@ def run_eval(
path=evaluator.get("path"),
spec=spec,
)
+ # function_ cannot be None, cast it for type checking
function_ = typing.cast(Callable, function_)
# Validate upfront that the local Evaluators and Dataset fit
@@ -406,36 +435,43 @@ def run_eval(
_PROGRESS_BAR = _SimpleProgressBar(len(hl_dataset.datapoints)) # type: ignore
- if is_decorated:
+ # Define the function to execute the `callable` in parallel and Log to Humanloop
+ def process_datapoint(dp: Datapoint, file_id: str, file_path: str, run_id: str):
+ def upload_callback(log: dict):
+ """Logic ran after the Log has been created."""
+ _run_local_evaluators(
+ client=client,
+ log=log,
+ datapoint_target=dp.target,
+ local_evaluators=local_evaluators,
+ )
+ _PROGRESS_BAR.increment() # type: ignore
- def process_datapoint(dp: Datapoint, file_id: str, file_path: str, run_id: str):
- def upload_callback(log: dict):
- # OTel exporter will call this after the Log is uploaded
- _run_local_evaluators(
- client=client,
- log=log,
- datapoint_target=dp.target,
- local_evaluators=local_evaluators,
- )
- _PROGRESS_BAR.increment() # type: ignore
-
- datapoint_dict = dp.dict()
- # Set the Evaluation Context for the Exporter
- # Each thread will have its own context
- EVALUATION_CONTEXT.set(
- EvaluationContext(
- source_datapoint_id=dp.id,
- upload_callback=upload_callback,
- file_id=file_id,
- run_id=run_id,
- path=file_path,
- )
+ datapoint_dict = dp.dict()
+ # Set the Evaluation Context for current datapoint
+ evaluation_context_variable.set(
+ EvaluationContext(
+ source_datapoint_id=dp.id,
+ upload_callback=upload_callback,
+ file_id=file_id,
+ run_id=run_id,
+ path=file_path,
)
+ )
+ log_func = _get_log_func(
+ client=client,
+ file_type=type_,
+ file_id=hl_file.id,
+ version_id=hl_file.version_id,
+ run_id=run_id,
+ )
+ start_time = datetime.now()
+ try:
if datapoint_dict.get("messages"):
# function_ is decorated by Humanloop, the OTel Exporter will
# handle the logging, which will call the upload_callback
# function above when it's done
- function_( # type: ignore
+ output = function_( # type: ignore
**datapoint_dict["inputs"],
messages=datapoint_dict["messages"],
)
@@ -443,61 +479,35 @@ def upload_callback(log: dict):
# function_ is decorated by Humanloop, the OTel Exporter will
# handle the logging, which will call the upload_callback
# function above when it's done
- function_(**datapoint_dict["inputs"]) # type: ignore
-
- else:
- # Define the function to execute your function in parallel and Log to Humanloop
- def process_datapoint(dp: Datapoint, file_id: str, file_path: str, run_id: str):
- log_func = _get_log_func(
- client=client,
- file_type=type_,
- file_id=hl_file.id,
- version_id=hl_file.version_id,
- run_id=run_id,
- )
-
- start_time = datetime.now()
- datapoint_dict = dp.dict()
- try:
- if "messages" in datapoint_dict:
- output = function_( # type: ignore
- **datapoint_dict["inputs"],
- messages=datapoint_dict["messages"],
- )
- else:
- output = function_(**datapoint_dict["inputs"]) # type: ignore
- if not isinstance(output, str):
- try:
- output = json.dumps(output)
- # throw error if it fails to serialize
- except Exception as _:
- raise ValueError(
- f"Your {type_}'s `callable` must return a string or a JSON serializable object."
- )
- log = log_func(
+ output = function_(**datapoint_dict["inputs"]) # type: ignore
+
+ if not isinstance(output, str):
+ try:
+ output = json.dumps(output)
+ except Exception:
+ # throw error if it fails to serialize
+ raise ValueError(f"Your {type_}'s `callable` must return a string or a JSON serializable object.")
+
+ context_variable = evaluation_context_variable.get()
+ if context_variable is not None:
+ # Evaluation Context has not been consumed
+ # function_ is a plain callable so we need to create a Log
+ log_func(
inputs=datapoint.inputs,
output=output,
- source_datapoint_id=datapoint.id,
- start_time=start_time,
- end_time=datetime.now(),
- )
- except Exception as e:
- log = log_func(
- inputs=dp.inputs,
- error=str(e),
- source_datapoint_id=dp.id,
start_time=start_time,
end_time=datetime.now(),
)
- logger.warning(msg=f"\nYour {type_}'s `callable` failed for Datapoint: {dp.id}. \n Error: {str(e)}")
-
- _run_local_evaluators(
- client=client,
- log=log,
- datapoint_target=dp.target,
- local_evaluators=local_evaluators,
+ except Exception as e:
+ log_func(
+ inputs=dp.inputs,
+ error=str(e),
+ source_datapoint_id=dp.id,
+ run_id=run_id,
+ start_time=start_time,
+ end_time=datetime.now(),
)
- _PROGRESS_BAR.increment() # type: ignore
+ logger.warning(msg=f"\nYour {type_}'s `callable` failed for Datapoint: {dp.id}. \n Error: {str(e)}")
# Execute the function and send the logs to Humanloop in parallel
logger.info(f"\n{CYAN}Navigate to your Evaluation:{RESET}\n{evaluation.url}\n")
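Taken together, the overloaded `.log` now (1) fills `source_datapoint_id` and `run_id` from the active context when the caller omits them, (2) fires `upload_callback` once the Log is created, and (3) sets the variable back to `None` so each context is consumed exactly once. A simplified analogue of that flow (the real `_overloaded_log` also matches the Log against the evaluated File's id or path before consuming):

```python
from contextvars import ContextVar
from typing import Optional

def wrap_log_with_context(client, context_var: ContextVar[Optional[dict]]):
    original_log = client.log

    def overloaded_log(**kwargs):
        ctx = context_var.get(None)
        if ctx is not None:
            # Fill evaluation fields the caller did not provide
            for attr in ("source_datapoint_id", "run_id"):
                if kwargs.get(attr) is None:
                    kwargs[attr] = ctx[attr]
        response = original_log(**kwargs)
        if ctx is not None:
            ctx["upload_callback"]({"id": response.id, **kwargs})
            context_var.set(None)  # consumed: no duplicate Logs
        return response

    client.log = overloaded_log
    return client
```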
diff --git a/src/humanloop/eval_utils/types.py b/src/humanloop/eval_utils/types.py
index 8db5832d..845a8542 100644
--- a/src/humanloop/eval_utils/types.py
+++ b/src/humanloop/eval_utils/types.py
@@ -48,7 +48,6 @@ class File(Identifiers):
If `messages` are defined in your Dataset, then `output = callable(**datapoint.inputs, messages=datapoint.messages)`.
    It should return a string or JSON-serializable output.
"""
- is_decorated: NotRequired[bool]
class Dataset(Identifiers):
diff --git a/src/humanloop/otel/constants.py b/src/humanloop/otel/constants.py
index 1215dceb..d28126a0 100644
--- a/src/humanloop/otel/constants.py
+++ b/src/humanloop/otel/constants.py
@@ -1,6 +1,6 @@
# Attribute name prefix on Humanloop spans for file-related attributes + path
-HL_FILE_KEY = "humanloop.file"
+HUMANLOOP_FILE_KEY = "humanloop.file"
# Attribute name prefix on Humanloop spans for log-related attributes
-HL_LOG_KEY = "humanloop.log"
-HL_FILE_TYPE_KEY = "humanloop.file.type"
-HL_PATH_KEY = "humanloop.file.path"
+HUMANLOOP_LOG_KEY = "humanloop.log"
+HUMANLOOP_FILE_TYPE_KEY = "humanloop.file.type"
+HUMANLOOP_PATH_KEY = "humanloop.file.path"
diff --git a/src/humanloop/otel/exporter.py b/src/humanloop/otel/exporter.py
index 3cb294ba..468e833b 100644
--- a/src/humanloop/otel/exporter.py
+++ b/src/humanloop/otel/exporter.py
@@ -1,3 +1,4 @@
+import copy
import json
import logging
import typing
@@ -11,15 +12,15 @@
from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult
from humanloop.core import ApiError as HumanloopApiError
-from humanloop.eval_utils.context import EVALUATION_CONTEXT, EvaluationContext
+from humanloop.eval_utils.context import EvaluationContext
from humanloop.otel import TRACE_FLOW_CONTEXT, FlowContext
-from humanloop.otel.constants import HL_FILE_KEY, HL_FILE_TYPE_KEY, HL_LOG_KEY, HL_PATH_KEY
+from humanloop.otel.constants import HUMANLOOP_FILE_KEY, HUMANLOOP_FILE_TYPE_KEY, HUMANLOOP_LOG_KEY, HUMANLOOP_PATH_KEY
from humanloop.otel.helpers import is_humanloop_span, read_from_opentelemetry_span
from humanloop.requests.flow_kernel_request import FlowKernelRequestParams
from humanloop.requests.prompt_kernel_request import PromptKernelRequestParams
if typing.TYPE_CHECKING:
- from humanloop.base_client import BaseHumanloop
+ from humanloop.client import Humanloop
logger = logging.getLogger("humanloop.sdk")
@@ -35,7 +36,7 @@ class HumanloopSpanExporter(SpanExporter):
def __init__(
self,
- client: "BaseHumanloop",
+ client: "Humanloop",
worker_threads: Optional[int] = None,
) -> None:
"""Upload Spans created by SDK decorators to Humanloop.
@@ -63,9 +64,22 @@ def __init__(
thread.start()
def export(self, spans: trace.Sequence[ReadableSpan]) -> SpanExportResult:
+ def is_evaluated_file(
+ span: ReadableSpan,
+ evaluation_context: Optional[EvaluationContext],
+ ) -> bool:
+ if evaluation_context is None:
+ return False
+
+ return span.attributes.get(HUMANLOOP_PATH_KEY) == evaluation_context["path"] # type: ignore
+
if not self._shutdown:
try:
- evaluation_context = EVALUATION_CONTEXT.get()
+ evaluation_context = self._client.evaluation_context_variable.get()
+ if len(spans) > 1:
+ raise RuntimeError("HumanloopSpanExporter expected a single span when running an evaluation")
+ if not is_evaluated_file(spans[0], evaluation_context):
+ evaluation_context = None
except LookupError:
evaluation_context = None
for span in spans:
@@ -74,7 +88,20 @@ def export(self, spans: trace.Sequence[ReadableSpan]) -> SpanExportResult:
# thread spawned by eval_utils.run.run_eval. Need
# to pass the EvaluationContext to the thread doing
# the logging
- self._upload_queue.put((span, evaluation_context))
+ self._upload_queue.put(
+ (
+ span,
+ copy.deepcopy(evaluation_context),
+ )
+ )
+ # Reset the EvaluationContext so run eval does not
+ # create a duplicate Log
+ if evaluation_context is not None and is_evaluated_file(
+ spans[0],
+ evaluation_context,
+ ):
+ # Mark the EvaluationContext as used
+ self._client.evaluation_context_variable.set(None)
return SpanExportResult.SUCCESS
else:
logger.warning("HumanloopSpanExporter is shutting down, not accepting new spans")
@@ -107,16 +134,23 @@ def _do_work(self):
bottom-up, while the upload of a Trace happens top-down. If a Span's parent
has not been uploaded yet, the Span will be re-queued to be uploaded later.
"""
+
# Do work while the Exporter was not instructed to
# wind down or the queue is not empty
while self._upload_queue.qsize() > 0 or not self._shutdown:
try:
+ thread_args: tuple[ReadableSpan, EvaluationContext] # type: ignore
# Don't block or the thread will never be notified of the shutdown
- thread_args: tuple[ReadableSpan, EvaluationContext] = self._upload_queue.get(block=False) # type: ignore
+ thread_args = self._upload_queue.get(
+ block=False,
+ ) # type: ignore
span_to_export, evaluation_context = thread_args
- # Set the EvaluationContext for the thread so the .log action
- # works as expected
- EVALUATION_CONTEXT.set(evaluation_context)
+ # Set the EvaluationContext for the thread so the .log action works as expected
+ # NOTE: Expecting the evaluation thread to send a single span so we are
+ # not resetting the EvaluationContext in the scope of the export thread
+ self._client.evaluation_context_variable.set(
+ copy.deepcopy(evaluation_context),
+ )
except EmptyQueue:
continue
trace_metadata = TRACE_FLOW_CONTEXT.get(span_to_export.get_span_context().span_id)
@@ -135,8 +169,8 @@ def _do_work(self):
self._upload_queue.task_done()
def _export_span_dispatch(self, span: ReadableSpan) -> None:
- hl_file = read_from_opentelemetry_span(span, key=HL_FILE_KEY)
- file_type = span._attributes.get(HL_FILE_TYPE_KEY) # type: ignore
+ hl_file = read_from_opentelemetry_span(span, key=HUMANLOOP_FILE_KEY)
+ file_type = span._attributes.get(HUMANLOOP_FILE_TYPE_KEY) # type: ignore
if file_type == "prompt":
export_func = self._export_prompt
@@ -151,11 +185,11 @@ def _export_span_dispatch(self, span: ReadableSpan) -> None:
def _export_prompt(self, span: ReadableSpan) -> None:
file_object: dict[str, Any] = read_from_opentelemetry_span(
span,
- key=HL_FILE_KEY,
+ key=HUMANLOOP_FILE_KEY,
)
log_object: dict[str, Any] = read_from_opentelemetry_span(
span,
- key=HL_LOG_KEY,
+ key=HUMANLOOP_LOG_KEY,
)
# NOTE: Due to OTel conventions, attributes with value of None are removed
# If not present, instantiate as empty dictionary
@@ -171,17 +205,18 @@ def _export_prompt(self, span: ReadableSpan) -> None:
trace_parent_id = self._span_id_to_uploaded_log_id[trace_metadata["trace_parent_id"]]
if trace_parent_id is None:
# Parent Log in Trace upload failed
- file_path = read_from_opentelemetry_span(span, key=HL_PATH_KEY)
+ file_path = read_from_opentelemetry_span(span, key=HUMANLOOP_PATH_KEY)
logger.error(f"Skipping log for {file_path}: parent Log upload failed")
return
else:
trace_parent_id = None
prompt: PromptKernelRequestParams = file_object["prompt"]
path: str = file_object["path"]
- if not isinstance(log_object["output"], str):
- # Output expected to be a string, if decorated function
- # does not return one, jsonify it
- log_object["output"] = json.dumps(log_object["output"])
+ if "output" in log_object:
+ if not isinstance(log_object["output"], str):
+ # Output expected to be a string, if decorated function
+ # does not return one, jsonify it
+ log_object["output"] = json.dumps(log_object["output"])
if "attributes" not in prompt or not prompt["attributes"]:
prompt["attributes"] = {}
try:
@@ -192,12 +227,12 @@ def _export_prompt(self, span: ReadableSpan) -> None:
trace_parent_id=trace_parent_id,
)
self._span_id_to_uploaded_log_id[span.context.span_id] = log_response.id
- except HumanloopApiError as e:
+ except HumanloopApiError:
self._span_id_to_uploaded_log_id[span.context.span_id] = None
def _export_tool(self, span: ReadableSpan) -> None:
- file_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HL_FILE_KEY)
- log_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HL_LOG_KEY)
+ file_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HUMANLOOP_FILE_KEY)
+ log_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HUMANLOOP_LOG_KEY)
trace_metadata: FlowContext = TRACE_FLOW_CONTEXT.get(span.get_span_context().span_id, {})
if "trace_parent_id" in trace_metadata and trace_metadata["trace_parent_id"]:
trace_parent_id = self._span_id_to_uploaded_log_id.get(
@@ -205,7 +240,7 @@ def _export_tool(self, span: ReadableSpan) -> None:
)
if trace_parent_id is None:
# Parent Log in Trace upload failed
- file_path = read_from_opentelemetry_span(span, key=HL_PATH_KEY)
+ file_path = read_from_opentelemetry_span(span, key=HUMANLOOP_PATH_KEY)
logger.error(f"Skipping log for {file_path}: parent Log upload failed")
return
else:
@@ -228,12 +263,12 @@ def _export_tool(self, span: ReadableSpan) -> None:
trace_parent_id=trace_parent_id,
)
self._span_id_to_uploaded_log_id[span.context.span_id] = log_response.id
- except HumanloopApiError as e:
+ except HumanloopApiError:
self._span_id_to_uploaded_log_id[span.context.span_id] = None
def _export_flow(self, span: ReadableSpan) -> None:
- file_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HL_FILE_KEY)
- log_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HL_LOG_KEY)
+ file_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HUMANLOOP_FILE_KEY)
+ log_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HUMANLOOP_LOG_KEY)
trace_metadata: FlowContext = TRACE_FLOW_CONTEXT.get(
span.get_span_context().span_id,
{},
@@ -246,7 +281,7 @@ def _export_flow(self, span: ReadableSpan) -> None:
# Parent Log in Trace upload failed
# NOTE: Check if the trace_id metadata field points to the
# span itself. This signifies the span is the head of the Trace
- file_path = read_from_opentelemetry_span(span, key=HL_PATH_KEY)
+ file_path = read_from_opentelemetry_span(span, key=HUMANLOOP_PATH_KEY)
logger.error(f"Skipping log for {file_path}: parent Log upload failed")
return
else:
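The dispatch rules in `_do_work` reduce to three cases: a span outside any Flow trace, the head of a Flow trace, and a child whose parent Log has already been uploaded; everything else is re-queued. A condensed sketch of that decision, assuming `trace_metadata` mirrors the `FlowContext` dict used above (this is not the SDK's code):

```python
from typing import Optional

def should_dispatch(
    trace_metadata: Optional[dict],
    span_id_to_uploaded_log_id: dict,
) -> bool:
    # Simplified mirror of the _do_work dispatch rules.
    if trace_metadata is None:
        return True  # span is not part of a Flow Log
    if trace_metadata.get("trace_parent_id") is None:
        return True  # span is the head of a Flow Trace
    # dispatch only once the parent Log has been uploaded
    return trace_metadata["trace_parent_id"] in span_id_to_uploaded_log_id

assert should_dispatch(None, {})
assert should_dispatch({"trace_parent_id": None}, {})
assert not should_dispatch({"trace_parent_id": 1}, {})  # re-queue instead
```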
diff --git a/src/humanloop/otel/helpers.py b/src/humanloop/otel/helpers.py
index 22591ebc..089f759e 100644
--- a/src/humanloop/otel/helpers.py
+++ b/src/humanloop/otel/helpers.py
@@ -1,19 +1,20 @@
+import uuid
from typing import Union
from opentelemetry.sdk.trace import ReadableSpan
from opentelemetry.trace import SpanKind
from opentelemetry.util.types import AttributeValue
-from humanloop.otel.constants import HL_FILE_KEY, HL_LOG_KEY
+from humanloop.otel.constants import HUMANLOOP_FILE_KEY, HUMANLOOP_LOG_KEY
NestedDict = dict[str, Union["NestedDict", AttributeValue]]
NestedList = list[Union["NestedList", NestedDict]]
-def _list_to_ott(lst: NestedList) -> NestedDict:
+def _list_to_otel_format(lst: NestedList) -> NestedDict:
"""Transforms list of values to be written into a dictionary with index values as keys.
- When writing to Otel span attributes, only primitive values or lists are allowed.
+ When writing to OTel span attributes, only primitive values or lists are allowed.
Nested dictionaries must be linearised. For example, writing to span attribute `foo`
the dictionary value {'a': 7, 'b': 'hello'} would be translated in the span attributes
dictionary to look like:
@@ -40,7 +41,7 @@ def _list_to_ott(lst: NestedList) -> NestedDict:
}
```
"""
- return {str(idx): val if not isinstance(val, list) else _list_to_ott(val) for idx, val in enumerate(lst)}
+ return {str(idx): val if not isinstance(val, list) else _list_to_otel_format(val) for idx, val in enumerate(lst)}
def write_to_opentelemetry_span(
@@ -85,7 +86,7 @@ def write_to_opentelemetry_span(
to_write_copy: Union[dict, AttributeValue]
if isinstance(value, list):
- to_write_copy = _list_to_ott(value)
+ to_write_copy = _list_to_otel_format(value)
else:
to_write_copy = dict(value) # type: ignore
linearised_attributes: dict[str, AttributeValue] = {}
@@ -267,8 +268,8 @@ def is_humanloop_span(span: ReadableSpan) -> bool:
"""Check if the Span was created by the Humanloop SDK."""
try:
# Valid spans will have keys with the HUMANLOOP_FILE_KEY and HUMANLOOP_LOG_KEY prefixes present
- read_from_opentelemetry_span(span, key=HL_FILE_KEY)
- read_from_opentelemetry_span(span, key=HL_LOG_KEY)
+ read_from_opentelemetry_span(span, key=HUMANLOOP_FILE_KEY)
+ read_from_opentelemetry_span(span, key=HUMANLOOP_LOG_KEY)
except KeyError:
return False
return True
@@ -284,3 +285,7 @@ def module_is_installed(module_name: str) -> bool:
except ImportError:
return False
return True
+
+
+def generate_span_id() -> str:
+ return str(uuid.uuid4())
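A worked example of the list-linearisation rule described in the renamed `_list_to_otel_format` helper's docstring above, as a standalone function:

```python
# List indices become string keys so nested lists can be flattened
# into OTel span attributes, which only accept primitives and lists.
def list_to_otel_format(lst: list) -> dict:
    return {
        str(idx): val if not isinstance(val, list) else list_to_otel_format(val)
        for idx, val in enumerate(lst)
    }

assert list_to_otel_format(["a", ["b", "c"]]) == {
    "0": "a",
    "1": {"0": "b", "1": "c"},
}
```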
diff --git a/src/humanloop/otel/processor.py b/src/humanloop/otel/processor.py
index 24a6749c..14fd7501 100644
--- a/src/humanloop/otel/processor.py
+++ b/src/humanloop/otel/processor.py
@@ -7,7 +7,7 @@
from opentelemetry.sdk.trace.export import SimpleSpanProcessor, SpanExporter
from pydantic import ValidationError as PydanticValidationError
-from humanloop.otel.constants import HL_FILE_KEY, HL_FILE_TYPE_KEY, HL_LOG_KEY
+from humanloop.otel.constants import HUMANLOOP_FILE_KEY, HUMANLOOP_FILE_TYPE_KEY, HUMANLOOP_LOG_KEY
from humanloop.otel.helpers import (
is_humanloop_span,
is_llm_provider_call,
@@ -69,14 +69,14 @@ def _is_instrumentor_span(span: ReadableSpan) -> bool:
def _process_span_dispatch(span: ReadableSpan, children_spans: list[ReadableSpan]):
- file_type = span.attributes[HL_FILE_TYPE_KEY] # type: ignore
+ file_type = span.attributes[HUMANLOOP_FILE_TYPE_KEY] # type: ignore
# Processing common to all Humanloop File types
if span.start_time:
- span._attributes[f"{HL_LOG_KEY}.start_time"] = int(span.start_time / 1e9) # type: ignore
+ span._attributes[f"{HUMANLOOP_LOG_KEY}.start_time"] = int(span.start_time / 1e9) # type: ignore
if span.end_time:
- span._attributes[f"{HL_LOG_KEY}.end_time"] = int(span.end_time / 1e9) # type: ignore
- span._attributes[f"{HL_LOG_KEY}.created_at"] = int(span.end_time / 1e9) # type: ignore
+ span._attributes[f"{HUMANLOOP_LOG_KEY}.end_time"] = int(span.end_time / 1e9) # type: ignore
+ span._attributes[f"{HUMANLOOP_LOG_KEY}.created_at"] = int(span.end_time / 1e9) # type: ignore
# Processing specific to each Humanloop File type
if file_type == "prompt":
@@ -104,7 +104,7 @@ def _process_prompt(prompt_span: ReadableSpan, children_spans: list[ReadableSpan
def _enrich_prompt_kernel(prompt_span: ReadableSpan, llm_provider_call_span: ReadableSpan):
- hl_file: dict[str, Any] = read_from_opentelemetry_span(prompt_span, key=HL_FILE_KEY)
+ hl_file: dict[str, Any] = read_from_opentelemetry_span(prompt_span, key=HUMANLOOP_FILE_KEY)
gen_ai_object: dict[str, Any] = read_from_opentelemetry_span(llm_provider_call_span, key="gen_ai")
llm_object: dict[str, Any] = read_from_opentelemetry_span(llm_provider_call_span, key="llm")
@@ -137,7 +137,7 @@ def _enrich_prompt_kernel(prompt_span: ReadableSpan, llm_provider_call_span: Rea
hl_file["prompt"] = prompt
write_to_opentelemetry_span(
span=prompt_span,
- key=HL_FILE_KEY,
+ key=HUMANLOOP_FILE_KEY,
# hl_file was modified in place via prompt_kernel reference
value=hl_file,
)
@@ -145,7 +145,7 @@ def _enrich_prompt_kernel(prompt_span: ReadableSpan, llm_provider_call_span: Rea
def _enrich_prompt_log(prompt_span: ReadableSpan, llm_provider_call_span: ReadableSpan):
try:
- hl_log: dict[str, Any] = read_from_opentelemetry_span(prompt_span, key=HL_LOG_KEY)
+ hl_log: dict[str, Any] = read_from_opentelemetry_span(prompt_span, key=HUMANLOOP_LOG_KEY)
except KeyError:
hl_log = {}
gen_ai_object: dict[str, Any] = read_from_opentelemetry_span(llm_provider_call_span, key="gen_ai")
@@ -160,7 +160,7 @@ def _enrich_prompt_log(prompt_span: ReadableSpan, llm_provider_call_span: Readab
write_to_opentelemetry_span(
span=prompt_span,
- key=HL_LOG_KEY,
+ key=HUMANLOOP_LOG_KEY,
# hl_log was modified in place
value=hl_log,
)
diff --git a/tests/decorators/test_flow_decorator.py b/tests/decorators/test_flow_decorator.py
index 1375d722..13764da8 100644
--- a/tests/decorators/test_flow_decorator.py
+++ b/tests/decorators/test_flow_decorator.py
@@ -9,7 +9,7 @@
from humanloop.decorators.prompt import prompt
from humanloop.decorators.tool import tool
from humanloop.otel import TRACE_FLOW_CONTEXT
-from humanloop.otel.constants import HL_FILE_KEY
+from humanloop.otel.constants import HUMANLOOP_FILE_KEY
from humanloop.otel.exporter import HumanloopSpanExporter
from humanloop.otel.helpers import read_from_opentelemetry_span
from openai import OpenAI
@@ -93,11 +93,11 @@ def test_decorators_without_flow(
assert len(spans) == 3
assert read_from_opentelemetry_span(
span=spans[1],
- key=HL_FILE_KEY,
+ key=HUMANLOOP_FILE_KEY,
)["tool"]
assert read_from_opentelemetry_span(
span=spans[2],
- key=HL_FILE_KEY,
+ key=HUMANLOOP_FILE_KEY,
)["prompt"]
for span in spans:
# THEN no metadata related to trace is present on either of them
@@ -133,8 +133,8 @@ def test_decorators_with_flow_decorator(
spans = exporter.get_finished_spans()
assert len(spans) == 4
# THEN the span are returned bottom to top
- assert read_from_opentelemetry_span(span=spans[1], key=HL_FILE_KEY)["tool"]
- assert read_from_opentelemetry_span(span=spans[2], key=HL_FILE_KEY)["prompt"]
+ assert read_from_opentelemetry_span(span=spans[1], key=HUMANLOOP_FILE_KEY)["tool"]
+ assert read_from_opentelemetry_span(span=spans[2], key=HUMANLOOP_FILE_KEY)["prompt"]
# assert read_from_opentelemetry_span(span=spans[3], key=HL_FILE_OT_KEY)["flow"]
assert (tool_trace_metadata := TRACE_FLOW_CONTEXT.get(spans[1].get_span_context().span_id))
assert (prompt_trace_metadata := TRACE_FLOW_CONTEXT.get(spans[2].get_span_context().span_id))
@@ -169,11 +169,11 @@ def test_flow_decorator_flow_in_flow(
# 4. Flow Span
spans = exporter.get_finished_spans()
assert len(spans) == 5
- assert read_from_opentelemetry_span(span=spans[1], key=HL_FILE_KEY)["tool"]
- assert read_from_opentelemetry_span(span=spans[2], key=HL_FILE_KEY)["prompt"]
- assert read_from_opentelemetry_span(span=spans[3], key=HL_FILE_KEY)["flow"] != {}
+ assert read_from_opentelemetry_span(span=spans[1], key=HUMANLOOP_FILE_KEY)["tool"]
+ assert read_from_opentelemetry_span(span=spans[2], key=HUMANLOOP_FILE_KEY)["prompt"]
+ assert read_from_opentelemetry_span(span=spans[3], key=HUMANLOOP_FILE_KEY)["flow"] != {}
with pytest.raises(KeyError):
- read_from_opentelemetry_span(span=spans[4], key=HL_FILE_KEY)["flow"] != {}
+ read_from_opentelemetry_span(span=spans[4], key=HUMANLOOP_FILE_KEY)["flow"] != {}
assert (tool_trace_metadata := TRACE_FLOW_CONTEXT.get(spans[1].get_span_context().span_id))
assert (prompt_trace_metadata := TRACE_FLOW_CONTEXT.get(spans[2].get_span_context().span_id))
@@ -225,7 +225,7 @@ def test_flow_decorator_with_hl_exporter(
# THEN the last uploaded span is the Flow
assert read_from_opentelemetry_span(
span=flow_span,
- key=HL_FILE_KEY,
+ key=HUMANLOOP_FILE_KEY,
)["flow"]["attributes"] == { # type: ignore[index,call-overload]
"foo": "bar",
"baz": 7,
@@ -233,12 +233,12 @@ def test_flow_decorator_with_hl_exporter(
# THEN the second uploaded span is the Prompt
assert "prompt" in read_from_opentelemetry_span(
span=prompt_span,
- key=HL_FILE_KEY,
+ key=HUMANLOOP_FILE_KEY,
)
# THEN the first uploaded span is the Tool
assert "tool" in read_from_opentelemetry_span(
span=tool_span,
- key=HL_FILE_KEY,
+ key=HUMANLOOP_FILE_KEY,
)
# NOTE: The type: ignore comments are caused by the MagicMock used to mock the HTTP client
diff --git a/tests/decorators/test_prompt_decorator.py b/tests/decorators/test_prompt_decorator.py
index 3e9e690c..4da40358 100644
--- a/tests/decorators/test_prompt_decorator.py
+++ b/tests/decorators/test_prompt_decorator.py
@@ -12,7 +12,7 @@
from groq import Groq
from groq import NotFoundError as GroqNotFoundError
from humanloop.decorators.prompt import prompt
-from humanloop.otel.constants import HL_FILE_KEY
+from humanloop.otel.constants import HUMANLOOP_FILE_KEY
from humanloop.otel.helpers import is_humanloop_span, read_from_opentelemetry_span
from humanloop.types.model_providers import ModelProviders
from humanloop.types.prompt_kernel_request import PromptKernelRequest
@@ -194,7 +194,7 @@ def test_prompt_decorator_with_hl_processor(
prompt_kernel = PromptKernelRequest.model_validate(
read_from_opentelemetry_span(
span=spans[1],
- key=HL_FILE_KEY,
+ key=HUMANLOOP_FILE_KEY,
)["prompt"] # type: ignore
)
# THEN temperature is intercepted from LLM provider call
@@ -234,7 +234,7 @@ def test_prompt_decorator_with_defaults(
spans = exporter.get_finished_spans()
# THEN the Prompt span is enhanced with information and forms a correct PromptKernel
prompt = PromptKernelRequest.model_validate(
- read_from_opentelemetry_span(span=spans[1], key=HL_FILE_KEY)["prompt"] # type: ignore
+ read_from_opentelemetry_span(span=spans[1], key=HUMANLOOP_FILE_KEY)["prompt"] # type: ignore
)
# THEN temperature intercepted from LLM provider call is overridden by default value
assert prompt.temperature == 0.9
@@ -244,35 +244,6 @@ def test_prompt_decorator_with_defaults(
assert prompt.model == model
-@pytest.mark.parametrize(
- "hyperparameters",
- (
- {"temperature": 1.1},
- {"top_p": 1.1},
- {"presence_penalty": 3},
- {"frequency_penalty": 3},
- ),
-)
-def test_hyperparameter_values_fail_out_of_domain(
- opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter],
- hyperparameters: dict[str, float],
-):
- tracer, _ = opentelemetry_test_configuration
-
- # GIVEN a Prompt decorated function
-
- with pytest.raises(ValueError):
- # WHEN passing default values to the @prompt decorator that are out of domain
-
- _test_scenario(
- opentelemetry_tracer=tracer,
- path=None,
- **hyperparameters,
- )
-
- # THEN an exception is raised
-
-
@pytest.mark.parametrize(
"attributes_test_expected",
[
@@ -315,7 +286,7 @@ def test_prompt_attributes(
prompt_kernel = PromptKernelRequest.model_validate(
read_from_opentelemetry_span(
span=exporter.get_finished_spans()[1],
- key=HL_FILE_KEY,
+ key=HUMANLOOP_FILE_KEY,
)["prompt"] # type: ignore
)
assert prompt_kernel.attributes == expected_attributes
diff --git a/tests/decorators/test_tool_decorator.py b/tests/decorators/test_tool_decorator.py
index 1a797839..0e4cc0c8 100644
--- a/tests/decorators/test_tool_decorator.py
+++ b/tests/decorators/test_tool_decorator.py
@@ -2,7 +2,7 @@
import pytest
from humanloop.decorators.tool import tool
-from humanloop.otel.constants import HL_FILE_KEY, HL_LOG_KEY
+from humanloop.otel.constants import HUMANLOOP_FILE_KEY, HUMANLOOP_LOG_KEY
from humanloop.otel.helpers import read_from_opentelemetry_span
from jsonschema.protocols import Validator
from opentelemetry.sdk.trace import Tracer
@@ -34,8 +34,8 @@ def calculator(operation: str, num1: float, num2: float) -> float:
# THEN a single span is created and the log and file attributes are correctly set
spans = exporter.get_finished_spans()
assert len(spans) == 1
- hl_file: dict[str, Any] = read_from_opentelemetry_span(span=spans[0], key=HL_FILE_KEY)
- hl_log: dict[str, Any] = read_from_opentelemetry_span(span=spans[0], key=HL_LOG_KEY)
+ hl_file: dict[str, Any] = read_from_opentelemetry_span(span=spans[0], key=HUMANLOOP_FILE_KEY)
+ hl_log: dict[str, Any] = read_from_opentelemetry_span(span=spans[0], key=HUMANLOOP_LOG_KEY)
assert hl_log["output"] == result == 3
assert hl_log["inputs"] == {
"operation": "add",
@@ -428,3 +428,53 @@ def foo_bar(foo: Foo):
# THEN a ValueError is raised
assert exc.value.args[0].startswith("foo_bar: Unsupported type hint")
+
+
+def test_tool_as_higher_order_function(
+ opentelemetry_hl_test_configuration: tuple[Tracer, InMemorySpanExporter],
+):
+ tracer, exporter = opentelemetry_hl_test_configuration
+
+ def calculator(operation: str, num1: float, num2: float) -> float:
+ """Do arithmetic operations on two numbers."""
+ if operation == "add":
+ return num1 + num2
+ elif operation == "subtract":
+ return num1 - num2
+ elif operation == "multiply":
+ return num1 * num2
+ elif operation == "divide":
+ return num1 / num2
+ else:
+ raise ValueError(f"Invalid operation: {operation}")
+
+ higher_order_fn_tool = tool(opentelemetry_tracer=tracer)(calculator)
+
+ @tool(opentelemetry_tracer=tracer) # type: ignore
+ def calculator(operation: str, num1: float, num2: float) -> float:
+ """Do arithmetic operations on two numbers."""
+ if operation == "add":
+ return num1 + num2
+ elif operation == "subtract":
+ return num1 - num2
+ elif operation == "multiply":
+ return num1 * num2
+ elif operation == "divide":
+ return num1 / num2
+ else:
+ raise ValueError(f"Invalid operation: {operation}")
+
+ higher_order_fn_tool(operation="add", num1=1, num2=2)
+ calculator(operation="add", num1=1, num2=2)
+
+ assert len(spans := exporter.get_finished_spans()) == 2
+
+ hl_file_higher_order_fn = read_from_opentelemetry_span(
+ span=spans[0],
+ key=HUMANLOOP_FILE_KEY,
+ )
+ hl_file_decorated_fn = read_from_opentelemetry_span(
+ span=spans[1],
+ key=HUMANLOOP_FILE_KEY,
+ )
+ assert hl_file_higher_order_fn["tool"]["source_code"] == hl_file_decorated_fn["tool"]["source_code"] # type: ignore
From 1be415004600515b60bc97e8cba4e5e6f5936d32 Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Mon, 11 Nov 2024 10:12:20 +0000
Subject: [PATCH 47/70] PR #30
---
src/humanloop/eval_utils/run.py | 2 +-
src/humanloop/otel/exporter.py | 6 ++----
2 files changed, 3 insertions(+), 5 deletions(-)
diff --git a/src/humanloop/eval_utils/run.py b/src/humanloop/eval_utils/run.py
index 86ee8cea..a7348529 100644
--- a/src/humanloop/eval_utils/run.py
+++ b/src/humanloop/eval_utils/run.py
@@ -467,7 +467,7 @@ def upload_callback(log: dict):
)
start_time = datetime.now()
try:
- if datapoint_dict.get("messages"):
+ if "messages" in datapoint_dict and datapoint_dict["messages"] is not None:
# function_ is decorated by Humanloop, the OTel Exporter will
# handle the logging, which will call the upload_callback
# function above when it's done
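The rewritten check is subtly different from the old truthiness test: an empty `messages` list is falsy, so the old condition would skip it, while the explicit `is not None` comparison lets it through. In isolation:

```python
datapoint_dict = {"messages": []}

old_check = bool(datapoint_dict.get("messages"))  # False: empty list is falsy
new_check = "messages" in datapoint_dict and datapoint_dict["messages"] is not None  # True
assert not old_check and new_check
```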
diff --git a/src/humanloop/otel/exporter.py b/src/humanloop/otel/exporter.py
index 468e833b..83a374b5 100644
--- a/src/humanloop/otel/exporter.py
+++ b/src/humanloop/otel/exporter.py
@@ -84,10 +84,8 @@ def is_evaluated_file(
evaluation_context = None
for span in spans:
if is_humanloop_span(span):
- # The thread doing the logging is different than the
- # thread spawned by eval_utils.run.run_eval. Need
- # to pass the EvaluationContext to the thread doing
- # the logging
+ # We pass the EvaluationContext from the eval_run utility thread to
+ # the export thread so the .log action works as expected
self._upload_queue.put(
(
span,
From e2e3254765a7e669ca56ef67fce3617cc661d296 Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Mon, 11 Nov 2024 14:37:35 +0000
Subject: [PATCH 48/70] evals_run modification works with simple callables and
instrumented callables
---
src/humanloop/decorators/prompt.py | 4 ----
src/humanloop/eval_utils/run.py | 18 ++++--------------
src/humanloop/flows/client.py | 1 +
src/humanloop/otel/exporter.py | 1 -
4 files changed, 5 insertions(+), 19 deletions(-)
diff --git a/src/humanloop/decorators/prompt.py b/src/humanloop/decorators/prompt.py
index d146d665..363f18a2 100644
--- a/src/humanloop/decorators/prompt.py
+++ b/src/humanloop/decorators/prompt.py
@@ -1,15 +1,11 @@
import logging
-import os
-import sys
import typing
-import uuid
from functools import wraps
from typing import Any, Callable, Mapping, Optional, Sequence
from opentelemetry.sdk.trace import Span
from opentelemetry.trace import Tracer
-from humanloop.types.prompt_kernel_request import PromptKernelRequest
if typing.TYPE_CHECKING:
from humanloop import ToolFunctionParams
diff --git a/src/humanloop/eval_utils/run.py b/src/humanloop/eval_utils/run.py
index a7348529..572a4e7f 100644
--- a/src/humanloop/eval_utils/run.py
+++ b/src/humanloop/eval_utils/run.py
@@ -103,7 +103,6 @@ def log_with_evaluation_context(
def _is_evaluated_file(
evaluation_context: EvaluationContext,
log_args: dict,
- file_id_attribute: str,
) -> bool:
"""Check if the File being logged against is part of the current Evaluation.
@@ -112,7 +111,7 @@ def _is_evaluated_file(
"""
if evaluation_context is None:
return False
- return evaluation_context.get("file_id") == log_args.get(file_id_attribute) or evaluation_context.get(
+ return evaluation_context.get("file_id") == log_args.get("id") or evaluation_context.get(
"path"
) == log_args.get("path")
@@ -130,19 +129,9 @@ def _overloaded_log(
]:
evaluation_context = evaluation_context_variable.get()
- if isinstance(client, PromptsClient):
- file_id_attribute = "prompt_id"
- elif isinstance(client, ToolsClient):
- file_id_attribute = "tool_id"
- elif isinstance(client, FlowsClient):
- file_id_attribute = "flow_id"
- elif isinstance(client, EvaluatorsClient):
- file_id_attribute = "evaluator_id"
-
if _is_evaluated_file(
evaluation_context=evaluation_context, # type: ignore
log_args=kwargs,
- file_id_attribute=file_id_attribute,
):
# If the .log API user does not provide the source_datapoint_id or run_id,
# override them with the values from the EvaluationContext
@@ -161,7 +150,6 @@ def _overloaded_log(
if _is_evaluated_file(
evaluation_context=evaluation_context, # type: ignore
log_args=kwargs,
- file_id_attribute=file_id_attribute,
):
# Notify that the Log has been added to the Evaluation
# evaluation_context cannot be None
@@ -171,8 +159,10 @@ def _overloaded_log(
)
evaluation_context["upload_callback"]( # type: ignore
{
- "id": response.id,
**kwargs,
+ # ID in kwargs refers to the File ID
+ # Replace it with the Log ID
+ "id": response.id,
}
)
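The reordering above matters because, in a dict literal, later keys win: `kwargs` still carries the File's `id`, so `"id": response.id` must come after the unpack for the Log ID to survive. A two-line demonstration with illustrative values:

```python
kwargs = {"id": "file-123", "inputs": {"q": "hi"}}  # "id" here is the File ID
response_id = "log-456"

before_unpack = {"id": response_id, **kwargs}  # File ID clobbers the Log ID
after_unpack = {**kwargs, "id": response_id}   # Log ID wins, as intended
assert before_unpack["id"] == "file-123"
assert after_unpack["id"] == "log-456"
```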
diff --git a/src/humanloop/flows/client.py b/src/humanloop/flows/client.py
index 1884b45c..edb48562 100644
--- a/src/humanloop/flows/client.py
+++ b/src/humanloop/flows/client.py
@@ -200,6 +200,7 @@ def log(
),
)
"""
+
_response = self._client_wrapper.httpx_client.request(
"flows/log",
method="POST",
diff --git a/src/humanloop/otel/exporter.py b/src/humanloop/otel/exporter.py
index 83a374b5..acad8590 100644
--- a/src/humanloop/otel/exporter.py
+++ b/src/humanloop/otel/exporter.py
@@ -195,7 +195,6 @@ def _export_prompt(self, span: ReadableSpan) -> None:
log_object["inputs"] = {}
if "messages" not in log_object:
log_object["messages"] = []
- # Same as with messages above
if "tools" not in file_object["prompt"]:
file_object["prompt"]["tools"] = []
trace_metadata = TRACE_FLOW_CONTEXT.get(span.get_span_context().span_id)
From 0a499ad0ff8c90bae7539ce0c8e761ea696e3281 Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Mon, 11 Nov 2024 15:09:33 +0000
Subject: [PATCH 49/70] Harry PR feedback
---
src/humanloop/decorators/flow.py | 2 +-
src/humanloop/decorators/prompt.py | 1 -
src/humanloop/eval_utils/context.py | 1 -
src/humanloop/eval_utils/run.py | 28 ++++++++++++++++++--
src/humanloop/otel/exporter.py | 41 ++++++++++++++++++++++++-----
src/humanloop/otel/helpers.py | 2 +-
6 files changed, 63 insertions(+), 12 deletions(-)
diff --git a/src/humanloop/decorators/flow.py b/src/humanloop/decorators/flow.py
index 3b22d8db..2cb3d2ad 100644
--- a/src/humanloop/decorators/flow.py
+++ b/src/humanloop/decorators/flow.py
@@ -1,7 +1,7 @@
import logging
-from functools import wraps
import os
import sys
+from functools import wraps
from typing import Any, Callable, Mapping, Optional, Sequence
from opentelemetry.sdk.trace import Span
diff --git a/src/humanloop/decorators/prompt.py b/src/humanloop/decorators/prompt.py
index 363f18a2..5ab10c4f 100644
--- a/src/humanloop/decorators/prompt.py
+++ b/src/humanloop/decorators/prompt.py
@@ -6,7 +6,6 @@
from opentelemetry.sdk.trace import Span
from opentelemetry.trace import Tracer
-
if typing.TYPE_CHECKING:
from humanloop import ToolFunctionParams
from humanloop.decorators.helpers import args_to_inputs
diff --git a/src/humanloop/eval_utils/context.py b/src/humanloop/eval_utils/context.py
index 93469612..f05b9585 100644
--- a/src/humanloop/eval_utils/context.py
+++ b/src/humanloop/eval_utils/context.py
@@ -1,4 +1,3 @@
-from contextvars import ContextVar, Token
from typing import Callable, TypedDict
diff --git a/src/humanloop/eval_utils/run.py b/src/humanloop/eval_utils/run.py
index 572a4e7f..b28a9efd 100644
--- a/src/humanloop/eval_utils/run.py
+++ b/src/humanloop/eval_utils/run.py
@@ -8,7 +8,6 @@
not be called directly.
"""
-from contextvars import ContextVar
import copy
import inspect
import json
@@ -19,6 +18,7 @@
import types
import typing
from concurrent.futures import ThreadPoolExecutor
+from contextvars import ContextVar
from datetime import datetime
from functools import partial
from logging import INFO
@@ -144,6 +144,12 @@ def _overloaded_log(
kwargs[attribute] = evaluation_context[attribute] # type: ignore
# Call the original .log method
+ logger.debug(
+ "Logging %s with EvaluationContext %s inside _overloaded_log on Thread %s",
+ kwargs,
+ evaluation_context,
+ threading.get_ident(),
+ )
response = self._log(**kwargs)
# Call the callback so the Evaluation can be updated
@@ -174,6 +180,7 @@ def _overloaded_log(
# Replace the original log method with the overloaded one
client.log = types.MethodType(_overloaded_log, client) # type: ignore
# Return the client with the overloaded log method
+ logger.debug("Overloaded the .log method of %s", client)
return client
@@ -246,7 +253,7 @@ def run_eval(
global _PROGRESS_BAR
if hasattr(file["callable"], "file"):
- # When the decorator inside `file`` is a decorated function,
+ # When the callable inside `file` is a decorated function,
# we need to validate that the other parameters of `file`
# match the attributes of the decorator
inner_file: File = file["callable"].file
@@ -429,6 +436,12 @@ def run_eval(
def process_datapoint(dp: Datapoint, file_id: str, file_path: str, run_id: str):
def upload_callback(log: dict):
"""Logic ran after the Log has been created."""
+ logger.debug(
+ "upload_callback on Thread %s: log %s datapoint_target %s",
+ threading.get_ident(),
+ log,
+ dp.target,
+ )
_run_local_evaluators(
client=client,
log=log,
@@ -448,6 +461,12 @@ def upload_callback(log: dict):
path=file_path,
)
)
+ logger.debug(
+ "process_datapoint on Thread %s: evaluating Datapoint %s with EvaluationContext %s",
+ threading.get_ident(),
+ datapoint_dict,
+ evaluation_context_variable.get(),
+ )
log_func = _get_log_func(
client=client,
file_type=type_,
@@ -482,6 +501,11 @@ def upload_callback(log: dict):
if context_variable is not None:
# Evaluation Context has not been consumed
# function_ is a plain callable so we need to create a Log
+ logger.debug(
+ "process_datapoint on Thread %s: function_ %s is a simple callable, context was not consumed",
+ threading.get_ident(),
+ function_.__name__,
+ )
log_func(
inputs=datapoint.inputs,
output=output,
diff --git a/src/humanloop/otel/exporter.py b/src/humanloop/otel/exporter.py
index acad8590..ce99ee40 100644
--- a/src/humanloop/otel/exporter.py
+++ b/src/humanloop/otel/exporter.py
@@ -1,6 +1,7 @@
-import copy
+import contextvars
import json
import logging
+import threading
import typing
from queue import Empty as EmptyQueue
from queue import Queue
@@ -62,6 +63,7 @@ def __init__(
self._shutdown: bool = False
for thread in self._threads:
thread.start()
+ logger.debug("Exporter Thread %s started", thread.ident)
def export(self, spans: trace.Sequence[ReadableSpan]) -> SpanExportResult:
def is_evaluated_file(
@@ -89,8 +91,13 @@ def is_evaluated_file(
self._upload_queue.put(
(
span,
- copy.deepcopy(evaluation_context),
- )
+ contextvars.copy_context()[self._client.evaluation_context_variable],
+ ),
+ )
+ logger.debug(
+ "Span %s with EvaluationContext %s added to upload queue",
+ span.attributes,
+ contextvars.copy_context()[self._client.evaluation_context_variable],
)
# Reset the EvaluationContext so run eval does not
# create a duplicate Log
@@ -98,6 +105,11 @@ def is_evaluated_file(
spans[0],
evaluation_context,
):
+ logger.debug(
+ "EvaluationContext %s marked as exhausted for Log in Span %s",
+ evaluation_context,
+ spans[0].attributes,
+ )
# Mark the EvaluationContext as used
self._client.evaluation_context_variable.set(None)
return SpanExportResult.SUCCESS
@@ -109,6 +121,7 @@ def shutdown(self) -> None:
self._shutdown = True
for thread in self._threads:
thread.join()
+ logger.debug("Exporter Thread %s joined", thread.ident)
def force_flush(self, timeout_millis: int = 3000) -> bool:
self._shutdown = True
@@ -146,21 +159,37 @@ def _do_work(self):
# Set the EvaluationContext for the thread so the .log action works as expected
# NOTE: Expecting the evaluation thread to send a single span so we are
# not resetting the EvaluationContext in the scope of the export thread
- self._client.evaluation_context_variable.set(
- copy.deepcopy(evaluation_context),
- )
+ self._client.evaluation_context_variable.set(evaluation_context)
except EmptyQueue:
continue
trace_metadata = TRACE_FLOW_CONTEXT.get(span_to_export.get_span_context().span_id)
if trace_metadata is None:
# Span is not part of a Flow Log
self._export_span_dispatch(span_to_export)
+ logger.debug(
+ "_do_work on Thread %s: Dispatched span %s with FlowContext %s which is not part of a Flow",
+ threading.get_ident(),
+ span_to_export.attributes,
+ trace_metadata,
+ )
elif trace_metadata["trace_parent_id"] is None:
# Span is the head of a Flow Trace
self._export_span_dispatch(span_to_export)
+ logger.debug(
+ "Dispatched span %s which is the head of a Flow Trace with FlowContext %s",
+ span_to_export.attributes,
+ trace_metadata,
+ )
elif trace_metadata["trace_parent_id"] in self._span_id_to_uploaded_log_id:
# Span is part of a Flow and its parent has been uploaded
self._export_span_dispatch(span_to_export)
+ logger.debug(
+ "_do_work on Thread %s: Dispatched span %s after its parent %s with FlowContext %s",
+ threading.get_ident(),
+ span_to_export.attributes,
+ trace_metadata["trace_parent_id"],
+ trace_metadata,
+ )
else:
# Requeue the Span to be uploaded later
self._upload_queue.put((span_to_export, evaluation_context))
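The handoff this diff debugs exists because `ContextVar` values do not propagate to manually spawned threads: the producing thread must snapshot the value and the worker must set it again. A hedged, self-contained sketch of that pattern (names are illustrative, not the SDK's):

```python
import threading
from contextvars import ContextVar
from typing import Optional

evaluation_context: ContextVar[Optional[dict]] = ContextVar(
    "evaluation_context", default=None
)

def producer(queue_put):
    # Capture a snapshot in the producing thread; ship it with the work item.
    evaluation_context.set({"path": "folder/name"})
    queue_put(evaluation_context.get())

def worker(snapshot: Optional[dict]):
    # The worker thread starts with an empty context and restores the snapshot.
    assert evaluation_context.get() is None
    evaluation_context.set(snapshot)
    assert evaluation_context.get() == {"path": "folder/name"}

items: list = []
producer(items.append)
thread = threading.Thread(target=worker, args=(items[0],))
thread.start()
thread.join()
```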
diff --git a/src/humanloop/otel/helpers.py b/src/humanloop/otel/helpers.py
index 089f759e..6e5baf11 100644
--- a/src/humanloop/otel/helpers.py
+++ b/src/humanloop/otel/helpers.py
@@ -196,7 +196,7 @@ def read_from_opentelemetry_span(span: ReadableSpan, key: str = "") -> NestedDic
sub_result[part] = span_value
else:
if part not in sub_result:
- # New dict since
+ # Create new dict for a previously unseen part of the key
sub_result[part] = {}
sub_result = sub_result[part] # type: ignore
From b2bf1743e578ed38a1e9e545ac45ff7da78499db Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Mon, 11 Nov 2024 15:14:29 +0000
Subject: [PATCH 50/70] changed relative import
---
src/humanloop/decorators/tool.py | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/src/humanloop/decorators/tool.py b/src/humanloop/decorators/tool.py
index ec95251c..6b15bcb8 100644
--- a/src/humanloop/decorators/tool.py
+++ b/src/humanloop/decorators/tool.py
@@ -1,8 +1,6 @@
import builtins
import inspect
import logging
-import os
-import sys
import textwrap
import typing
from dataclasses import dataclass
@@ -19,7 +17,7 @@
from humanloop.requests.tool_function import ToolFunctionParams
from humanloop.requests.tool_kernel_request import ToolKernelRequestParams
-from .helpers import args_to_inputs
+from humanloop.otel.helpers import args_to_inputs
logger = logging.getLogger("humanloop.sdk")
From 25db65970ce499587fe61df29d5d732cc4d30ff0 Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Mon, 11 Nov 2024 15:23:14 +0000
Subject: [PATCH 51/70] import
---
src/humanloop/decorators/tool.py | 43 ++++++++++++++++++++++++++------
1 file changed, 35 insertions(+), 8 deletions(-)
diff --git a/src/humanloop/decorators/tool.py b/src/humanloop/decorators/tool.py
index 6b15bcb8..2f3d41a8 100644
--- a/src/humanloop/decorators/tool.py
+++ b/src/humanloop/decorators/tool.py
@@ -12,12 +12,17 @@
from humanloop.eval_utils import File
from humanloop.otel import TRACE_FLOW_CONTEXT, FlowContext
-from humanloop.otel.constants import HUMANLOOP_FILE_KEY, HUMANLOOP_FILE_TYPE_KEY, HUMANLOOP_LOG_KEY, HUMANLOOP_PATH_KEY
+from humanloop.otel.constants import (
+ HUMANLOOP_FILE_KEY,
+ HUMANLOOP_FILE_TYPE_KEY,
+ HUMANLOOP_LOG_KEY,
+ HUMANLOOP_PATH_KEY,
+)
from humanloop.otel.helpers import generate_span_id, write_to_opentelemetry_span
from humanloop.requests.tool_function import ToolFunctionParams
from humanloop.requests.tool_kernel_request import ToolKernelRequestParams
-from humanloop.otel.helpers import args_to_inputs
+from humanloop.decorators.helpers import args_to_inputs
logger = logging.getLogger("humanloop.sdk")
@@ -163,7 +168,9 @@ def _build_function_parameters_property(func) -> _JSONSchemaFunctionParameters:
inspect.Parameter.VAR_POSITIONAL,
inspect.Parameter.VAR_KEYWORD,
):
- raise ValueError(f"{func.__name__}: *args and **kwargs are not supported by the @tool decorator")
+ raise ValueError(
+ f"{func.__name__}: *args and **kwargs are not supported by the @tool decorator"
+ )
for parameter in signature.parameters.values():
try:
@@ -267,7 +274,16 @@ def _parse_annotation(annotation: typing.Type) -> _ParsedAnnotation:
if origin is None:
# Either not a nested type or no type hint
# Parameter.empty is used for parameters without type hints
- if annotation not in (str, int, float, bool, Parameter.empty, dict, list, tuple):
+ if annotation not in (
+ str,
+ int,
+ float,
+ bool,
+ Parameter.empty,
+ dict,
+ list,
+ tuple,
+ ):
raise ValueError(f"Unsupported type hint: {annotation}")
# Check if it's a complex type with no inner type
@@ -321,7 +337,9 @@ def _parse_annotation(annotation: typing.Type) -> _ParsedAnnotation:
# Union has sub_types and is Optional
return _ParsedOptionalAnnotation(
annotation=_ParsedUnionAnnotation(
- annotation=[_parse_annotation(sub_type) for sub_type in sub_types[:-1]],
+ annotation=[
+ _parse_annotation(sub_type) for sub_type in sub_types[:-1]
+ ],
)
)
# Union type that is not Optional
@@ -355,7 +373,10 @@ def _annotation_parse_to_json_schema(
if isinstance(arg, _ParsedUnionAnnotation):
arg_type = {
- "anyOf": [_annotation_parse_to_json_schema(sub_type) for sub_type in arg.annotation],
+ "anyOf": [
+ _annotation_parse_to_json_schema(sub_type)
+ for sub_type in arg.annotation
+ ],
}
elif isinstance(arg, _ParsedTupleAnnotation):
@@ -370,7 +391,10 @@ def _annotation_parse_to_json_schema(
else:
arg_type = {
"type": "array",
- "items": [_annotation_parse_to_json_schema(sub_type) for sub_type in arg.annotation],
+ "items": [
+ _annotation_parse_to_json_schema(sub_type)
+ for sub_type in arg.annotation
+ ],
}
elif isinstance(arg, _ParsedListAnnotation):
@@ -439,7 +463,10 @@ def _annotation_parse_to_json_schema(
if is_optional:
if isinstance(arg, _ParsedUnionAnnotation):
for type_option in arg_type["anyOf"]:
- if isinstance(type_option["type"], list) and "null" not in type_option["type"]: # type: ignore
+ if (
+ isinstance(type_option["type"], list)
+ and "null" not in type_option["type"]
+ ): # type: ignore
type_option["type"] = [*type_option["type"], "null"] # type: ignore
elif not isinstance(type_option["type"], list): # type: ignore
type_option["type"] = [type_option["type"], "null"] # type: ignore
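The `Optional` handling reformatted above maps an optional parameter to a JSON Schema type list that includes `"null"`. A simplified sketch of that mapping, handling only primitives and `Optional[primitive]` (the SDK's real parser covers far more cases):

```python
from typing import Optional, Union, get_args, get_origin

PRIMITIVES = {str: "string", int: "integer", float: "number", bool: "boolean"}

def annotation_to_schema_type(annotation):
    # Optional[X] is Union[X, None]; add "null" alongside the base type.
    if get_origin(annotation) is Union:
        args = get_args(annotation)
        if type(None) in args:
            inner = [a for a in args if a is not type(None)][0]
            return [PRIMITIVES[inner], "null"]
    return PRIMITIVES[annotation]

assert annotation_to_schema_type(str) == "string"
assert annotation_to_schema_type(Optional[int]) == ["integer", "null"]
```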
From ab050afe8ec98a14c9eb1db064afe7c4c4897c59 Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Mon, 11 Nov 2024 15:26:03 +0000
Subject: [PATCH 52/70] deepcopy
---
src/humanloop/otel/exporter.py | 65 ++++++++++++++++++++++++++--------
1 file changed, 50 insertions(+), 15 deletions(-)
diff --git a/src/humanloop/otel/exporter.py b/src/humanloop/otel/exporter.py
index ce99ee40..3251c258 100644
--- a/src/humanloop/otel/exporter.py
+++ b/src/humanloop/otel/exporter.py
@@ -3,6 +3,7 @@
import logging
import threading
import typing
+import copy
from queue import Empty as EmptyQueue
from queue import Queue
from threading import Thread
@@ -15,7 +16,12 @@
from humanloop.core import ApiError as HumanloopApiError
from humanloop.eval_utils.context import EvaluationContext
from humanloop.otel import TRACE_FLOW_CONTEXT, FlowContext
-from humanloop.otel.constants import HUMANLOOP_FILE_KEY, HUMANLOOP_FILE_TYPE_KEY, HUMANLOOP_LOG_KEY, HUMANLOOP_PATH_KEY
+from humanloop.otel.constants import (
+ HUMANLOOP_FILE_KEY,
+ HUMANLOOP_FILE_TYPE_KEY,
+ HUMANLOOP_LOG_KEY,
+ HUMANLOOP_PATH_KEY,
+)
from humanloop.otel.helpers import is_humanloop_span, read_from_opentelemetry_span
from humanloop.requests.flow_kernel_request import FlowKernelRequestParams
from humanloop.requests.prompt_kernel_request import PromptKernelRequestParams
@@ -79,7 +85,9 @@ def is_evaluated_file(
try:
evaluation_context = self._client.evaluation_context_variable.get()
if len(spans) > 1:
- raise RuntimeError("HumanloopSpanExporter expected a single span when running an evaluation")
+ raise RuntimeError(
+ "HumanloopSpanExporter expected a single span when running an evaluation"
+ )
if not is_evaluated_file(spans[0], evaluation_context):
evaluation_context = None
except LookupError:
@@ -91,13 +99,15 @@ def is_evaluated_file(
self._upload_queue.put(
(
span,
- contextvars.copy_context()[self._client.evaluation_context_variable],
+ copy.deepcopy(
+ self._client.evaluation_context_variable.get()
+ ),
),
)
logger.debug(
"Span %s with EvaluationContext %s added to upload queue",
span.attributes,
- contextvars.copy_context()[self._client.evaluation_context_variable],
+ copy.deepcopy(self._client.evaluation_context_variable.get()),
)
# Reset the EvaluationContext so run eval does not
# create a duplicate Log
@@ -114,7 +124,9 @@ def is_evaluated_file(
self._client.evaluation_context_variable.set(None)
return SpanExportResult.SUCCESS
else:
- logger.warning("HumanloopSpanExporter is shutting down, not accepting new spans")
+ logger.warning(
+ "HumanloopSpanExporter is shutting down, not accepting new spans"
+ )
return SpanExportResult.FAILURE
def shutdown(self) -> None:
@@ -162,7 +174,9 @@ def _do_work(self):
self._client.evaluation_context_variable.set(evaluation_context)
except EmptyQueue:
continue
- trace_metadata = TRACE_FLOW_CONTEXT.get(span_to_export.get_span_context().span_id)
+ trace_metadata = TRACE_FLOW_CONTEXT.get(
+ span_to_export.get_span_context().span_id
+ )
if trace_metadata is None:
# Span is not part of a Flow Log
self._export_span_dispatch(span_to_export)
@@ -227,8 +241,14 @@ def _export_prompt(self, span: ReadableSpan) -> None:
if "tools" not in file_object["prompt"]:
file_object["prompt"]["tools"] = []
trace_metadata = TRACE_FLOW_CONTEXT.get(span.get_span_context().span_id)
- if trace_metadata and "trace_parent_id" in trace_metadata and trace_metadata["trace_parent_id"]:
- trace_parent_id = self._span_id_to_uploaded_log_id[trace_metadata["trace_parent_id"]]
+ if (
+ trace_metadata
+ and "trace_parent_id" in trace_metadata
+ and trace_metadata["trace_parent_id"]
+ ):
+ trace_parent_id = self._span_id_to_uploaded_log_id[
+ trace_metadata["trace_parent_id"]
+ ]
if trace_parent_id is None:
# Parent Log in Trace upload failed
file_path = read_from_opentelemetry_span(span, key=HUMANLOOP_PATH_KEY)
@@ -257,9 +277,15 @@ def _export_prompt(self, span: ReadableSpan) -> None:
self._span_id_to_uploaded_log_id[span.context.span_id] = None
def _export_tool(self, span: ReadableSpan) -> None:
- file_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HUMANLOOP_FILE_KEY)
- log_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HUMANLOOP_LOG_KEY)
- trace_metadata: FlowContext = TRACE_FLOW_CONTEXT.get(span.get_span_context().span_id, {})
+ file_object: dict[str, Any] = read_from_opentelemetry_span(
+ span, key=HUMANLOOP_FILE_KEY
+ )
+ log_object: dict[str, Any] = read_from_opentelemetry_span(
+ span, key=HUMANLOOP_LOG_KEY
+ )
+ trace_metadata: FlowContext = TRACE_FLOW_CONTEXT.get(
+ span.get_span_context().span_id, {}
+ )
if "trace_parent_id" in trace_metadata and trace_metadata["trace_parent_id"]:
trace_parent_id = self._span_id_to_uploaded_log_id.get(
trace_metadata["trace_parent_id"],
@@ -293,8 +319,12 @@ def _export_tool(self, span: ReadableSpan) -> None:
self._span_id_to_uploaded_log_id[span.context.span_id] = None
def _export_flow(self, span: ReadableSpan) -> None:
- file_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HUMANLOOP_FILE_KEY)
- log_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HUMANLOOP_LOG_KEY)
+ file_object: dict[str, Any] = read_from_opentelemetry_span(
+ span, key=HUMANLOOP_FILE_KEY
+ )
+ log_object: dict[str, Any] = read_from_opentelemetry_span(
+ span, key=HUMANLOOP_LOG_KEY
+ )
trace_metadata: FlowContext = TRACE_FLOW_CONTEXT.get(
span.get_span_context().span_id,
{},
@@ -303,7 +333,10 @@ def _export_flow(self, span: ReadableSpan) -> None:
trace_parent_id = self._span_id_to_uploaded_log_id.get(
trace_metadata["trace_parent_id"], # type: ignore
)
- if trace_parent_id is None and trace_metadata["trace_id"] != span.get_span_context().span_id:
+ if (
+ trace_parent_id is None
+ and trace_metadata["trace_id"] != span.get_span_context().span_id
+ ):
# Parent Log in Trace upload failed
# NOTE: Check if the trace_id metadata field points to the
# span itself. This signifies the span is the head of the Trace
@@ -327,7 +360,9 @@ def _export_flow(self, span: ReadableSpan) -> None:
**log_object,
trace_parent_id=trace_parent_id,
)
- self._span_id_to_uploaded_log_id[span.get_span_context().span_id] = log_response.id
+ self._span_id_to_uploaded_log_id[span.get_span_context().span_id] = (
+ log_response.id
+ )
except HumanloopApiError as e:
logger.error(str(e))
self._span_id_to_uploaded_log_id[span.context.span_id] = None
From ba5ba1ca739f463a789beae2902a05f7369901a9 Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Mon, 11 Nov 2024 15:26:56 +0000
Subject: [PATCH 53/70] deepcopy
---
src/humanloop/otel/exporter.py | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/src/humanloop/otel/exporter.py b/src/humanloop/otel/exporter.py
index 3251c258..8539b212 100644
--- a/src/humanloop/otel/exporter.py
+++ b/src/humanloop/otel/exporter.py
@@ -99,15 +99,13 @@ def is_evaluated_file(
self._upload_queue.put(
(
span,
- copy.deepcopy(
- self._client.evaluation_context_variable.get()
- ),
+ copy.deepcopy(evaluation_context),
),
)
logger.debug(
"Span %s with EvaluationContext %s added to upload queue",
span.attributes,
- copy.deepcopy(self._client.evaluation_context_variable.get()),
+ copy.deepcopy(evaluation_context),
)
# Reset the EvaluationContext so run eval does not
# create a duplicate Log
From 93650106000fd85a1536f903fa2f27967708e65a Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Mon, 11 Nov 2024 16:25:53 +0000
Subject: [PATCH 54/70] Debug session Harry
---
src/humanloop/otel/exporter.py | 53 +++++++++-------------------------
1 file changed, 14 insertions(+), 39 deletions(-)
diff --git a/src/humanloop/otel/exporter.py b/src/humanloop/otel/exporter.py
index 8539b212..cfcf356a 100644
--- a/src/humanloop/otel/exporter.py
+++ b/src/humanloop/otel/exporter.py
@@ -85,9 +85,7 @@ def is_evaluated_file(
try:
evaluation_context = self._client.evaluation_context_variable.get()
if len(spans) > 1:
- raise RuntimeError(
- "HumanloopSpanExporter expected a single span when running an evaluation"
- )
+ raise RuntimeError("HumanloopSpanExporter expected a single span when running an evaluation")
if not is_evaluated_file(spans[0], evaluation_context):
evaluation_context = None
except LookupError:
@@ -122,9 +120,7 @@ def is_evaluated_file(
self._client.evaluation_context_variable.set(None)
return SpanExportResult.SUCCESS
else:
- logger.warning(
- "HumanloopSpanExporter is shutting down, not accepting new spans"
- )
+ logger.warning("HumanloopSpanExporter is shutting down, not accepting new spans")
return SpanExportResult.FAILURE
def shutdown(self) -> None:
@@ -172,9 +168,7 @@ def _do_work(self):
self._client.evaluation_context_variable.set(evaluation_context)
except EmptyQueue:
continue
- trace_metadata = TRACE_FLOW_CONTEXT.get(
- span_to_export.get_span_context().span_id
- )
+ trace_metadata = TRACE_FLOW_CONTEXT.get(span_to_export.get_span_context().span_id)
if trace_metadata is None:
# Span is not part of a Flow Log
self._export_span_dispatch(span_to_export)
@@ -239,14 +233,8 @@ def _export_prompt(self, span: ReadableSpan) -> None:
if "tools" not in file_object["prompt"]:
file_object["prompt"]["tools"] = []
trace_metadata = TRACE_FLOW_CONTEXT.get(span.get_span_context().span_id)
- if (
- trace_metadata
- and "trace_parent_id" in trace_metadata
- and trace_metadata["trace_parent_id"]
- ):
- trace_parent_id = self._span_id_to_uploaded_log_id[
- trace_metadata["trace_parent_id"]
- ]
+ if trace_metadata and "trace_parent_id" in trace_metadata and trace_metadata["trace_parent_id"]:
+ trace_parent_id = self._span_id_to_uploaded_log_id[trace_metadata["trace_parent_id"]]
if trace_parent_id is None:
# Parent Log in Trace upload failed
file_path = read_from_opentelemetry_span(span, key=HUMANLOOP_PATH_KEY)
@@ -275,15 +263,9 @@ def _export_prompt(self, span: ReadableSpan) -> None:
self._span_id_to_uploaded_log_id[span.context.span_id] = None
def _export_tool(self, span: ReadableSpan) -> None:
- file_object: dict[str, Any] = read_from_opentelemetry_span(
- span, key=HUMANLOOP_FILE_KEY
- )
- log_object: dict[str, Any] = read_from_opentelemetry_span(
- span, key=HUMANLOOP_LOG_KEY
- )
- trace_metadata: FlowContext = TRACE_FLOW_CONTEXT.get(
- span.get_span_context().span_id, {}
- )
+ file_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HUMANLOOP_FILE_KEY)
+ log_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HUMANLOOP_LOG_KEY)
+ trace_metadata: FlowContext = TRACE_FLOW_CONTEXT.get(span.get_span_context().span_id, {})
if "trace_parent_id" in trace_metadata and trace_metadata["trace_parent_id"]:
trace_parent_id = self._span_id_to_uploaded_log_id.get(
trace_metadata["trace_parent_id"],
@@ -301,6 +283,8 @@ def _export_tool(self, span: ReadableSpan) -> None:
if not tool.get("setup_values"):
tool["setup_values"] = {}
path: str = file_object["path"]
+ if "parameters" in tool["function"] and "properties" not in tool["function"]["parameters"]:
+ tool["function"]["parameters"]["properties"] = {}
if not isinstance(log_object["output"], str):
# Output expected to be a string, if decorated function
# does not return one, jsonify it
@@ -317,12 +301,8 @@ def _export_tool(self, span: ReadableSpan) -> None:
self._span_id_to_uploaded_log_id[span.context.span_id] = None
def _export_flow(self, span: ReadableSpan) -> None:
- file_object: dict[str, Any] = read_from_opentelemetry_span(
- span, key=HUMANLOOP_FILE_KEY
- )
- log_object: dict[str, Any] = read_from_opentelemetry_span(
- span, key=HUMANLOOP_LOG_KEY
- )
+ file_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HUMANLOOP_FILE_KEY)
+ log_object: dict[str, Any] = read_from_opentelemetry_span(span, key=HUMANLOOP_LOG_KEY)
trace_metadata: FlowContext = TRACE_FLOW_CONTEXT.get(
span.get_span_context().span_id,
{},
@@ -331,10 +311,7 @@ def _export_flow(self, span: ReadableSpan) -> None:
trace_parent_id = self._span_id_to_uploaded_log_id.get(
trace_metadata["trace_parent_id"], # type: ignore
)
- if (
- trace_parent_id is None
- and trace_metadata["trace_id"] != span.get_span_context().span_id
- ):
+ if trace_parent_id is None and trace_metadata["trace_id"] != span.get_span_context().span_id:
# Parent Log in Trace upload failed
# NOTE: Check if the trace_id metadata field points to the
# span itself. This signifies the span is the head of the Trace
@@ -358,9 +335,7 @@ def _export_flow(self, span: ReadableSpan) -> None:
**log_object,
trace_parent_id=trace_parent_id,
)
- self._span_id_to_uploaded_log_id[span.get_span_context().span_id] = (
- log_response.id
- )
+ self._span_id_to_uploaded_log_id[span.get_span_context().span_id] = log_response.id
except HumanloopApiError as e:
logger.error(str(e))
self._span_id_to_uploaded_log_id[span.context.span_id] = None
From 44285efe563f36fd80ee74b1457fc971a5921f2c Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Mon, 11 Nov 2024 16:28:40 +0000
Subject: [PATCH 55/70] mypy error fixes
---
src/humanloop/decorators/tool.py | 22 ++++++----------------
src/humanloop/eval_utils/run.py | 2 +-
2 files changed, 7 insertions(+), 17 deletions(-)
diff --git a/src/humanloop/decorators/tool.py b/src/humanloop/decorators/tool.py
index 2f3d41a8..4c582276 100644
--- a/src/humanloop/decorators/tool.py
+++ b/src/humanloop/decorators/tool.py
@@ -168,9 +168,7 @@ def _build_function_parameters_property(func) -> _JSONSchemaFunctionParameters:
inspect.Parameter.VAR_POSITIONAL,
inspect.Parameter.VAR_KEYWORD,
):
- raise ValueError(
- f"{func.__name__}: *args and **kwargs are not supported by the @tool decorator"
- )
+ raise ValueError(f"{func.__name__}: *args and **kwargs are not supported by the @tool decorator")
for parameter in signature.parameters.values():
try:
@@ -337,9 +335,7 @@ def _parse_annotation(annotation: typing.Type) -> _ParsedAnnotation:
# Union has sub_types and is Optional
return _ParsedOptionalAnnotation(
annotation=_ParsedUnionAnnotation(
- annotation=[
- _parse_annotation(sub_type) for sub_type in sub_types[:-1]
- ],
+ annotation=[_parse_annotation(sub_type) for sub_type in sub_types[:-1]],
)
)
# Union type that is not Optional
@@ -373,10 +369,7 @@ def _annotation_parse_to_json_schema(
if isinstance(arg, _ParsedUnionAnnotation):
arg_type = {
- "anyOf": [
- _annotation_parse_to_json_schema(sub_type)
- for sub_type in arg.annotation
- ],
+ "anyOf": [_annotation_parse_to_json_schema(sub_type) for sub_type in arg.annotation],
}
elif isinstance(arg, _ParsedTupleAnnotation):
@@ -391,10 +384,7 @@ def _annotation_parse_to_json_schema(
else:
arg_type = {
"type": "array",
- "items": [
- _annotation_parse_to_json_schema(sub_type)
- for sub_type in arg.annotation
- ],
+ "items": [_annotation_parse_to_json_schema(sub_type) for sub_type in arg.annotation],
}
elif isinstance(arg, _ParsedListAnnotation):
@@ -464,8 +454,8 @@ def _annotation_parse_to_json_schema(
if isinstance(arg, _ParsedUnionAnnotation):
for type_option in arg_type["anyOf"]:
if (
- isinstance(type_option["type"], list)
- and "null" not in type_option["type"]
+ isinstance(type_option["type"], list) # type: ignore
+ and "null" not in type_option["type"] # type: ignore
): # type: ignore
type_option["type"] = [*type_option["type"], "null"] # type: ignore
elif not isinstance(type_option["type"], list): # type: ignore
diff --git a/src/humanloop/eval_utils/run.py b/src/humanloop/eval_utils/run.py
index b28a9efd..e156a9d3 100644
--- a/src/humanloop/eval_utils/run.py
+++ b/src/humanloop/eval_utils/run.py
@@ -504,7 +504,7 @@ def upload_callback(log: dict):
logger.debug(
"process_datapoint on Thread %s: function_ %s is a simple callable, context was not consumed",
threading.get_ident(),
- function_.__name__,
+ function_.__name__, # type: ignore
)
log_func(
inputs=datapoint.inputs,
From 95a0901f8ec9181958ad2b174fd514326ba09c0a Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Mon, 11 Nov 2024 17:02:56 +0000
Subject: [PATCH 56/70] Custom error message for fancy functions decorated with
tool
---
src/humanloop/decorators/tool.py | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/src/humanloop/decorators/tool.py b/src/humanloop/decorators/tool.py
index 4c582276..148fcc44 100644
--- a/src/humanloop/decorators/tool.py
+++ b/src/humanloop/decorators/tool.py
@@ -116,7 +116,13 @@ def _build_tool_kernel(
strict: bool,
) -> ToolKernelRequestParams:
"""Build ToolKernelRequest object from decorated function."""
- source_code = textwrap.dedent(inspect.getsource(func))
+ try:
+ source_code = textwrap.dedent(inspect.getsource(func))
+ except TypeError as e:
+ raise TypeError(
+ f"Cannot extract source code for function {func.__name__}. "
+ "Try decorating a plain function instead of a partial for example."
+ ) from e
# Remove decorator from source code by finding first 'def'
# This makes the source_code extraction idempotent whether
# the decorator is applied directly or used as a higher-order
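The new TypeError fires because `inspect.getsource` only accepts modules, classes, methods, functions, tracebacks, frames, and code objects. A quick sketch of the failure mode the message points at (illustrative, not SDK code):

```python
import functools
import inspect

def add(a: int, b: int) -> int:
    return a + b

add_one = functools.partial(add, 1)

try:
    inspect.getsource(add_one)  # partials carry no retrievable source
except TypeError as e:
    print(f"cannot extract source: {e}")
```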
From 4640d7b770759f8c118d2a51e5bb3063b5d7ac1a Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Mon, 11 Nov 2024 17:59:44 +0000
Subject: [PATCH 57/70] Use KernelParams for decorators
---
.github/workflows/ci.yml | 2 +-
src/humanloop/client.py | 114 +++---------------------
src/humanloop/decorators/flow.py | 19 ++--
src/humanloop/decorators/prompt.py | 51 ++---------
src/humanloop/decorators/tool.py | 24 +++--
src/humanloop/decorators/types.py | 12 +++
src/humanloop/otel/exporter.py | 2 +-
src/humanloop/otel/processor.py | 2 +
tests/decorators/test_flow_decorator.py | 4 +-
tests/decorators/test_tool_decorator.py | 19 ----
10 files changed, 58 insertions(+), 191 deletions(-)
create mode 100644 src/humanloop/decorators/types.py
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index e043d505..5ac08dc2 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -17,7 +17,7 @@ jobs:
- name: Install dependencies
run: poetry install
- name: Compile
- run: poetry run mypy .
+ run: poetry run mypy . --enable-incomplete-feature=Unpack
test:
runs-on: ubuntu-20.04
steps:
diff --git a/src/humanloop/client.py b/src/humanloop/client.py
index 50a43cfc..ebc86b78 100644
--- a/src/humanloop/client.py
+++ b/src/humanloop/client.py
@@ -1,7 +1,8 @@
from contextvars import ContextVar
import os
import typing
-from typing import Any, List, Optional, Sequence
+from typing import List, Optional, Sequence
+from typing_extensions import Unpack
import httpx
from opentelemetry.sdk.resources import Resource
@@ -9,15 +10,8 @@
from opentelemetry.trace import Tracer
from humanloop.core.client_wrapper import SyncClientWrapper
+from humanloop.decorators.types import DecoratorPromptKernelRequestParams
from humanloop.eval_utils.context import EVALUATION_CONTEXT_VARIABLE_NAME, EvaluationContext
-from humanloop.types.model_endpoints import ModelEndpoints
-from humanloop.types.model_providers import ModelProviders
-from humanloop.types.prompt_kernel_request_stop import PromptKernelRequestStop
-from humanloop.types.prompt_kernel_request_template import PromptKernelRequestTemplate
-from humanloop.types.response_format import ResponseFormat
-
-if typing.TYPE_CHECKING:
- from humanloop import ToolFunctionParams
from humanloop.eval_utils import log_with_evaluation_context, run_eval
from humanloop.eval_utils.types import Dataset, Evaluator, EvaluatorCheck, File
@@ -33,6 +27,8 @@
from humanloop.otel.processor import HumanloopSpanProcessor
from humanloop.prompt_utils import populate_template
from humanloop.prompts.client import PromptsClient
+from humanloop.requests.flow_kernel_request import FlowKernelRequestParams
+from humanloop.requests.tool_kernel_request import ToolKernelRequestParams
class ExtendedEvalsClient(EvaluationsClient):
@@ -177,21 +173,7 @@ def prompt(
self,
*,
path: Optional[str] = None,
- model: Optional[str] = None,
- attributes: Optional[dict[str, Any]] = None,
- endpoint: Optional[ModelEndpoints] = None,
- template: Optional[PromptKernelRequestTemplate] = None,
- provider: Optional[ModelProviders] = None,
- max_tokens: Optional[int] = None,
- temperature: Optional[float] = None,
- top_p: Optional[float] = None,
- stop: Optional[PromptKernelRequestStop] = None,
- presence_penalty: Optional[float] = None,
- frequency_penalty: Optional[float] = None,
- other: Optional[dict[str, Optional[Any]]] = None,
- seed: Optional[int] = None,
- response_format: Optional[ResponseFormat] = None,
- tools: Optional[Sequence["ToolFunctionParams"]] = None,
+ **prompt_kernel: Unpack[DecoratorPromptKernelRequestParams],
):
"""Decorator for declaring a (Prompt)[https://humanloop.com/docs/explanation/prompts] in code.
@@ -258,79 +240,19 @@ def call_llm(messages):
provided, the function name is used as the path and the File
is created in the root of your Humanloop organization workspace.
- :param model: Name of the model used by the Prompt.
-
- :param endpoint: The model instance used, e.g. `gpt-4`. See
- [supported models](https://humanloop.com/docs/reference/supported-models)
-
- :param template: The template for the Prompt. This is the text of
- the system message used to set the LLM prompt. The template
- accepts template slots using the format `{{slot_name}}`.
-
- :param provider: The company providing the underlying model service.
-
- :param max_tokens: Maximum number of tokens used in generation.
-
- :param temperature: What sampling temperature to use
- when making a generation. Higher values means the model
- will be more creative.
-
- :param top_p: An alternative to sampling with temperature,
- called nucleus sampling, where the model considers the results
- of the tokens with top_p probability mass.
-
- :param stop: Token or list of tokens that stop generation
-
- :param presence_penalty: Number between -2.0 and 2.0.
- Positive values penalize new tokens based on whether they
- appear in the generation so far.
-
- :param frequency_penalty: Number between -2.0 and 2.0. Positive
- values penalize new tokens based on how frequently they
- appear in the generation so far.
-
- :param other: Other parameter values to be passed to the provider call.
-
- :param seed: If specified, model will make a best effort to
- sample deterministically, but it is not guaranteed.
-
- :param response_format: The format of the response.
- Only `{"type": "json_object"}` is currently supported
- for chat.
-
- :param attributes: Additional fields to describe the Prompt. Helpful to
- separate Prompt versions from each other with details on how they
- were created or used.
-
- :param tools: The tool specification that the model can choose to call if Tool
- calling is supported.
+        :param prompt_kernel: Attributes that define the Prompt. See :class:`DecoratorPromptKernelRequestParams`
"""
return prompt_decorator_factory(
opentelemetry_tracer=self._opentelemetry_tracer,
path=path,
- model=model,
- attributes=attributes,
- endpoint=endpoint,
- template=template,
- provider=provider,
- max_tokens=max_tokens,
- stop=stop,
- temperature=temperature,
- top_p=top_p,
- presence_penalty=presence_penalty,
- frequency_penalty=frequency_penalty,
- other=other,
- seed=seed,
- response_format=response_format,
- tools=tools,
+ **prompt_kernel,
)
def tool(
self,
*,
path: Optional[str] = None,
- setup_values: Optional[dict[str, Optional[Any]]] = None,
- attributes: Optional[dict[str, Optional[Any]]] = None,
+ **tool_kernel: Unpack[ToolKernelRequestParams],
):
"""Decorator for declaring a [Tool](https://humanloop.com/docs/explanation/tools) in code.
@@ -391,25 +313,19 @@ def calculator(a: int, b: Optional[int]) -> int:
will be used as the path and the File will be created in the root
of your organization's workspace.
- :param setup_values: Values needed to setup the Tool, defined in
- JSON Schema format: https://json-schema.org/
-
- :param attributes: Additional fields to describe the Tool.
- Helpful to separate Tool versions from each other
- with details on how they were created or used.
+        :param tool_kernel: Attributes that define the Tool. See :class:`ToolKernelRequestParams`
"""
return tool_decorator_factory(
opentelemetry_tracer=self._opentelemetry_tracer,
path=path,
- setup_values=setup_values,
- attributes=attributes,
+ **tool_kernel,
)
def flow(
self,
*,
path: Optional[str] = None,
- attributes: Optional[dict[str, typing.Any]] = None,
+ **flow_kernel: Unpack[FlowKernelRequestParams],
):
"""Decorator for declaring a [Flow](https://humanloop.com/docs/explanation/flows) in code.
@@ -453,14 +369,12 @@ def entrypoint():
will be used as the path and the File will be created in the root
of your organization workspace.
- :param attributes: A key-value object identifying the Flow Version.
+        :param flow_kernel: Attributes that define the Flow. See :class:`FlowKernelRequestParams`
"""
- if attributes is None:
- attributes = {}
return flow_decorator_factory(
opentelemetry_tracer=self._opentelemetry_tracer,
path=path,
- attributes=attributes,
+ **flow_kernel,
)
diff --git a/src/humanloop/decorators/flow.py b/src/humanloop/decorators/flow.py
index 2cb3d2ad..bca9b23a 100644
--- a/src/humanloop/decorators/flow.py
+++ b/src/humanloop/decorators/flow.py
@@ -1,12 +1,10 @@
import logging
-import os
-import sys
from functools import wraps
from typing import Any, Callable, Mapping, Optional, Sequence
from opentelemetry.sdk.trace import Span
from opentelemetry.trace import Tracer
-from opentelemetry.util.types import AttributeValue
+from typing_extensions import Unpack
from humanloop.decorators.helpers import args_to_inputs
from humanloop.eval_utils.types import File
@@ -14,6 +12,7 @@
from humanloop.otel.constants import HUMANLOOP_FILE_KEY, HUMANLOOP_FILE_TYPE_KEY, HUMANLOOP_LOG_KEY, HUMANLOOP_PATH_KEY
from humanloop.otel.helpers import generate_span_id, write_to_opentelemetry_span
from humanloop.requests import FlowKernelRequestParams as FlowDict
+from humanloop.requests.flow_kernel_request import FlowKernelRequestParams
logger = logging.getLogger("humanloop.sdk")
@@ -21,11 +20,9 @@
def flow(
opentelemetry_tracer: Tracer,
path: Optional[str] = None,
- attributes: Optional[dict[str, AttributeValue]] = None,
+ **flow_kernel: Unpack[FlowKernelRequestParams],
):
- if attributes is None:
- attributes = {}
- attributes = {k: v for k, v in attributes.items() if v is not None}
+ flow_kernel["attributes"] = {k: v for k, v in flow_kernel.get("attributes", {}).items() if v is not None}
def decorator(func: Callable):
@wraps(func)
@@ -54,11 +51,11 @@ def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any:
span.set_attribute(HUMANLOOP_PATH_KEY, path if path else func.__name__)
span.set_attribute(HUMANLOOP_FILE_TYPE_KEY, "flow")
- if attributes:
+ if flow_kernel:
write_to_opentelemetry_span(
span=span,
- key=f"{HUMANLOOP_FILE_KEY}.flow.attributes",
- value=attributes, # type: ignore
+ key=f"{HUMANLOOP_FILE_KEY}.flow",
+ value=flow_kernel, # type: ignore
)
inputs = args_to_inputs(func, args, kwargs)
@@ -96,7 +93,7 @@ def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any:
wrapper.file = File( # type: ignore
path=path if path else func.__name__,
type="flow",
- version=FlowDict(attributes=attributes), # type: ignore
+ version=FlowDict(**flow_kernel), # type: ignore
callable=wrapper,
)
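The comprehension that replaces the old standalone `attributes` handling strips None values from the kernel before it is written to the span. A minimal sketch of that behaviour, with made-up attribute values:

```python
flow_kernel = {"attributes": {"env": "prod", "owner": None}}

# Drop None values, mirroring the dict comprehension in the diff above
flow_kernel["attributes"] = {
    k: v for k, v in flow_kernel.get("attributes", {}).items() if v is not None
}

assert flow_kernel["attributes"] == {"env": "prod"}
```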
diff --git a/src/humanloop/decorators/prompt.py b/src/humanloop/decorators/prompt.py
index 5ab10c4f..ba29b5b9 100644
--- a/src/humanloop/decorators/prompt.py
+++ b/src/humanloop/decorators/prompt.py
@@ -1,23 +1,17 @@
import logging
-import typing
from functools import wraps
from typing import Any, Callable, Mapping, Optional, Sequence
from opentelemetry.sdk.trace import Span
from opentelemetry.trace import Tracer
+from typing_extensions import Unpack
-if typing.TYPE_CHECKING:
- from humanloop import ToolFunctionParams
from humanloop.decorators.helpers import args_to_inputs
+from humanloop.decorators.types import DecoratorPromptKernelRequestParams
from humanloop.eval_utils import File
from humanloop.otel import TRACE_FLOW_CONTEXT, FlowContext
from humanloop.otel.constants import HUMANLOOP_FILE_KEY, HUMANLOOP_FILE_TYPE_KEY, HUMANLOOP_LOG_KEY, HUMANLOOP_PATH_KEY
from humanloop.otel.helpers import generate_span_id, write_to_opentelemetry_span
-from humanloop.types.model_endpoints import ModelEndpoints
-from humanloop.types.model_providers import ModelProviders
-from humanloop.types.prompt_kernel_request_stop import PromptKernelRequestStop
-from humanloop.types.prompt_kernel_request_template import PromptKernelRequestTemplate
-from humanloop.types.response_format import ResponseFormat
logger = logging.getLogger("humanloop.sdk")
@@ -26,43 +20,9 @@ def prompt(
opentelemetry_tracer: Tracer,
path: Optional[str] = None,
# TODO: Template can be a list of objects?
- model: Optional[str] = None,
- attributes: Optional[dict[str, Any]] = None,
- endpoint: Optional[ModelEndpoints] = None,
- template: Optional[PromptKernelRequestTemplate] = None,
- provider: Optional[ModelProviders] = None,
- max_tokens: Optional[int] = None,
- stop: Optional[PromptKernelRequestStop] = None,
- temperature: Optional[float] = None,
- top_p: Optional[float] = None,
- presence_penalty: Optional[float] = None,
- frequency_penalty: Optional[float] = None,
- other: Optional[dict[str, Optional[Any]]] = None,
- seed: Optional[int] = None,
- response_format: Optional[ResponseFormat] = None,
- tools: Optional[Sequence["ToolFunctionParams"]] = None,
+ **prompt_kernel: Unpack[DecoratorPromptKernelRequestParams],
):
def decorator(func: Callable):
- prompt_kernel = {}
- for attr_name, attr_value in {
- "model": model,
- "endpoint": endpoint,
- "template": template,
- "provider": provider,
- "max_tokens": max_tokens,
- "stop": stop,
- "other": other,
- "seed": seed,
- "response_format": response_format,
- "attributes": attributes or None,
- "tools": tools,
- "temperature": temperature,
- "top_p": top_p,
- "presence_penalty": presence_penalty,
- "frequency_penalty": frequency_penalty,
- }.items():
- prompt_kernel[attr_name] = attr_value # type: ignore
-
@wraps(func)
def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any:
span: Span
@@ -85,7 +45,10 @@ def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any:
write_to_opentelemetry_span(
span=span,
key=f"{HUMANLOOP_FILE_KEY}.prompt",
- value=prompt_kernel, # type: ignore
+ value={
+ **prompt_kernel, # type: ignore
+ "attributes": prompt_kernel.get("attributes") or None, # type: ignore
+ }, # type: ignore
)
# Call the decorated function
diff --git a/src/humanloop/decorators/tool.py b/src/humanloop/decorators/tool.py
index 148fcc44..eb2c1940 100644
--- a/src/humanloop/decorators/tool.py
+++ b/src/humanloop/decorators/tool.py
@@ -9,7 +9,9 @@
from typing import Any, Callable, Literal, Mapping, Optional, Sequence, TypedDict, Union
from opentelemetry.trace import Tracer
+from typing_extensions import Unpack
+from humanloop.decorators.helpers import args_to_inputs
from humanloop.eval_utils import File
from humanloop.otel import TRACE_FLOW_CONTEXT, FlowContext
from humanloop.otel.constants import (
@@ -22,28 +24,24 @@
from humanloop.requests.tool_function import ToolFunctionParams
from humanloop.requests.tool_kernel_request import ToolKernelRequestParams
-from humanloop.decorators.helpers import args_to_inputs
-
logger = logging.getLogger("humanloop.sdk")
def tool(
opentelemetry_tracer: Tracer,
path: Optional[str] = None,
- setup_values: Optional[dict[str, Optional[Any]]] = None,
- attributes: Optional[dict[str, typing.Any]] = None,
- strict: bool = True,
+ **tool_kernel: Unpack[ToolKernelRequestParams],
):
def decorator(func: Callable):
- tool_kernel = _build_tool_kernel(
+ enhanced_tool_kernel = _build_tool_kernel(
func=func,
- attributes=attributes,
- setup_values=setup_values,
- strict=strict,
+ attributes=tool_kernel.get("attributes"),
+ setup_values=tool_kernel.get("setup_values"),
+ strict=True,
)
# Mypy complains about adding attribute on function, but it's nice UX
- func.json_schema = tool_kernel["function"] # type: ignore
+ func.json_schema = enhanced_tool_kernel["function"] # type: ignore
@wraps(func)
def wrapper(*args, **kwargs):
@@ -64,11 +62,11 @@ def wrapper(*args, **kwargs):
# Write the Tool Kernel to the Span on HL_FILE_OT_KEY
span.set_attribute(HUMANLOOP_PATH_KEY, path if path else func.__name__)
span.set_attribute(HUMANLOOP_FILE_TYPE_KEY, "tool")
- if tool_kernel:
+ if enhanced_tool_kernel:
write_to_opentelemetry_span(
span=span,
key=f"{HUMANLOOP_FILE_KEY}.tool",
- value=tool_kernel,
+ value=enhanced_tool_kernel,
)
# Call the decorated function
@@ -100,7 +98,7 @@ def wrapper(*args, **kwargs):
wrapper.file = File( # type: ignore
path=path if path else func.__name__,
type="tool",
- version=tool_kernel,
+ version=enhanced_tool_kernel,
callable=wrapper,
)
diff --git a/src/humanloop/decorators/types.py b/src/humanloop/decorators/types.py
new file mode 100644
index 00000000..f52f0178
--- /dev/null
+++ b/src/humanloop/decorators/types.py
@@ -0,0 +1,12 @@
+from typing_extensions import NotRequired
+
+from humanloop.requests.prompt_kernel_request import PromptKernelRequestParams
+
+
+class DecoratorPromptKernelRequestParams(PromptKernelRequestParams):
+ """See :class:`PromptKernelRequestParams` for more information.
+
+    Allows the `model` field to be optional for the Prompt decorator.
+ """
+
+ model: NotRequired[str] # type: ignore
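This new TypedDict feeds the `**kwargs: Unpack[...]` signatures introduced throughout this patch. A standalone sketch of the pattern, using a hypothetical `KernelParams` rather than the SDK types (older mypy releases need `--enable-incomplete-feature=Unpack`, as the CI change above shows):

```python
from typing import Optional

from typing_extensions import NotRequired, TypedDict, Unpack

class KernelParams(TypedDict):
    model: NotRequired[str]
    temperature: NotRequired[float]

def prompt(*, path: Optional[str] = None, **kernel: Unpack[KernelParams]) -> None:
    # mypy checks the keyword arguments against the TypedDict fields
    print(path, kernel)

prompt(path="folder/name", model="gpt-4", temperature=0.7)  # accepted
# prompt(foo=1)  # rejected by mypy: unexpected keyword argument "foo"
```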
diff --git a/src/humanloop/otel/exporter.py b/src/humanloop/otel/exporter.py
index cfcf356a..9cc4e680 100644
--- a/src/humanloop/otel/exporter.py
+++ b/src/humanloop/otel/exporter.py
@@ -1,9 +1,9 @@
import contextvars
+import copy
import json
import logging
import threading
import typing
-import copy
from queue import Empty as EmptyQueue
from queue import Queue
from threading import Thread
diff --git a/src/humanloop/otel/processor.py b/src/humanloop/otel/processor.py
index 14fd7501..5caa5c80 100644
--- a/src/humanloop/otel/processor.py
+++ b/src/humanloop/otel/processor.py
@@ -114,6 +114,8 @@ def _enrich_prompt_kernel(prompt_span: ReadableSpan, llm_provider_call_span: Rea
# via the @prompt arguments. Otherwise, use the information
# from the intercepted LLM provider call
prompt["model"] = prompt.get("model") or gen_ai_object.get("request", {}).get("model", None)
+ if prompt["model"] is None:
+ raise ValueError("Could not infer required parameter `model`. Please provide it in the @prompt decorator.")
prompt["endpoint"] = prompt.get("endpoint") or llm_object.get("request", {}).get("type")
prompt["provider"] = prompt.get("provider") or gen_ai_object.get("system", None)
if prompt["provider"]:
diff --git a/tests/decorators/test_flow_decorator.py b/tests/decorators/test_flow_decorator.py
index 13764da8..09a769f6 100644
--- a/tests/decorators/test_flow_decorator.py
+++ b/tests/decorators/test_flow_decorator.py
@@ -31,7 +31,7 @@ def _random_string() -> str:
)
)
- @prompt(
+ @prompt( # type: ignore
opentelemetry_tracer=opentelemetry_tracer,
path=None,
template="You are an assistant on the following topics: {topics}.",
@@ -55,7 +55,7 @@ def _call_llm(messages: list[ChatCompletionMessageParam]) -> str:
def _agent_call(messages: list[dict]) -> str:
return _call_llm(messages=messages)
- @flow(
+ @flow( # type: ignore
opentelemetry_tracer=opentelemetry_tracer,
)
def _flow_over_flow(messages: list[dict]) -> str:
diff --git a/tests/decorators/test_tool_decorator.py b/tests/decorators/test_tool_decorator.py
index 0e4cc0c8..d9f133ee 100644
--- a/tests/decorators/test_tool_decorator.py
+++ b/tests/decorators/test_tool_decorator.py
@@ -360,25 +360,6 @@ def foo(a: Optional[tuple[int, Optional[str], float]]):
Validator.check_schema(foo.json_schema)
-def test_strict_false(
- opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter],
-):
- # GIVEN an OTel configuration
- tracer, _ = opentelemetry_test_configuration
-
- # GIVEN a tool definition with strict=False
- # WHEN building the Tool Kernel
- @tool(opentelemetry_tracer=tracer, strict=False)
- def foo(a: int, b: int) -> int:
- return a + b
-
- # THEN the JSON schema is correctly built
- assert foo.json_schema["strict"] is False
-
- # THEN the JSONSchema is valid
- Validator.check_schema(foo.json_schema)
-
-
def test_tool_no_args(
opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter],
):
From 4704ce20af336f00c35b5a6c0dfb135e02991b97 Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Mon, 11 Nov 2024 18:17:00 +0000
Subject: [PATCH 58/70] Remove mypy Unpack flag from CI
---
.github/workflows/ci.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 5ac08dc2..e043d505 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -17,7 +17,7 @@ jobs:
- name: Install dependencies
run: poetry install
- name: Compile
- run: poetry run mypy . --enable-incomplete-feature=Unpack
+ run: poetry run mypy .
test:
runs-on: ubuntu-20.04
steps:
From 2803248243fc2518e633040ec01a4a21037d12d2 Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Mon, 11 Nov 2024 18:19:44 +0000
Subject: [PATCH 59/70] Bump mypy to 1.6.0 and re-enable Unpack flag in CI
---
.github/workflows/ci.yml | 2 +-
poetry.lock | 88 +++++++++++++++++++++++++---------------
pyproject.toml | 2 +-
3 files changed, 58 insertions(+), 34 deletions(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index e043d505..ba22d14c 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -17,7 +17,7 @@ jobs:
- name: Install dependencies
run: poetry install
- name: Compile
- run: poetry run mypy .
+ run: poetry run mypy --enable-incomplete-feature=Unpack --show-traceback .
test:
runs-on: ubuntu-20.04
steps:
diff --git a/poetry.lock b/poetry.lock
index 70b743b5..490d4dad 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -666,48 +666,48 @@ referencing = ">=0.31.0"
[[package]]
name = "mypy"
-version = "1.0.1"
+version = "1.6.0"
description = "Optional static typing for Python"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "mypy-1.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:71a808334d3f41ef011faa5a5cd8153606df5fc0b56de5b2e89566c8093a0c9a"},
- {file = "mypy-1.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:920169f0184215eef19294fa86ea49ffd4635dedfdea2b57e45cb4ee85d5ccaf"},
- {file = "mypy-1.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:27a0f74a298769d9fdc8498fcb4f2beb86f0564bcdb1a37b58cbbe78e55cf8c0"},
- {file = "mypy-1.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:65b122a993d9c81ea0bfde7689b3365318a88bde952e4dfa1b3a8b4ac05d168b"},
- {file = "mypy-1.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:5deb252fd42a77add936b463033a59b8e48eb2eaec2976d76b6878d031933fe4"},
- {file = "mypy-1.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2013226d17f20468f34feddd6aae4635a55f79626549099354ce641bc7d40262"},
- {file = "mypy-1.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:48525aec92b47baed9b3380371ab8ab6e63a5aab317347dfe9e55e02aaad22e8"},
- {file = "mypy-1.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c96b8a0c019fe29040d520d9257d8c8f122a7343a8307bf8d6d4a43f5c5bfcc8"},
- {file = "mypy-1.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:448de661536d270ce04f2d7dddaa49b2fdba6e3bd8a83212164d4174ff43aa65"},
- {file = "mypy-1.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:d42a98e76070a365a1d1c220fcac8aa4ada12ae0db679cb4d910fabefc88b994"},
- {file = "mypy-1.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e64f48c6176e243ad015e995de05af7f22bbe370dbb5b32bd6988438ec873919"},
- {file = "mypy-1.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5fdd63e4f50e3538617887e9aee91855368d9fc1dea30da743837b0df7373bc4"},
- {file = "mypy-1.0.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:dbeb24514c4acbc78d205f85dd0e800f34062efcc1f4a4857c57e4b4b8712bff"},
- {file = "mypy-1.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a2948c40a7dd46c1c33765718936669dc1f628f134013b02ff5ac6c7ef6942bf"},
- {file = "mypy-1.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5bc8d6bd3b274dd3846597855d96d38d947aedba18776aa998a8d46fabdaed76"},
- {file = "mypy-1.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:17455cda53eeee0a4adb6371a21dd3dbf465897de82843751cf822605d152c8c"},
- {file = "mypy-1.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e831662208055b006eef68392a768ff83596035ffd6d846786578ba1714ba8f6"},
- {file = "mypy-1.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e60d0b09f62ae97a94605c3f73fd952395286cf3e3b9e7b97f60b01ddfbbda88"},
- {file = "mypy-1.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:0af4f0e20706aadf4e6f8f8dc5ab739089146b83fd53cb4a7e0e850ef3de0bb6"},
- {file = "mypy-1.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:24189f23dc66f83b839bd1cce2dfc356020dfc9a8bae03978477b15be61b062e"},
- {file = "mypy-1.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:93a85495fb13dc484251b4c1fd7a5ac370cd0d812bbfc3b39c1bafefe95275d5"},
- {file = "mypy-1.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f546ac34093c6ce33f6278f7c88f0f147a4849386d3bf3ae193702f4fe31407"},
- {file = "mypy-1.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c6c2ccb7af7154673c591189c3687b013122c5a891bb5651eca3db8e6c6c55bd"},
- {file = "mypy-1.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:15b5a824b58c7c822c51bc66308e759243c32631896743f030daf449fe3677f3"},
- {file = "mypy-1.0.1-py3-none-any.whl", hash = "sha256:eda5c8b9949ed411ff752b9a01adda31afe7eae1e53e946dbdf9db23865e66c4"},
- {file = "mypy-1.0.1.tar.gz", hash = "sha256:28cea5a6392bb43d266782983b5a4216c25544cd7d80be681a155ddcdafd152d"},
+ {file = "mypy-1.6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:091f53ff88cb093dcc33c29eee522c087a438df65eb92acd371161c1f4380ff0"},
+ {file = "mypy-1.6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:eb7ff4007865833c470a601498ba30462b7374342580e2346bf7884557e40531"},
+ {file = "mypy-1.6.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49499cf1e464f533fc45be54d20a6351a312f96ae7892d8e9f1708140e27ce41"},
+ {file = "mypy-1.6.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4c192445899c69f07874dabda7e931b0cc811ea055bf82c1ababf358b9b2a72c"},
+ {file = "mypy-1.6.0-cp310-cp310-win_amd64.whl", hash = "sha256:3df87094028e52766b0a59a3e46481bb98b27986ed6ded6a6cc35ecc75bb9182"},
+ {file = "mypy-1.6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3c8835a07b8442da900db47ccfda76c92c69c3a575872a5b764332c4bacb5a0a"},
+ {file = "mypy-1.6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:24f3de8b9e7021cd794ad9dfbf2e9fe3f069ff5e28cb57af6f873ffec1cb0425"},
+ {file = "mypy-1.6.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:856bad61ebc7d21dbc019b719e98303dc6256cec6dcc9ebb0b214b81d6901bd8"},
+ {file = "mypy-1.6.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:89513ddfda06b5c8ebd64f026d20a61ef264e89125dc82633f3c34eeb50e7d60"},
+ {file = "mypy-1.6.0-cp311-cp311-win_amd64.whl", hash = "sha256:9f8464ed410ada641c29f5de3e6716cbdd4f460b31cf755b2af52f2d5ea79ead"},
+ {file = "mypy-1.6.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:971104bcb180e4fed0d7bd85504c9036346ab44b7416c75dd93b5c8c6bb7e28f"},
+ {file = "mypy-1.6.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ab98b8f6fdf669711f3abe83a745f67f50e3cbaea3998b90e8608d2b459fd566"},
+ {file = "mypy-1.6.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a69db3018b87b3e6e9dd28970f983ea6c933800c9edf8c503c3135b3274d5ad"},
+ {file = "mypy-1.6.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:dccd850a2e3863891871c9e16c54c742dba5470f5120ffed8152956e9e0a5e13"},
+ {file = "mypy-1.6.0-cp312-cp312-win_amd64.whl", hash = "sha256:f8598307150b5722854f035d2e70a1ad9cc3c72d392c34fffd8c66d888c90f17"},
+ {file = "mypy-1.6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:fea451a3125bf0bfe716e5d7ad4b92033c471e4b5b3e154c67525539d14dc15a"},
+ {file = "mypy-1.6.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e28d7b221898c401494f3b77db3bac78a03ad0a0fff29a950317d87885c655d2"},
+ {file = "mypy-1.6.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4b7a99275a61aa22256bab5839c35fe8a6887781862471df82afb4b445daae6"},
+ {file = "mypy-1.6.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:7469545380dddce5719e3656b80bdfbb217cfe8dbb1438532d6abc754b828fed"},
+ {file = "mypy-1.6.0-cp38-cp38-win_amd64.whl", hash = "sha256:7807a2a61e636af9ca247ba8494031fb060a0a744b9fee7de3a54bed8a753323"},
+ {file = "mypy-1.6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d2dad072e01764823d4b2f06bc7365bb1d4b6c2f38c4d42fade3c8d45b0b4b67"},
+ {file = "mypy-1.6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b19006055dde8a5425baa5f3b57a19fa79df621606540493e5e893500148c72f"},
+ {file = "mypy-1.6.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:31eba8a7a71f0071f55227a8057468b8d2eb5bf578c8502c7f01abaec8141b2f"},
+ {file = "mypy-1.6.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8e0db37ac4ebb2fee7702767dfc1b773c7365731c22787cb99f507285014fcaf"},
+ {file = "mypy-1.6.0-cp39-cp39-win_amd64.whl", hash = "sha256:c69051274762cccd13498b568ed2430f8d22baa4b179911ad0c1577d336ed849"},
+ {file = "mypy-1.6.0-py3-none-any.whl", hash = "sha256:9e1589ca150a51d9d00bb839bfeca2f7a04f32cd62fad87a847bc0818e15d7dc"},
+ {file = "mypy-1.6.0.tar.gz", hash = "sha256:4f3d27537abde1be6d5f2c96c29a454da333a2a271ae7d5bc7110e6d4b7beb3f"},
]
[package.dependencies]
-mypy-extensions = ">=0.4.3"
+mypy-extensions = ">=1.0.0"
tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
-typing-extensions = ">=3.10"
+typing-extensions = ">=4.1.0"
[package.extras]
dmypy = ["psutil (>=4.0)"]
install-types = ["pip"]
-python2 = ["typed-ast (>=1.4.0,<2)"]
reports = ["lxml"]
[[package]]
@@ -1684,6 +1684,30 @@ files = [
{file = "rpds_py-0.20.1-cp39-none-win32.whl", hash = "sha256:653647b8838cf83b2e7e6a0364f49af96deec64d2a6578324db58380cff82aca"},
{file = "rpds_py-0.20.1-cp39-none-win_amd64.whl", hash = "sha256:fa41a64ac5b08b292906e248549ab48b69c5428f3987b09689ab2441f267d04d"},
{file = "rpds_py-0.20.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:7a07ced2b22f0cf0b55a6a510078174c31b6d8544f3bc00c2bcee52b3d613f74"},
+ {file = "rpds_py-0.20.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:68cb0a499f2c4a088fd2f521453e22ed3527154136a855c62e148b7883b99f9a"},
+ {file = "rpds_py-0.20.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fa3060d885657abc549b2a0f8e1b79699290e5d83845141717c6c90c2df38311"},
+ {file = "rpds_py-0.20.1-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:95f3b65d2392e1c5cec27cff08fdc0080270d5a1a4b2ea1d51d5f4a2620ff08d"},
+ {file = "rpds_py-0.20.1-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2cc3712a4b0b76a1d45a9302dd2f53ff339614b1c29603a911318f2357b04dd2"},
+ {file = "rpds_py-0.20.1-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d4eea0761e37485c9b81400437adb11c40e13ef513375bbd6973e34100aeb06"},
+ {file = "rpds_py-0.20.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f5179583d7a6cdb981151dd349786cbc318bab54963a192692d945dd3f6435d"},
+ {file = "rpds_py-0.20.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2fbb0ffc754490aff6dabbf28064be47f0f9ca0b9755976f945214965b3ace7e"},
+ {file = "rpds_py-0.20.1-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:a94e52537a0e0a85429eda9e49f272ada715506d3b2431f64b8a3e34eb5f3e75"},
+ {file = "rpds_py-0.20.1-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:92b68b79c0da2a980b1c4197e56ac3dd0c8a149b4603747c4378914a68706979"},
+ {file = "rpds_py-0.20.1-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:93da1d3db08a827eda74356f9f58884adb254e59b6664f64cc04cdff2cc19b0d"},
+ {file = "rpds_py-0.20.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:754bbed1a4ca48479e9d4182a561d001bbf81543876cdded6f695ec3d465846b"},
+ {file = "rpds_py-0.20.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:ca449520e7484534a2a44faf629362cae62b660601432d04c482283c47eaebab"},
+ {file = "rpds_py-0.20.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:9c4cb04a16b0f199a8c9bf807269b2f63b7b5b11425e4a6bd44bd6961d28282c"},
+ {file = "rpds_py-0.20.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb63804105143c7e24cee7db89e37cb3f3941f8e80c4379a0b355c52a52b6780"},
+ {file = "rpds_py-0.20.1-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:55cd1fa4ecfa6d9f14fbd97ac24803e6f73e897c738f771a9fe038f2f11ff07c"},
+ {file = "rpds_py-0.20.1-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0f8f741b6292c86059ed175d80eefa80997125b7c478fb8769fd9ac8943a16c0"},
+ {file = "rpds_py-0.20.1-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fc212779bf8411667234b3cdd34d53de6c2b8b8b958e1e12cb473a5f367c338"},
+ {file = "rpds_py-0.20.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ad56edabcdb428c2e33bbf24f255fe2b43253b7d13a2cdbf05de955217313e6"},
+ {file = "rpds_py-0.20.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0a3a1e9ee9728b2c1734f65d6a1d376c6f2f6fdcc13bb007a08cc4b1ff576dc5"},
+ {file = "rpds_py-0.20.1-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:e13de156137b7095442b288e72f33503a469aa1980ed856b43c353ac86390519"},
+ {file = "rpds_py-0.20.1-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:07f59760ef99f31422c49038964b31c4dfcfeb5d2384ebfc71058a7c9adae2d2"},
+ {file = "rpds_py-0.20.1-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:59240685e7da61fb78f65a9f07f8108e36a83317c53f7b276b4175dc44151684"},
+ {file = "rpds_py-0.20.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:83cba698cfb3c2c5a7c3c6bac12fe6c6a51aae69513726be6411076185a8b24a"},
+ {file = "rpds_py-0.20.1.tar.gz", hash = "sha256:e1791c4aabd117653530dccd24108fa03cc6baf21f58b950d0a73c3b3b29a350"},
]
[[package]]
@@ -2129,4 +2153,4 @@ type = ["pytest-mypy"]
[metadata]
lock-version = "2.0"
python-versions = "^3.9"
-content-hash = "3bfca7f42f99c638b20663590b8c958ea2569694da45c8a849e10287d398518c"
+content-hash = "6b798093464f2fa81b87adef999e226306dbf590f7afa6ceb0f6ed7af7d6b1de"
diff --git a/pyproject.toml b/pyproject.toml
index 56e4f776..b5709745 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -54,7 +54,7 @@ cohere = "^5.11.2"
replicate = "^1.0.3"
jsonschema = "^4.23.0"
types-jsonschema = "^4.23.0.20240813"
-mypy = "1.0.1"
+mypy = "1.6.0"
pytest = "^7.4.0"
pytest-asyncio = "^0.23.5"
python-dateutil = "^2.8.2"
From 2ad919b05a1ccd9817ee9329e22f4d3ac21c8db2 Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Mon, 11 Nov 2024 18:38:27 +0000
Subject: [PATCH 60/70] Revert mypy lock and relax pin (mypy debacle)
---
.github/workflows/ci.yml | 2 +-
poetry.lock | 64 ++++++++++++++++++++--------------------
pyproject.toml | 2 +-
3 files changed, 34 insertions(+), 34 deletions(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index ba22d14c..5ac08dc2 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -17,7 +17,7 @@ jobs:
- name: Install dependencies
run: poetry install
- name: Compile
- run: poetry run mypy --enable-incomplete-feature=Unpack --show-traceback .
+ run: poetry run mypy . --enable-incomplete-feature=Unpack
test:
runs-on: ubuntu-20.04
steps:
diff --git a/poetry.lock b/poetry.lock
index 490d4dad..db873d0a 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -666,48 +666,48 @@ referencing = ">=0.31.0"
[[package]]
name = "mypy"
-version = "1.6.0"
+version = "1.0.1"
description = "Optional static typing for Python"
optional = false
-python-versions = ">=3.8"
+python-versions = ">=3.7"
files = [
- {file = "mypy-1.6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:091f53ff88cb093dcc33c29eee522c087a438df65eb92acd371161c1f4380ff0"},
- {file = "mypy-1.6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:eb7ff4007865833c470a601498ba30462b7374342580e2346bf7884557e40531"},
- {file = "mypy-1.6.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49499cf1e464f533fc45be54d20a6351a312f96ae7892d8e9f1708140e27ce41"},
- {file = "mypy-1.6.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4c192445899c69f07874dabda7e931b0cc811ea055bf82c1ababf358b9b2a72c"},
- {file = "mypy-1.6.0-cp310-cp310-win_amd64.whl", hash = "sha256:3df87094028e52766b0a59a3e46481bb98b27986ed6ded6a6cc35ecc75bb9182"},
- {file = "mypy-1.6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3c8835a07b8442da900db47ccfda76c92c69c3a575872a5b764332c4bacb5a0a"},
- {file = "mypy-1.6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:24f3de8b9e7021cd794ad9dfbf2e9fe3f069ff5e28cb57af6f873ffec1cb0425"},
- {file = "mypy-1.6.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:856bad61ebc7d21dbc019b719e98303dc6256cec6dcc9ebb0b214b81d6901bd8"},
- {file = "mypy-1.6.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:89513ddfda06b5c8ebd64f026d20a61ef264e89125dc82633f3c34eeb50e7d60"},
- {file = "mypy-1.6.0-cp311-cp311-win_amd64.whl", hash = "sha256:9f8464ed410ada641c29f5de3e6716cbdd4f460b31cf755b2af52f2d5ea79ead"},
- {file = "mypy-1.6.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:971104bcb180e4fed0d7bd85504c9036346ab44b7416c75dd93b5c8c6bb7e28f"},
- {file = "mypy-1.6.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ab98b8f6fdf669711f3abe83a745f67f50e3cbaea3998b90e8608d2b459fd566"},
- {file = "mypy-1.6.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a69db3018b87b3e6e9dd28970f983ea6c933800c9edf8c503c3135b3274d5ad"},
- {file = "mypy-1.6.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:dccd850a2e3863891871c9e16c54c742dba5470f5120ffed8152956e9e0a5e13"},
- {file = "mypy-1.6.0-cp312-cp312-win_amd64.whl", hash = "sha256:f8598307150b5722854f035d2e70a1ad9cc3c72d392c34fffd8c66d888c90f17"},
- {file = "mypy-1.6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:fea451a3125bf0bfe716e5d7ad4b92033c471e4b5b3e154c67525539d14dc15a"},
- {file = "mypy-1.6.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e28d7b221898c401494f3b77db3bac78a03ad0a0fff29a950317d87885c655d2"},
- {file = "mypy-1.6.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4b7a99275a61aa22256bab5839c35fe8a6887781862471df82afb4b445daae6"},
- {file = "mypy-1.6.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:7469545380dddce5719e3656b80bdfbb217cfe8dbb1438532d6abc754b828fed"},
- {file = "mypy-1.6.0-cp38-cp38-win_amd64.whl", hash = "sha256:7807a2a61e636af9ca247ba8494031fb060a0a744b9fee7de3a54bed8a753323"},
- {file = "mypy-1.6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d2dad072e01764823d4b2f06bc7365bb1d4b6c2f38c4d42fade3c8d45b0b4b67"},
- {file = "mypy-1.6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b19006055dde8a5425baa5f3b57a19fa79df621606540493e5e893500148c72f"},
- {file = "mypy-1.6.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:31eba8a7a71f0071f55227a8057468b8d2eb5bf578c8502c7f01abaec8141b2f"},
- {file = "mypy-1.6.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8e0db37ac4ebb2fee7702767dfc1b773c7365731c22787cb99f507285014fcaf"},
- {file = "mypy-1.6.0-cp39-cp39-win_amd64.whl", hash = "sha256:c69051274762cccd13498b568ed2430f8d22baa4b179911ad0c1577d336ed849"},
- {file = "mypy-1.6.0-py3-none-any.whl", hash = "sha256:9e1589ca150a51d9d00bb839bfeca2f7a04f32cd62fad87a847bc0818e15d7dc"},
- {file = "mypy-1.6.0.tar.gz", hash = "sha256:4f3d27537abde1be6d5f2c96c29a454da333a2a271ae7d5bc7110e6d4b7beb3f"},
+ {file = "mypy-1.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:71a808334d3f41ef011faa5a5cd8153606df5fc0b56de5b2e89566c8093a0c9a"},
+ {file = "mypy-1.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:920169f0184215eef19294fa86ea49ffd4635dedfdea2b57e45cb4ee85d5ccaf"},
+ {file = "mypy-1.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:27a0f74a298769d9fdc8498fcb4f2beb86f0564bcdb1a37b58cbbe78e55cf8c0"},
+ {file = "mypy-1.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:65b122a993d9c81ea0bfde7689b3365318a88bde952e4dfa1b3a8b4ac05d168b"},
+ {file = "mypy-1.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:5deb252fd42a77add936b463033a59b8e48eb2eaec2976d76b6878d031933fe4"},
+ {file = "mypy-1.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2013226d17f20468f34feddd6aae4635a55f79626549099354ce641bc7d40262"},
+ {file = "mypy-1.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:48525aec92b47baed9b3380371ab8ab6e63a5aab317347dfe9e55e02aaad22e8"},
+ {file = "mypy-1.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c96b8a0c019fe29040d520d9257d8c8f122a7343a8307bf8d6d4a43f5c5bfcc8"},
+ {file = "mypy-1.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:448de661536d270ce04f2d7dddaa49b2fdba6e3bd8a83212164d4174ff43aa65"},
+ {file = "mypy-1.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:d42a98e76070a365a1d1c220fcac8aa4ada12ae0db679cb4d910fabefc88b994"},
+ {file = "mypy-1.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e64f48c6176e243ad015e995de05af7f22bbe370dbb5b32bd6988438ec873919"},
+ {file = "mypy-1.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5fdd63e4f50e3538617887e9aee91855368d9fc1dea30da743837b0df7373bc4"},
+ {file = "mypy-1.0.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:dbeb24514c4acbc78d205f85dd0e800f34062efcc1f4a4857c57e4b4b8712bff"},
+ {file = "mypy-1.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a2948c40a7dd46c1c33765718936669dc1f628f134013b02ff5ac6c7ef6942bf"},
+ {file = "mypy-1.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5bc8d6bd3b274dd3846597855d96d38d947aedba18776aa998a8d46fabdaed76"},
+ {file = "mypy-1.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:17455cda53eeee0a4adb6371a21dd3dbf465897de82843751cf822605d152c8c"},
+ {file = "mypy-1.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e831662208055b006eef68392a768ff83596035ffd6d846786578ba1714ba8f6"},
+ {file = "mypy-1.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e60d0b09f62ae97a94605c3f73fd952395286cf3e3b9e7b97f60b01ddfbbda88"},
+ {file = "mypy-1.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:0af4f0e20706aadf4e6f8f8dc5ab739089146b83fd53cb4a7e0e850ef3de0bb6"},
+ {file = "mypy-1.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:24189f23dc66f83b839bd1cce2dfc356020dfc9a8bae03978477b15be61b062e"},
+ {file = "mypy-1.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:93a85495fb13dc484251b4c1fd7a5ac370cd0d812bbfc3b39c1bafefe95275d5"},
+ {file = "mypy-1.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f546ac34093c6ce33f6278f7c88f0f147a4849386d3bf3ae193702f4fe31407"},
+ {file = "mypy-1.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c6c2ccb7af7154673c591189c3687b013122c5a891bb5651eca3db8e6c6c55bd"},
+ {file = "mypy-1.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:15b5a824b58c7c822c51bc66308e759243c32631896743f030daf449fe3677f3"},
+ {file = "mypy-1.0.1-py3-none-any.whl", hash = "sha256:eda5c8b9949ed411ff752b9a01adda31afe7eae1e53e946dbdf9db23865e66c4"},
+ {file = "mypy-1.0.1.tar.gz", hash = "sha256:28cea5a6392bb43d266782983b5a4216c25544cd7d80be681a155ddcdafd152d"},
]
[package.dependencies]
-mypy-extensions = ">=1.0.0"
+mypy-extensions = ">=0.4.3"
tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
-typing-extensions = ">=4.1.0"
+typing-extensions = ">=3.10"
[package.extras]
dmypy = ["psutil (>=4.0)"]
install-types = ["pip"]
+python2 = ["typed-ast (>=1.4.0,<2)"]
reports = ["lxml"]
[[package]]
@@ -2153,4 +2153,4 @@ type = ["pytest-mypy"]
[metadata]
lock-version = "2.0"
python-versions = "^3.9"
-content-hash = "6b798093464f2fa81b87adef999e226306dbf590f7afa6ceb0f6ed7af7d6b1de"
+content-hash = "3bfca7f42f99c638b20663590b8c958ea2569694da45c8a849e10287d398518c"
diff --git a/pyproject.toml b/pyproject.toml
index b5709745..c19de0fb 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -54,7 +54,7 @@ cohere = "^5.11.2"
replicate = "^1.0.3"
jsonschema = "^4.23.0"
types-jsonschema = "^4.23.0.20240813"
-mypy = "1.6.0"
+mypy = "^1.6.0"
pytest = "^7.4.0"
pytest-asyncio = "^0.23.5"
python-dateutil = "^2.8.2"
From cdbe07028d2c94650faf6925ff9ea7ba80815850 Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Mon, 11 Nov 2024 18:57:25 +0000
Subject: [PATCH 61/70] mypy unpack ignore statements
---
.github/workflows/ci.yml | 2 +-
pyproject.toml | 2 +-
src/humanloop/client.py | 6 +++---
src/humanloop/decorators/flow.py | 2 +-
src/humanloop/decorators/prompt.py | 2 +-
src/humanloop/decorators/tool.py | 2 +-
6 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 5ac08dc2..e043d505 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -17,7 +17,7 @@ jobs:
- name: Install dependencies
run: poetry install
- name: Compile
- run: poetry run mypy . --enable-incomplete-feature=Unpack
+ run: poetry run mypy .
test:
runs-on: ubuntu-20.04
steps:
diff --git a/pyproject.toml b/pyproject.toml
index c19de0fb..4721487f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -54,7 +54,7 @@ cohere = "^5.11.2"
replicate = "^1.0.3"
jsonschema = "^4.23.0"
types-jsonschema = "^4.23.0.20240813"
-mypy = "^1.6.0"
+mypy = "^1.0.1"
pytest = "^7.4.0"
pytest-asyncio = "^0.23.5"
python-dateutil = "^2.8.2"
diff --git a/src/humanloop/client.py b/src/humanloop/client.py
index ebc86b78..725d910f 100644
--- a/src/humanloop/client.py
+++ b/src/humanloop/client.py
@@ -173,7 +173,7 @@ def prompt(
self,
*,
path: Optional[str] = None,
- **prompt_kernel: Unpack[DecoratorPromptKernelRequestParams],
+ **prompt_kernel: Unpack[DecoratorPromptKernelRequestParams], # type: ignore
):
"""Decorator for declaring a (Prompt)[https://humanloop.com/docs/explanation/prompts] in code.
@@ -252,7 +252,7 @@ def tool(
self,
*,
path: Optional[str] = None,
- **tool_kernel: Unpack[ToolKernelRequestParams],
+ **tool_kernel: Unpack[ToolKernelRequestParams], # type: ignore
):
"""Decorator for declaring a [Tool](https://humanloop.com/docs/explanation/tools) in code.
@@ -325,7 +325,7 @@ def flow(
self,
*,
path: Optional[str] = None,
- **flow_kernel: Unpack[FlowKernelRequestParams],
+ **flow_kernel: Unpack[FlowKernelRequestParams], # type: ignore
):
"""Decorator for declaring a [Flow](https://humanloop.com/docs/explanation/flows) in code.
diff --git a/src/humanloop/decorators/flow.py b/src/humanloop/decorators/flow.py
index bca9b23a..340be60a 100644
--- a/src/humanloop/decorators/flow.py
+++ b/src/humanloop/decorators/flow.py
@@ -20,7 +20,7 @@
def flow(
opentelemetry_tracer: Tracer,
path: Optional[str] = None,
- **flow_kernel: Unpack[FlowKernelRequestParams],
+ **flow_kernel: Unpack[FlowKernelRequestParams], # type: ignore
):
flow_kernel["attributes"] = {k: v for k, v in flow_kernel.get("attributes", {}).items() if v is not None}
diff --git a/src/humanloop/decorators/prompt.py b/src/humanloop/decorators/prompt.py
index ba29b5b9..2390fb65 100644
--- a/src/humanloop/decorators/prompt.py
+++ b/src/humanloop/decorators/prompt.py
@@ -20,7 +20,7 @@ def prompt(
opentelemetry_tracer: Tracer,
path: Optional[str] = None,
# TODO: Template can be a list of objects?
- **prompt_kernel: Unpack[DecoratorPromptKernelRequestParams],
+ **prompt_kernel: Unpack[DecoratorPromptKernelRequestParams], # type: ignore
):
def decorator(func: Callable):
@wraps(func)
diff --git a/src/humanloop/decorators/tool.py b/src/humanloop/decorators/tool.py
index eb2c1940..f75596c3 100644
--- a/src/humanloop/decorators/tool.py
+++ b/src/humanloop/decorators/tool.py
@@ -30,7 +30,7 @@
def tool(
opentelemetry_tracer: Tracer,
path: Optional[str] = None,
- **tool_kernel: Unpack[ToolKernelRequestParams],
+ **tool_kernel: Unpack[ToolKernelRequestParams], # type: ignore
):
def decorator(func: Callable):
enhanced_tool_kernel = _build_tool_kernel(
From 4d854ed66dfecd0b8c0d08349b676fb4f7bf060b Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Tue, 12 Nov 2024 11:48:08 +0000
Subject: [PATCH 62/70] Rebase changes
---
poetry.lock | 2 +-
src/humanloop/eval_utils/run.py | 6 ++----
2 files changed, 3 insertions(+), 5 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index db873d0a..483d2160 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -2153,4 +2153,4 @@ type = ["pytest-mypy"]
[metadata]
lock-version = "2.0"
python-versions = "^3.9"
-content-hash = "3bfca7f42f99c638b20663590b8c958ea2569694da45c8a849e10287d398518c"
+content-hash = "7885364fa002fb5d2dbb37bc97ebd71bf409573eddec0a520d5c370f89d8b9aa"
diff --git a/src/humanloop/eval_utils/run.py b/src/humanloop/eval_utils/run.py
index e156a9d3..6d2eda4e 100644
--- a/src/humanloop/eval_utils/run.py
+++ b/src/humanloop/eval_utils/run.py
@@ -24,8 +24,6 @@
from logging import INFO
from typing import Callable, Dict, List, Literal, Optional, Sequence, Tuple, TypeVar, Union
-from pydantic import ValidationError
-
from humanloop import EvaluatorResponse, FlowResponse, PromptResponse, ToolResponse
from humanloop.core.api_error import ApiError
from humanloop.eval_utils.context import EvaluationContext
@@ -59,6 +57,7 @@
from humanloop.types.datapoint_response_target_value import DatapointResponseTargetValue
from humanloop.types.evaluation_run_response import EvaluationRunResponse
from humanloop.types.run_stats_response import RunStatsResponse
+from humanloop.types.validation_error import ValidationError
if typing.TYPE_CHECKING:
from humanloop.client import BaseHumanloop
@@ -305,7 +304,6 @@ def run_eval(
file_dict = {**file_, **version}
hl_file: Union[PromptResponse, FlowResponse, ToolResponse, EvaluatorResponse]
- # NOTE: This could be cleaner, use polymorphism to avoid the if-else
if type_ == "flow":
# Be more lenient with Flow versions as they are arbitrary json
try:
@@ -716,7 +714,7 @@ def _check_evaluation_improvement(
return True, 0, 0
previous_evaluator_stats_by_path = _get_evaluator_stats_by_path(
- stat=stats.run_stats[1],
+ stat=stats.run_stats[1], # Latest Run is at index 0; previous Run is at index 1
evaluation=evaluation,
)
if evaluator_path in latest_evaluator_stats_by_path and evaluator_path in previous_evaluator_stats_by_path:
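The inline comment added above documents the ordering assumption behind the index: run stats arrive newest-first. Illustratively, with placeholder values:

```python
# Hypothetical stats list, newest Run first, per the comment above
run_stats = ["stats_for_run_3", "stats_for_run_2", "stats_for_run_1"]

latest_run_stats = run_stats[0]
previous_run_stats = run_stats[1]  # the Run the improvement check compares against
```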
From 649718bd8137700b14b5ca8b68ce4c252156e50d Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Tue, 12 Nov 2024 12:17:27 +0000
Subject: [PATCH 63/70] jsonify output of decorators if necessary
---
src/humanloop/client.py | 19 +++++++++++++++++++
src/humanloop/decorators/flow.py | 7 ++++++-
src/humanloop/decorators/prompt.py | 6 +++++-
src/humanloop/decorators/tool.py | 8 ++++++--
src/humanloop/otel/helpers.py | 12 +++++++++++-
5 files changed, 47 insertions(+), 5 deletions(-)
diff --git a/src/humanloop/client.py b/src/humanloop/client.py
index 725d910f..66e7d7cc 100644
--- a/src/humanloop/client.py
+++ b/src/humanloop/client.py
@@ -236,6 +236,12 @@ def call_llm(messages):
}
```
+        The decorated function should return either a string or a JSON-serializable value. If
+        the output cannot be serialized, a TypeError is raised.
+
+ If the function raises an exception, the log created by the function will have the output
+ field set to None and the error field set to the string representation of the exception.
+
:param path: The path where the Prompt is created. If not
provided, the function name is used as the path and the File
is created in the root of your Humanloop organization workspace.
@@ -308,6 +314,13 @@ def calculator(a: int, b: Optional[int]) -> int:
},
"output": 3
}
+ ```
+
+    The decorated function should return either a string or a JSON-serializable value. If
+    the output cannot be serialized, a TypeError is raised.
+
+ If the function raises an exception, the log created by the function will have the output
+ field set to None and the error field set to the string representation of the exception.
:param path: The path to the Tool. If not provided, the function name
will be used as the path and the File will be created in the root
@@ -365,6 +378,12 @@ def entrypoint():
will be nested, allowing you to track the whole conversation session
between the user and the assistant.
+        The decorated function should return either a string or a JSON-serializable value. If
+        the output cannot be serialized, a TypeError is raised.
+
+ If the function raises an exception, the log created by the function will have the output
+ field set to None and the error field set to the string representation of the exception.
+
:param path: The path to the Flow. If not provided, the function name
will be used as the path and the File will be created in the root
of your organization workspace.
diff --git a/src/humanloop/decorators/flow.py b/src/humanloop/decorators/flow.py
index 340be60a..e39a61e7 100644
--- a/src/humanloop/decorators/flow.py
+++ b/src/humanloop/decorators/flow.py
@@ -1,3 +1,4 @@
+import json
import logging
from functools import wraps
from typing import Any, Callable, Mapping, Optional, Sequence
@@ -10,7 +11,7 @@
from humanloop.eval_utils.types import File
from humanloop.otel import TRACE_FLOW_CONTEXT, FlowContext
from humanloop.otel.constants import HUMANLOOP_FILE_KEY, HUMANLOOP_FILE_TYPE_KEY, HUMANLOOP_LOG_KEY, HUMANLOOP_PATH_KEY
-from humanloop.otel.helpers import generate_span_id, write_to_opentelemetry_span
+from humanloop.otel.helpers import generate_span_id, jsonify_if_not_string, write_to_opentelemetry_span
from humanloop.requests import FlowKernelRequestParams as FlowDict
from humanloop.requests.flow_kernel_request import FlowKernelRequestParams
@@ -63,6 +64,10 @@ def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any:
# Call the decorated function
try:
output = func(*args, **kwargs)
+ output = jsonify_if_not_string(
+ func=func,
+ output=output,
+ )
error = None
except Exception as e:
logger.error(f"Error calling {func.__name__}: {e}")
diff --git a/src/humanloop/decorators/prompt.py b/src/humanloop/decorators/prompt.py
index 2390fb65..2d1eb569 100644
--- a/src/humanloop/decorators/prompt.py
+++ b/src/humanloop/decorators/prompt.py
@@ -11,7 +11,7 @@
from humanloop.eval_utils import File
from humanloop.otel import TRACE_FLOW_CONTEXT, FlowContext
from humanloop.otel.constants import HUMANLOOP_FILE_KEY, HUMANLOOP_FILE_TYPE_KEY, HUMANLOOP_LOG_KEY, HUMANLOOP_PATH_KEY
-from humanloop.otel.helpers import generate_span_id, write_to_opentelemetry_span
+from humanloop.otel.helpers import generate_span_id, jsonify_if_not_string, write_to_opentelemetry_span
logger = logging.getLogger("humanloop.sdk")
@@ -54,6 +54,10 @@ def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any:
# Call the decorated function
try:
output = func(*args, **kwargs)
+ output = jsonify_if_not_string(
+ func=func,
+ output=output,
+ )
error = None
except Exception as e:
logger.error(f"Error calling {func.__name__}: {e}")
diff --git a/src/humanloop/decorators/tool.py b/src/humanloop/decorators/tool.py
index f75596c3..bfc3e786 100644
--- a/src/humanloop/decorators/tool.py
+++ b/src/humanloop/decorators/tool.py
@@ -20,7 +20,7 @@
HUMANLOOP_LOG_KEY,
HUMANLOOP_PATH_KEY,
)
-from humanloop.otel.helpers import generate_span_id, write_to_opentelemetry_span
+from humanloop.otel.helpers import generate_span_id, jsonify_if_not_string, write_to_opentelemetry_span
from humanloop.requests.tool_function import ToolFunctionParams
from humanloop.requests.tool_kernel_request import ToolKernelRequestParams
@@ -72,6 +72,10 @@ def wrapper(*args, **kwargs):
# Call the decorated function
try:
output = func(*args, **kwargs)
+ output = jsonify_if_not_string(
+ func=func,
+ output=output,
+ )
error = None
except Exception as e:
logger.error(f"Error calling {func.__name__}: {e}")
@@ -178,7 +182,7 @@ def _build_function_parameters_property(func) -> _JSONSchemaFunctionParameters:
try:
parameter_signature = _parse_annotation(parameter.annotation)
except ValueError as e:
- raise ValueError(f"{func.__name__}: {e.args[0]}") from e
+ raise ValueError(f"Error parsing signature of @tool annotated function {func.__name__}: {e}") from e
param_json_schema = _annotation_parse_to_json_schema(parameter_signature)
properties[parameter.name] = param_json_schema
if not _parameter_is_optional(parameter):
diff --git a/src/humanloop/otel/helpers.py b/src/humanloop/otel/helpers.py
index 6e5baf11..d25a5674 100644
--- a/src/humanloop/otel/helpers.py
+++ b/src/humanloop/otel/helpers.py
@@ -1,5 +1,6 @@
+import json
import uuid
-from typing import Union
+from typing import Any, Callable, Union
from opentelemetry.sdk.trace import ReadableSpan
from opentelemetry.trace import SpanKind
@@ -289,3 +290,12 @@ def module_is_installed(module_name: str) -> bool:
def generate_span_id() -> str:
return str(uuid.uuid4())
+
+
+def jsonify_if_not_string(func: Callable, output: Any) -> str:
+ if not isinstance(output, str):
+ try:
+ output = json.dumps(output)
+ except TypeError as e:
+ raise TypeError(f"Output of {func.__name__} must be a string or JSON serializable") from e
+ return output
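A short usage sketch for the new helper, assuming it is imported from `humanloop.otel.helpers`, where this diff adds it:

```python
from humanloop.otel.helpers import jsonify_if_not_string

def returns_dict() -> dict:
    return {"answer": 42}

print(jsonify_if_not_string(func=returns_dict, output=returns_dict()))
# -> '{"answer": 42}'

def returns_unserializable() -> object:
    return object()

# Raises TypeError: Output of returns_unserializable must be a string or JSON serializable
jsonify_if_not_string(func=returns_unserializable, output=returns_unserializable())
```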
From 558df345d6d652e454f8202c699da2343aee60a3 Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Tue, 12 Nov 2024 13:43:14 +0000
Subject: [PATCH 64/70] Relax dependency requirements
---
poetry.lock | 989 ++++++++++++------------
pyproject.toml | 29 +-
src/humanloop/decorators/flow.py | 9 +-
src/humanloop/decorators/prompt.py | 8 +-
src/humanloop/decorators/tool.py | 8 +-
src/humanloop/otel/__init__.py | 5 +
tests/decorators/test_tool_decorator.py | 5 +-
7 files changed, 541 insertions(+), 512 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index 483d2160..c6a0f9f0 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -13,13 +13,13 @@ files = [
[[package]]
name = "anthropic"
-version = "0.37.1"
+version = "0.39.0"
description = "The official Python library for the anthropic API"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "anthropic-0.37.1-py3-none-any.whl", hash = "sha256:8f550f88906823752e2abf99fbe491fbc8d40bce4cb26b9663abdf7be990d721"},
- {file = "anthropic-0.37.1.tar.gz", hash = "sha256:99f688265795daa7ba9256ee68eaf2f05d53cd99d7417f4a0c2dc292c106d00a"},
+ {file = "anthropic-0.39.0-py3-none-any.whl", hash = "sha256:ea17093ae0ce0e1768b0c46501d6086b5bcd74ff39d68cd2d6396374e9de7c09"},
+ {file = "anthropic-0.39.0.tar.gz", hash = "sha256:94671cc80765f9ce693f76d63a97ee9bef4c2d6063c044e983d21a2e262f63ba"},
]
[package.dependencies]
@@ -29,7 +29,6 @@ httpx = ">=0.23.0,<1"
jiter = ">=0.4.0,<1"
pydantic = ">=1.9.0,<3"
sniffio = "*"
-tokenizers = ">=0.13.0"
typing-extensions = ">=4.7,<5"
[package.extras]
@@ -519,22 +518,26 @@ all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2
[[package]]
name = "importlib-metadata"
-version = "8.4.0"
+version = "8.5.0"
description = "Read metadata from Python packages"
optional = false
python-versions = ">=3.8"
files = [
- {file = "importlib_metadata-8.4.0-py3-none-any.whl", hash = "sha256:66f342cc6ac9818fc6ff340576acd24d65ba0b3efabb2b4ac08b598965a4a2f1"},
- {file = "importlib_metadata-8.4.0.tar.gz", hash = "sha256:9a547d3bc3608b025f93d403fdd1aae741c24fbb8314df4b155675742ce303c5"},
+ {file = "importlib_metadata-8.5.0-py3-none-any.whl", hash = "sha256:45e54197d28b7a7f1559e60b95e7c567032b602131fbd588f1497f47880aa68b"},
+ {file = "importlib_metadata-8.5.0.tar.gz", hash = "sha256:71522656f0abace1d072b9e5481a48f07c138e00f079c38c8f883823f9c26bd7"},
]
[package.dependencies]
-zipp = ">=0.5"
+zipp = ">=3.20"
[package.extras]
+check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"]
+cover = ["pytest-cov"]
doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
+enabler = ["pytest-enabler (>=2.2)"]
perf = ["ipython"]
-test = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-perf (>=0.9.2)", "pytest-ruff (>=0.2.1)"]
+test = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"]
+type = ["pytest-mypy"]
[[package]]
name = "iniconfig"
@@ -666,48 +669,55 @@ referencing = ">=0.31.0"
[[package]]
name = "mypy"
-version = "1.0.1"
+version = "1.13.0"
description = "Optional static typing for Python"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "mypy-1.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:71a808334d3f41ef011faa5a5cd8153606df5fc0b56de5b2e89566c8093a0c9a"},
- {file = "mypy-1.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:920169f0184215eef19294fa86ea49ffd4635dedfdea2b57e45cb4ee85d5ccaf"},
- {file = "mypy-1.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:27a0f74a298769d9fdc8498fcb4f2beb86f0564bcdb1a37b58cbbe78e55cf8c0"},
- {file = "mypy-1.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:65b122a993d9c81ea0bfde7689b3365318a88bde952e4dfa1b3a8b4ac05d168b"},
- {file = "mypy-1.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:5deb252fd42a77add936b463033a59b8e48eb2eaec2976d76b6878d031933fe4"},
- {file = "mypy-1.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2013226d17f20468f34feddd6aae4635a55f79626549099354ce641bc7d40262"},
- {file = "mypy-1.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:48525aec92b47baed9b3380371ab8ab6e63a5aab317347dfe9e55e02aaad22e8"},
- {file = "mypy-1.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c96b8a0c019fe29040d520d9257d8c8f122a7343a8307bf8d6d4a43f5c5bfcc8"},
- {file = "mypy-1.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:448de661536d270ce04f2d7dddaa49b2fdba6e3bd8a83212164d4174ff43aa65"},
- {file = "mypy-1.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:d42a98e76070a365a1d1c220fcac8aa4ada12ae0db679cb4d910fabefc88b994"},
- {file = "mypy-1.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e64f48c6176e243ad015e995de05af7f22bbe370dbb5b32bd6988438ec873919"},
- {file = "mypy-1.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5fdd63e4f50e3538617887e9aee91855368d9fc1dea30da743837b0df7373bc4"},
- {file = "mypy-1.0.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:dbeb24514c4acbc78d205f85dd0e800f34062efcc1f4a4857c57e4b4b8712bff"},
- {file = "mypy-1.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a2948c40a7dd46c1c33765718936669dc1f628f134013b02ff5ac6c7ef6942bf"},
- {file = "mypy-1.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5bc8d6bd3b274dd3846597855d96d38d947aedba18776aa998a8d46fabdaed76"},
- {file = "mypy-1.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:17455cda53eeee0a4adb6371a21dd3dbf465897de82843751cf822605d152c8c"},
- {file = "mypy-1.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e831662208055b006eef68392a768ff83596035ffd6d846786578ba1714ba8f6"},
- {file = "mypy-1.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e60d0b09f62ae97a94605c3f73fd952395286cf3e3b9e7b97f60b01ddfbbda88"},
- {file = "mypy-1.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:0af4f0e20706aadf4e6f8f8dc5ab739089146b83fd53cb4a7e0e850ef3de0bb6"},
- {file = "mypy-1.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:24189f23dc66f83b839bd1cce2dfc356020dfc9a8bae03978477b15be61b062e"},
- {file = "mypy-1.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:93a85495fb13dc484251b4c1fd7a5ac370cd0d812bbfc3b39c1bafefe95275d5"},
- {file = "mypy-1.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f546ac34093c6ce33f6278f7c88f0f147a4849386d3bf3ae193702f4fe31407"},
- {file = "mypy-1.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c6c2ccb7af7154673c591189c3687b013122c5a891bb5651eca3db8e6c6c55bd"},
- {file = "mypy-1.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:15b5a824b58c7c822c51bc66308e759243c32631896743f030daf449fe3677f3"},
- {file = "mypy-1.0.1-py3-none-any.whl", hash = "sha256:eda5c8b9949ed411ff752b9a01adda31afe7eae1e53e946dbdf9db23865e66c4"},
- {file = "mypy-1.0.1.tar.gz", hash = "sha256:28cea5a6392bb43d266782983b5a4216c25544cd7d80be681a155ddcdafd152d"},
+ {file = "mypy-1.13.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6607e0f1dd1fb7f0aca14d936d13fd19eba5e17e1cd2a14f808fa5f8f6d8f60a"},
+ {file = "mypy-1.13.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8a21be69bd26fa81b1f80a61ee7ab05b076c674d9b18fb56239d72e21d9f4c80"},
+ {file = "mypy-1.13.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7b2353a44d2179846a096e25691d54d59904559f4232519d420d64da6828a3a7"},
+ {file = "mypy-1.13.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0730d1c6a2739d4511dc4253f8274cdd140c55c32dfb0a4cf8b7a43f40abfa6f"},
+ {file = "mypy-1.13.0-cp310-cp310-win_amd64.whl", hash = "sha256:c5fc54dbb712ff5e5a0fca797e6e0aa25726c7e72c6a5850cfd2adbc1eb0a372"},
+ {file = "mypy-1.13.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:581665e6f3a8a9078f28d5502f4c334c0c8d802ef55ea0e7276a6e409bc0d82d"},
+ {file = "mypy-1.13.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3ddb5b9bf82e05cc9a627e84707b528e5c7caaa1c55c69e175abb15a761cec2d"},
+ {file = "mypy-1.13.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:20c7ee0bc0d5a9595c46f38beb04201f2620065a93755704e141fcac9f59db2b"},
+ {file = "mypy-1.13.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3790ded76f0b34bc9c8ba4def8f919dd6a46db0f5a6610fb994fe8efdd447f73"},
+ {file = "mypy-1.13.0-cp311-cp311-win_amd64.whl", hash = "sha256:51f869f4b6b538229c1d1bcc1dd7d119817206e2bc54e8e374b3dfa202defcca"},
+ {file = "mypy-1.13.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:5c7051a3461ae84dfb5dd15eff5094640c61c5f22257c8b766794e6dd85e72d5"},
+ {file = "mypy-1.13.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:39bb21c69a5d6342f4ce526e4584bc5c197fd20a60d14a8624d8743fffb9472e"},
+ {file = "mypy-1.13.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:164f28cb9d6367439031f4c81e84d3ccaa1e19232d9d05d37cb0bd880d3f93c2"},
+ {file = "mypy-1.13.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a4c1bfcdbce96ff5d96fc9b08e3831acb30dc44ab02671eca5953eadad07d6d0"},
+ {file = "mypy-1.13.0-cp312-cp312-win_amd64.whl", hash = "sha256:a0affb3a79a256b4183ba09811e3577c5163ed06685e4d4b46429a271ba174d2"},
+ {file = "mypy-1.13.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a7b44178c9760ce1a43f544e595d35ed61ac2c3de306599fa59b38a6048e1aa7"},
+ {file = "mypy-1.13.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5d5092efb8516d08440e36626f0153b5006d4088c1d663d88bf79625af3d1d62"},
+ {file = "mypy-1.13.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:de2904956dac40ced10931ac967ae63c5089bd498542194b436eb097a9f77bc8"},
+ {file = "mypy-1.13.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:7bfd8836970d33c2105562650656b6846149374dc8ed77d98424b40b09340ba7"},
+ {file = "mypy-1.13.0-cp313-cp313-win_amd64.whl", hash = "sha256:9f73dba9ec77acb86457a8fc04b5239822df0c14a082564737833d2963677dbc"},
+ {file = "mypy-1.13.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:100fac22ce82925f676a734af0db922ecfea991e1d7ec0ceb1e115ebe501301a"},
+ {file = "mypy-1.13.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7bcb0bb7f42a978bb323a7c88f1081d1b5dee77ca86f4100735a6f541299d8fb"},
+ {file = "mypy-1.13.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bde31fc887c213e223bbfc34328070996061b0833b0a4cfec53745ed61f3519b"},
+ {file = "mypy-1.13.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:07de989f89786f62b937851295ed62e51774722e5444a27cecca993fc3f9cd74"},
+ {file = "mypy-1.13.0-cp38-cp38-win_amd64.whl", hash = "sha256:4bde84334fbe19bad704b3f5b78c4abd35ff1026f8ba72b29de70dda0916beb6"},
+ {file = "mypy-1.13.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0246bcb1b5de7f08f2826451abd947bf656945209b140d16ed317f65a17dc7dc"},
+ {file = "mypy-1.13.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7f5b7deae912cf8b77e990b9280f170381fdfbddf61b4ef80927edd813163732"},
+ {file = "mypy-1.13.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7029881ec6ffb8bc233a4fa364736789582c738217b133f1b55967115288a2bc"},
+ {file = "mypy-1.13.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3e38b980e5681f28f033f3be86b099a247b13c491f14bb8b1e1e134d23bb599d"},
+ {file = "mypy-1.13.0-cp39-cp39-win_amd64.whl", hash = "sha256:a6789be98a2017c912ae6ccb77ea553bbaf13d27605d2ca20a76dfbced631b24"},
+ {file = "mypy-1.13.0-py3-none-any.whl", hash = "sha256:9c250883f9fd81d212e0952c92dbfcc96fc237f4b7c92f56ac81fd48460b3e5a"},
+ {file = "mypy-1.13.0.tar.gz", hash = "sha256:0291a61b6fbf3e6673e3405cfcc0e7650bebc7939659fdca2702958038bd835e"},
]
[package.dependencies]
-mypy-extensions = ">=0.4.3"
+mypy-extensions = ">=1.0.0"
tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
-typing-extensions = ">=3.10"
+typing-extensions = ">=4.6.0"
[package.extras]
dmypy = ["psutil (>=4.0)"]
+faster-cache = ["orjson"]
install-types = ["pip"]
-python2 = ["typed-ast (>=1.4.0,<2)"]
+mypyc = ["setuptools (>=50)"]
reports = ["lxml"]
[[package]]
@@ -777,75 +787,77 @@ files = [
[[package]]
name = "numpy"
-version = "2.1.2"
+version = "2.1.3"
description = "Fundamental package for array computing in Python"
optional = false
python-versions = ">=3.10"
files = [
- {file = "numpy-2.1.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:30d53720b726ec36a7f88dc873f0eec8447fbc93d93a8f079dfac2629598d6ee"},
- {file = "numpy-2.1.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e8d3ca0a72dd8846eb6f7dfe8f19088060fcb76931ed592d29128e0219652884"},
- {file = "numpy-2.1.2-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:fc44e3c68ff00fd991b59092a54350e6e4911152682b4782f68070985aa9e648"},
- {file = "numpy-2.1.2-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:7c1c60328bd964b53f8b835df69ae8198659e2b9302ff9ebb7de4e5a5994db3d"},
- {file = "numpy-2.1.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6cdb606a7478f9ad91c6283e238544451e3a95f30fb5467fbf715964341a8a86"},
- {file = "numpy-2.1.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d666cb72687559689e9906197e3bec7b736764df6a2e58ee265e360663e9baf7"},
- {file = "numpy-2.1.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c6eef7a2dbd0abfb0d9eaf78b73017dbfd0b54051102ff4e6a7b2980d5ac1a03"},
- {file = "numpy-2.1.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:12edb90831ff481f7ef5f6bc6431a9d74dc0e5ff401559a71e5e4611d4f2d466"},
- {file = "numpy-2.1.2-cp310-cp310-win32.whl", hash = "sha256:a65acfdb9c6ebb8368490dbafe83c03c7e277b37e6857f0caeadbbc56e12f4fb"},
- {file = "numpy-2.1.2-cp310-cp310-win_amd64.whl", hash = "sha256:860ec6e63e2c5c2ee5e9121808145c7bf86c96cca9ad396c0bd3e0f2798ccbe2"},
- {file = "numpy-2.1.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b42a1a511c81cc78cbc4539675713bbcf9d9c3913386243ceff0e9429ca892fe"},
- {file = "numpy-2.1.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:faa88bc527d0f097abdc2c663cddf37c05a1c2f113716601555249805cf573f1"},
- {file = "numpy-2.1.2-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:c82af4b2ddd2ee72d1fc0c6695048d457e00b3582ccde72d8a1c991b808bb20f"},
- {file = "numpy-2.1.2-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:13602b3174432a35b16c4cfb5de9a12d229727c3dd47a6ce35111f2ebdf66ff4"},
- {file = "numpy-2.1.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1ebec5fd716c5a5b3d8dfcc439be82a8407b7b24b230d0ad28a81b61c2f4659a"},
- {file = "numpy-2.1.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2b49c3c0804e8ecb05d59af8386ec2f74877f7ca8fd9c1e00be2672e4d399b1"},
- {file = "numpy-2.1.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2cbba4b30bf31ddbe97f1c7205ef976909a93a66bb1583e983adbd155ba72ac2"},
- {file = "numpy-2.1.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8e00ea6fc82e8a804433d3e9cedaa1051a1422cb6e443011590c14d2dea59146"},
- {file = "numpy-2.1.2-cp311-cp311-win32.whl", hash = "sha256:5006b13a06e0b38d561fab5ccc37581f23c9511879be7693bd33c7cd15ca227c"},
- {file = "numpy-2.1.2-cp311-cp311-win_amd64.whl", hash = "sha256:f1eb068ead09f4994dec71c24b2844f1e4e4e013b9629f812f292f04bd1510d9"},
- {file = "numpy-2.1.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d7bf0a4f9f15b32b5ba53147369e94296f5fffb783db5aacc1be15b4bf72f43b"},
- {file = "numpy-2.1.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b1d0fcae4f0949f215d4632be684a539859b295e2d0cb14f78ec231915d644db"},
- {file = "numpy-2.1.2-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:f751ed0a2f250541e19dfca9f1eafa31a392c71c832b6bb9e113b10d050cb0f1"},
- {file = "numpy-2.1.2-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:bd33f82e95ba7ad632bc57837ee99dba3d7e006536200c4e9124089e1bf42426"},
- {file = "numpy-2.1.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b8cde4f11f0a975d1fd59373b32e2f5a562ade7cde4f85b7137f3de8fbb29a0"},
- {file = "numpy-2.1.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d95f286b8244b3649b477ac066c6906fbb2905f8ac19b170e2175d3d799f4df"},
- {file = "numpy-2.1.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ab4754d432e3ac42d33a269c8567413bdb541689b02d93788af4131018cbf366"},
- {file = "numpy-2.1.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e585c8ae871fd38ac50598f4763d73ec5497b0de9a0ab4ef5b69f01c6a046142"},
- {file = "numpy-2.1.2-cp312-cp312-win32.whl", hash = "sha256:9c6c754df29ce6a89ed23afb25550d1c2d5fdb9901d9c67a16e0b16eaf7e2550"},
- {file = "numpy-2.1.2-cp312-cp312-win_amd64.whl", hash = "sha256:456e3b11cb79ac9946c822a56346ec80275eaf2950314b249b512896c0d2505e"},
- {file = "numpy-2.1.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a84498e0d0a1174f2b3ed769b67b656aa5460c92c9554039e11f20a05650f00d"},
- {file = "numpy-2.1.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4d6ec0d4222e8ffdab1744da2560f07856421b367928026fb540e1945f2eeeaf"},
- {file = "numpy-2.1.2-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:259ec80d54999cc34cd1eb8ded513cb053c3bf4829152a2e00de2371bd406f5e"},
- {file = "numpy-2.1.2-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:675c741d4739af2dc20cd6c6a5c4b7355c728167845e3c6b0e824e4e5d36a6c3"},
- {file = "numpy-2.1.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05b2d4e667895cc55e3ff2b56077e4c8a5604361fc21a042845ea3ad67465aa8"},
- {file = "numpy-2.1.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:43cca367bf94a14aca50b89e9bc2061683116cfe864e56740e083392f533ce7a"},
- {file = "numpy-2.1.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:76322dcdb16fccf2ac56f99048af32259dcc488d9b7e25b51e5eca5147a3fb98"},
- {file = "numpy-2.1.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:32e16a03138cabe0cb28e1007ee82264296ac0983714094380b408097a418cfe"},
- {file = "numpy-2.1.2-cp313-cp313-win32.whl", hash = "sha256:242b39d00e4944431a3cd2db2f5377e15b5785920421993770cddb89992c3f3a"},
- {file = "numpy-2.1.2-cp313-cp313-win_amd64.whl", hash = "sha256:f2ded8d9b6f68cc26f8425eda5d3877b47343e68ca23d0d0846f4d312ecaa445"},
- {file = "numpy-2.1.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:2ffef621c14ebb0188a8633348504a35c13680d6da93ab5cb86f4e54b7e922b5"},
- {file = "numpy-2.1.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:ad369ed238b1959dfbade9018a740fb9392c5ac4f9b5173f420bd4f37ba1f7a0"},
- {file = "numpy-2.1.2-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:d82075752f40c0ddf57e6e02673a17f6cb0f8eb3f587f63ca1eaab5594da5b17"},
- {file = "numpy-2.1.2-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:1600068c262af1ca9580a527d43dc9d959b0b1d8e56f8a05d830eea39b7c8af6"},
- {file = "numpy-2.1.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a26ae94658d3ba3781d5e103ac07a876b3e9b29db53f68ed7df432fd033358a8"},
- {file = "numpy-2.1.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13311c2db4c5f7609b462bc0f43d3c465424d25c626d95040f073e30f7570e35"},
- {file = "numpy-2.1.2-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:2abbf905a0b568706391ec6fa15161fad0fb5d8b68d73c461b3c1bab6064dd62"},
- {file = "numpy-2.1.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:ef444c57d664d35cac4e18c298c47d7b504c66b17c2ea91312e979fcfbdfb08a"},
- {file = "numpy-2.1.2-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:bdd407c40483463898b84490770199d5714dcc9dd9b792f6c6caccc523c00952"},
- {file = "numpy-2.1.2-pp310-pypy310_pp73-macosx_14_0_x86_64.whl", hash = "sha256:da65fb46d4cbb75cb417cddf6ba5e7582eb7bb0b47db4b99c9fe5787ce5d91f5"},
- {file = "numpy-2.1.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c193d0b0238638e6fc5f10f1b074a6993cb13b0b431f64079a509d63d3aa8b7"},
- {file = "numpy-2.1.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a7d80b2e904faa63068ead63107189164ca443b42dd1930299e0d1cb041cec2e"},
- {file = "numpy-2.1.2.tar.gz", hash = "sha256:13532a088217fa624c99b843eeb54640de23b3414b14aa66d023805eb731066c"},
+ {file = "numpy-2.1.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c894b4305373b9c5576d7a12b473702afdf48ce5369c074ba304cc5ad8730dff"},
+ {file = "numpy-2.1.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b47fbb433d3260adcd51eb54f92a2ffbc90a4595f8970ee00e064c644ac788f5"},
+ {file = "numpy-2.1.3-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:825656d0743699c529c5943554d223c021ff0494ff1442152ce887ef4f7561a1"},
+ {file = "numpy-2.1.3-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:6a4825252fcc430a182ac4dee5a505053d262c807f8a924603d411f6718b88fd"},
+ {file = "numpy-2.1.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e711e02f49e176a01d0349d82cb5f05ba4db7d5e7e0defd026328e5cfb3226d3"},
+ {file = "numpy-2.1.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78574ac2d1a4a02421f25da9559850d59457bac82f2b8d7a44fe83a64f770098"},
+ {file = "numpy-2.1.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c7662f0e3673fe4e832fe07b65c50342ea27d989f92c80355658c7f888fcc83c"},
+ {file = "numpy-2.1.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:fa2d1337dc61c8dc417fbccf20f6d1e139896a30721b7f1e832b2bb6ef4eb6c4"},
+ {file = "numpy-2.1.3-cp310-cp310-win32.whl", hash = "sha256:72dcc4a35a8515d83e76b58fdf8113a5c969ccd505c8a946759b24e3182d1f23"},
+ {file = "numpy-2.1.3-cp310-cp310-win_amd64.whl", hash = "sha256:ecc76a9ba2911d8d37ac01de72834d8849e55473457558e12995f4cd53e778e0"},
+ {file = "numpy-2.1.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4d1167c53b93f1f5d8a139a742b3c6f4d429b54e74e6b57d0eff40045187b15d"},
+ {file = "numpy-2.1.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c80e4a09b3d95b4e1cac08643f1152fa71a0a821a2d4277334c88d54b2219a41"},
+ {file = "numpy-2.1.3-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:576a1c1d25e9e02ed7fa5477f30a127fe56debd53b8d2c89d5578f9857d03ca9"},
+ {file = "numpy-2.1.3-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:973faafebaae4c0aaa1a1ca1ce02434554d67e628b8d805e61f874b84e136b09"},
+ {file = "numpy-2.1.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:762479be47a4863e261a840e8e01608d124ee1361e48b96916f38b119cfda04a"},
+ {file = "numpy-2.1.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc6f24b3d1ecc1eebfbf5d6051faa49af40b03be1aaa781ebdadcbc090b4539b"},
+ {file = "numpy-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:17ee83a1f4fef3c94d16dc1802b998668b5419362c8a4f4e8a491de1b41cc3ee"},
+ {file = "numpy-2.1.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:15cb89f39fa6d0bdfb600ea24b250e5f1a3df23f901f51c8debaa6a5d122b2f0"},
+ {file = "numpy-2.1.3-cp311-cp311-win32.whl", hash = "sha256:d9beb777a78c331580705326d2367488d5bc473b49a9bc3036c154832520aca9"},
+ {file = "numpy-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:d89dd2b6da69c4fff5e39c28a382199ddedc3a5be5390115608345dec660b9e2"},
+ {file = "numpy-2.1.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f55ba01150f52b1027829b50d70ef1dafd9821ea82905b63936668403c3b471e"},
+ {file = "numpy-2.1.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:13138eadd4f4da03074851a698ffa7e405f41a0845a6b1ad135b81596e4e9958"},
+ {file = "numpy-2.1.3-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:a6b46587b14b888e95e4a24d7b13ae91fa22386c199ee7b418f449032b2fa3b8"},
+ {file = "numpy-2.1.3-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:0fa14563cc46422e99daef53d725d0c326e99e468a9320a240affffe87852564"},
+ {file = "numpy-2.1.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8637dcd2caa676e475503d1f8fdb327bc495554e10838019651b76d17b98e512"},
+ {file = "numpy-2.1.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2312b2aa89e1f43ecea6da6ea9a810d06aae08321609d8dc0d0eda6d946a541b"},
+ {file = "numpy-2.1.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a38c19106902bb19351b83802531fea19dee18e5b37b36454f27f11ff956f7fc"},
+ {file = "numpy-2.1.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:02135ade8b8a84011cbb67dc44e07c58f28575cf9ecf8ab304e51c05528c19f0"},
+ {file = "numpy-2.1.3-cp312-cp312-win32.whl", hash = "sha256:e6988e90fcf617da2b5c78902fe8e668361b43b4fe26dbf2d7b0f8034d4cafb9"},
+ {file = "numpy-2.1.3-cp312-cp312-win_amd64.whl", hash = "sha256:0d30c543f02e84e92c4b1f415b7c6b5326cbe45ee7882b6b77db7195fb971e3a"},
+ {file = "numpy-2.1.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:96fe52fcdb9345b7cd82ecd34547fca4321f7656d500eca497eb7ea5a926692f"},
+ {file = "numpy-2.1.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f653490b33e9c3a4c1c01d41bc2aef08f9475af51146e4a7710c450cf9761598"},
+ {file = "numpy-2.1.3-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:dc258a761a16daa791081d026f0ed4399b582712e6fc887a95af09df10c5ca57"},
+ {file = "numpy-2.1.3-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:016d0f6f5e77b0f0d45d77387ffa4bb89816b57c835580c3ce8e099ef830befe"},
+ {file = "numpy-2.1.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c181ba05ce8299c7aa3125c27b9c2167bca4a4445b7ce73d5febc411ca692e43"},
+ {file = "numpy-2.1.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5641516794ca9e5f8a4d17bb45446998c6554704d888f86df9b200e66bdcce56"},
+ {file = "numpy-2.1.3-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ea4dedd6e394a9c180b33c2c872b92f7ce0f8e7ad93e9585312b0c5a04777a4a"},
+ {file = "numpy-2.1.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b0df3635b9c8ef48bd3be5f862cf71b0a4716fa0e702155c45067c6b711ddcef"},
+ {file = "numpy-2.1.3-cp313-cp313-win32.whl", hash = "sha256:50ca6aba6e163363f132b5c101ba078b8cbd3fa92c7865fd7d4d62d9779ac29f"},
+ {file = "numpy-2.1.3-cp313-cp313-win_amd64.whl", hash = "sha256:747641635d3d44bcb380d950679462fae44f54b131be347d5ec2bce47d3df9ed"},
+ {file = "numpy-2.1.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:996bb9399059c5b82f76b53ff8bb686069c05acc94656bb259b1d63d04a9506f"},
+ {file = "numpy-2.1.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:45966d859916ad02b779706bb43b954281db43e185015df6eb3323120188f9e4"},
+ {file = "numpy-2.1.3-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:baed7e8d7481bfe0874b566850cb0b85243e982388b7b23348c6db2ee2b2ae8e"},
+ {file = "numpy-2.1.3-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:a9f7f672a3388133335589cfca93ed468509cb7b93ba3105fce780d04a6576a0"},
+ {file = "numpy-2.1.3-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7aac50327da5d208db2eec22eb11e491e3fe13d22653dce51b0f4109101b408"},
+ {file = "numpy-2.1.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4394bc0dbd074b7f9b52024832d16e019decebf86caf909d94f6b3f77a8ee3b6"},
+ {file = "numpy-2.1.3-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:50d18c4358a0a8a53f12a8ba9d772ab2d460321e6a93d6064fc22443d189853f"},
+ {file = "numpy-2.1.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:14e253bd43fc6b37af4921b10f6add6925878a42a0c5fe83daee390bca80bc17"},
+ {file = "numpy-2.1.3-cp313-cp313t-win32.whl", hash = "sha256:08788d27a5fd867a663f6fc753fd7c3ad7e92747efc73c53bca2f19f8bc06f48"},
+ {file = "numpy-2.1.3-cp313-cp313t-win_amd64.whl", hash = "sha256:2564fbdf2b99b3f815f2107c1bbc93e2de8ee655a69c261363a1172a79a257d4"},
+ {file = "numpy-2.1.3-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:4f2015dfe437dfebbfce7c85c7b53d81ba49e71ba7eadbf1df40c915af75979f"},
+ {file = "numpy-2.1.3-pp310-pypy310_pp73-macosx_14_0_x86_64.whl", hash = "sha256:3522b0dfe983a575e6a9ab3a4a4dfe156c3e428468ff08ce582b9bb6bd1d71d4"},
+ {file = "numpy-2.1.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c006b607a865b07cd981ccb218a04fc86b600411d83d6fc261357f1c0966755d"},
+ {file = "numpy-2.1.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:e14e26956e6f1696070788252dcdff11b4aca4c3e8bd166e0df1bb8f315a67cb"},
+ {file = "numpy-2.1.3.tar.gz", hash = "sha256:aa08e04e08aaf974d4458def539dece0d28146d866a39da5639596f4921fd761"},
]
[[package]]
name = "openai"
-version = "1.53.0"
+version = "1.54.3"
description = "The official Python library for the openai API"
optional = false
-python-versions = ">=3.7.1"
+python-versions = ">=3.8"
files = [
- {file = "openai-1.53.0-py3-none-any.whl", hash = "sha256:20f408c32fc5cb66e60c6882c994cdca580a5648e10045cd840734194f033418"},
- {file = "openai-1.53.0.tar.gz", hash = "sha256:be2c4e77721b166cce8130e544178b7d579f751b4b074ffbaade3854b6f85ec5"},
+ {file = "openai-1.54.3-py3-none-any.whl", hash = "sha256:f18dbaf09c50d70c4185b892a2a553f80681d1d866323a2da7f7be2f688615d5"},
+ {file = "openai-1.54.3.tar.gz", hash = "sha256:7511b74eeb894ac0b0253dc71f087a15d2e4d71d22d0088767205143d880cca6"},
]
[package.dependencies]
@@ -863,161 +875,180 @@ datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"]
[[package]]
name = "opentelemetry-api"
-version = "1.27.0"
+version = "1.28.1"
description = "OpenTelemetry Python API"
optional = false
python-versions = ">=3.8"
files = [
- {file = "opentelemetry_api-1.27.0-py3-none-any.whl", hash = "sha256:953d5871815e7c30c81b56d910c707588000fff7a3ca1c73e6531911d53065e7"},
- {file = "opentelemetry_api-1.27.0.tar.gz", hash = "sha256:ed673583eaa5f81b5ce5e86ef7cdaf622f88ef65f0b9aab40b843dcae5bef342"},
+ {file = "opentelemetry_api-1.28.1-py3-none-any.whl", hash = "sha256:bfe86c95576cf19a914497f439fd79c9553a38de0adbdc26f7cfc46b0c00b16c"},
+ {file = "opentelemetry_api-1.28.1.tar.gz", hash = "sha256:6fa7295a12c707f5aebef82da3d9ec5afe6992f3e42bfe7bec0339a44b3518e7"},
]
[package.dependencies]
deprecated = ">=1.2.6"
-importlib-metadata = ">=6.0,<=8.4.0"
+importlib-metadata = ">=6.0,<=8.5.0"
[[package]]
name = "opentelemetry-instrumentation"
-version = "0.48b0"
+version = "0.49b1"
description = "Instrumentation Tools & Auto Instrumentation for OpenTelemetry Python"
optional = false
python-versions = ">=3.8"
files = [
- {file = "opentelemetry_instrumentation-0.48b0-py3-none-any.whl", hash = "sha256:a69750dc4ba6a5c3eb67986a337185a25b739966d80479befe37b546fc870b44"},
- {file = "opentelemetry_instrumentation-0.48b0.tar.gz", hash = "sha256:94929685d906380743a71c3970f76b5f07476eea1834abd5dd9d17abfe23cc35"},
+ {file = "opentelemetry_instrumentation-0.49b1-py3-none-any.whl", hash = "sha256:0a9d3821736104013693ef3b8a9d29b41f2f3a81ee2d8c9288b52d62bae5747c"},
+ {file = "opentelemetry_instrumentation-0.49b1.tar.gz", hash = "sha256:2d0e41181b7957ba061bb436b969ad90545ac3eba65f290830009b4264d2824e"},
]
[package.dependencies]
opentelemetry-api = ">=1.4,<2.0"
-setuptools = ">=16.0"
+opentelemetry-semantic-conventions = "0.49b1"
+packaging = ">=18.0"
wrapt = ">=1.0.0,<2.0.0"
[[package]]
name = "opentelemetry-instrumentation-anthropic"
-version = "0.33.3"
+version = "0.33.11"
description = "OpenTelemetry Anthropic instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_anthropic-0.33.3-py3-none-any.whl", hash = "sha256:dc4110c6400708d600f79fd78e8e8fe04b90a82b44949817cc91c961cd4db6e7"},
- {file = "opentelemetry_instrumentation_anthropic-0.33.3.tar.gz", hash = "sha256:d245f1c732caebe4706a4900084758296d1d46d37e042bbd8542d0aa0e691899"},
+ {file = "opentelemetry_instrumentation_anthropic-0.33.11-py3-none-any.whl", hash = "sha256:4e9622fcac4cb4e09bcefe8d6f5cf0776a5e20e073133ebb0b7c4e82f2b0b06a"},
+ {file = "opentelemetry_instrumentation_anthropic-0.33.11.tar.gz", hash = "sha256:b4671cec5ef7e8b138c2a3d37a8d99e757fdda1a7c67e0cbc28cfca1928b23bb"},
]
[package.dependencies]
-opentelemetry-api = ">=1.27.0,<2.0.0"
-opentelemetry-instrumentation = ">=0.48b0,<0.49"
-opentelemetry-semantic-conventions = ">=0.48b0,<0.49"
-opentelemetry-semantic-conventions-ai = "0.4.1"
+opentelemetry-api = ">=1.28.0,<2.0.0"
+opentelemetry-instrumentation = ">=0.49b0,<0.50"
+opentelemetry-semantic-conventions = ">=0.49b0,<0.50"
+opentelemetry-semantic-conventions-ai = "0.4.2"
+
+[[package]]
+name = "opentelemetry-instrumentation-bedrock"
+version = "0.33.11"
+description = "OpenTelemetry Bedrock instrumentation"
+optional = false
+python-versions = "<4,>=3.9"
+files = [
+ {file = "opentelemetry_instrumentation_bedrock-0.33.11-py3-none-any.whl", hash = "sha256:ccafdd60dfe10f0f9a0dfbf0a32ec998d120f4af46246b11d89863ad6ea9a9f1"},
+ {file = "opentelemetry_instrumentation_bedrock-0.33.11.tar.gz", hash = "sha256:3457f439488e6674da2b9180f4f73ecb75868e5315b81f11217cd3dea97a0c1e"},
+]
+
+[package.dependencies]
+anthropic = ">=0.17.0"
+opentelemetry-api = ">=1.28.0,<2.0.0"
+opentelemetry-instrumentation = ">=0.49b0,<0.50"
+opentelemetry-semantic-conventions = ">=0.49b0,<0.50"
+opentelemetry-semantic-conventions-ai = "0.4.2"
[[package]]
name = "opentelemetry-instrumentation-cohere"
-version = "0.33.3"
+version = "0.33.11"
description = "OpenTelemetry Cohere instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_cohere-0.33.3-py3-none-any.whl", hash = "sha256:b0a614a321f332e31eb74980a603303123b58a3627a11e7db5f13a8b3c660311"},
- {file = "opentelemetry_instrumentation_cohere-0.33.3.tar.gz", hash = "sha256:9d940cb30b7e4be94f063f5afadeb2572f4cfe69a731d7c45faaa9f034991a5e"},
+ {file = "opentelemetry_instrumentation_cohere-0.33.11-py3-none-any.whl", hash = "sha256:13c9afab4a9c0a90d33ac1bb7530535b0acee31dc056a53ae78d0daf879fdf26"},
+ {file = "opentelemetry_instrumentation_cohere-0.33.11.tar.gz", hash = "sha256:07ebd381cc7ce0d14bc61649e2191f2a65962c609439979447fc8b1d09580310"},
]
[package.dependencies]
-opentelemetry-api = ">=1.27.0,<2.0.0"
-opentelemetry-instrumentation = ">=0.48b0,<0.49"
-opentelemetry-semantic-conventions = ">=0.48b0,<0.49"
-opentelemetry-semantic-conventions-ai = "0.4.1"
+opentelemetry-api = ">=1.28.0,<2.0.0"
+opentelemetry-instrumentation = ">=0.49b0,<0.50"
+opentelemetry-semantic-conventions = ">=0.49b0,<0.50"
+opentelemetry-semantic-conventions-ai = "0.4.2"
[[package]]
name = "opentelemetry-instrumentation-groq"
-version = "0.33.3"
+version = "0.33.11"
description = "OpenTelemetry Groq instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_groq-0.33.3-py3-none-any.whl", hash = "sha256:53d75f8ec2dbcf5e0f06ed53a7a4cb875823749cb96bbc07dbb7a1d5ee374e32"},
- {file = "opentelemetry_instrumentation_groq-0.33.3.tar.gz", hash = "sha256:98408aaf91e2d55ad348deb12666339fbcb972b18ec511c4f394d3fac37041eb"},
+ {file = "opentelemetry_instrumentation_groq-0.33.11-py3-none-any.whl", hash = "sha256:c3c204abfd9a0096eb8b7aacd719b62b87ad812022bbb0c9ee38e05508123cb7"},
+ {file = "opentelemetry_instrumentation_groq-0.33.11.tar.gz", hash = "sha256:26792d1542c8f1d59aa65e83e61fa452f239c9dbf860c545e8268f944413af80"},
]
[package.dependencies]
-opentelemetry-api = ">=1.27.0,<2.0.0"
-opentelemetry-instrumentation = ">=0.48b0,<0.49"
-opentelemetry-semantic-conventions = ">=0.48b0,<0.49"
-opentelemetry-semantic-conventions-ai = "0.4.1"
+opentelemetry-api = ">=1.28.0,<2.0.0"
+opentelemetry-instrumentation = ">=0.49b0,<0.50"
+opentelemetry-semantic-conventions = ">=0.49b0,<0.50"
+opentelemetry-semantic-conventions-ai = "0.4.2"
[[package]]
name = "opentelemetry-instrumentation-openai"
-version = "0.33.3"
+version = "0.33.11"
description = "OpenTelemetry OpenAI instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_openai-0.33.3-py3-none-any.whl", hash = "sha256:f5ef4452b269bb409cc260fd611834c33296495e39700fd6e6f83a1cef07b9fd"},
- {file = "opentelemetry_instrumentation_openai-0.33.3.tar.gz", hash = "sha256:06ad92d5d852f93ee7c0d9b545a412df5265044dae4d6be7056a10fa8afb2fdc"},
+ {file = "opentelemetry_instrumentation_openai-0.33.11-py3-none-any.whl", hash = "sha256:9289f8c4e0989fae95549840d45f797a1269899b5d4264bdf0be356b8f7c3f7e"},
+ {file = "opentelemetry_instrumentation_openai-0.33.11.tar.gz", hash = "sha256:bacb7fb8facb2c45076437c5b3e526fda486db3b76edbb01b18e4e133b70eed0"},
]
[package.dependencies]
-opentelemetry-api = ">=1.27.0,<2.0.0"
-opentelemetry-instrumentation = ">=0.48b0,<0.49"
-opentelemetry-semantic-conventions = ">=0.48b0,<0.49"
-opentelemetry-semantic-conventions-ai = "0.4.1"
+opentelemetry-api = ">=1.28.0,<2.0.0"
+opentelemetry-instrumentation = ">=0.49b0,<0.50"
+opentelemetry-semantic-conventions = ">=0.49b0,<0.50"
+opentelemetry-semantic-conventions-ai = "0.4.2"
tiktoken = ">=0.6.0,<1"
[[package]]
name = "opentelemetry-instrumentation-replicate"
-version = "0.33.3"
+version = "0.33.11"
description = "OpenTelemetry Replicate instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_replicate-0.33.3-py3-none-any.whl", hash = "sha256:c2870c1939b69ff3c57a508404cec75329e07c907eb9600f47ec64be2c0b8310"},
- {file = "opentelemetry_instrumentation_replicate-0.33.3.tar.gz", hash = "sha256:06c9f63f7c235392567b10efe20f8cb2379f322d0a72e4c52ab4912f1ebb943a"},
+ {file = "opentelemetry_instrumentation_replicate-0.33.11-py3-none-any.whl", hash = "sha256:58619b28979efd83032e941d53850110bac62b125ff3e7e8b57d3f42f4e9170a"},
+ {file = "opentelemetry_instrumentation_replicate-0.33.11.tar.gz", hash = "sha256:52cdf7e5a6b5cd663aa1a1b3a1771c77b9c9dbbffe5ada1acf28d266650950ae"},
]
[package.dependencies]
-opentelemetry-api = ">=1.27.0,<2.0.0"
-opentelemetry-instrumentation = ">=0.48b0,<0.49"
-opentelemetry-semantic-conventions = ">=0.48b0,<0.49"
-opentelemetry-semantic-conventions-ai = "0.4.1"
+opentelemetry-api = ">=1.28.0,<2.0.0"
+opentelemetry-instrumentation = ">=0.49b0,<0.50"
+opentelemetry-semantic-conventions = ">=0.49b0,<0.50"
+opentelemetry-semantic-conventions-ai = "0.4.2"
[[package]]
name = "opentelemetry-sdk"
-version = "1.27.0"
+version = "1.28.1"
description = "OpenTelemetry Python SDK"
optional = false
python-versions = ">=3.8"
files = [
- {file = "opentelemetry_sdk-1.27.0-py3-none-any.whl", hash = "sha256:365f5e32f920faf0fd9e14fdfd92c086e317eaa5f860edba9cdc17a380d9197d"},
- {file = "opentelemetry_sdk-1.27.0.tar.gz", hash = "sha256:d525017dea0ccce9ba4e0245100ec46ecdc043f2d7b8315d56b19aff0904fa6f"},
+ {file = "opentelemetry_sdk-1.28.1-py3-none-any.whl", hash = "sha256:72aad7f5fcbe37113c4ab4899f6cdeb6ac77ed3e62f25a85e3627b12583dad0f"},
+ {file = "opentelemetry_sdk-1.28.1.tar.gz", hash = "sha256:100fa371b2046ffba6a340c18f0b2a0463acad7461e5177e126693b613a6ca57"},
]
[package.dependencies]
-opentelemetry-api = "1.27.0"
-opentelemetry-semantic-conventions = "0.48b0"
+opentelemetry-api = "1.28.1"
+opentelemetry-semantic-conventions = "0.49b1"
typing-extensions = ">=3.7.4"
[[package]]
name = "opentelemetry-semantic-conventions"
-version = "0.48b0"
+version = "0.49b1"
description = "OpenTelemetry Semantic Conventions"
optional = false
python-versions = ">=3.8"
files = [
- {file = "opentelemetry_semantic_conventions-0.48b0-py3-none-any.whl", hash = "sha256:a0de9f45c413a8669788a38569c7e0a11ce6ce97861a628cca785deecdc32a1f"},
- {file = "opentelemetry_semantic_conventions-0.48b0.tar.gz", hash = "sha256:12d74983783b6878162208be57c9effcb89dc88691c64992d70bb89dc00daa1a"},
+ {file = "opentelemetry_semantic_conventions-0.49b1-py3-none-any.whl", hash = "sha256:dd6f3ac8169d2198c752e1a63f827e5f5e110ae9b0ce33f2aad9a3baf0739743"},
+ {file = "opentelemetry_semantic_conventions-0.49b1.tar.gz", hash = "sha256:91817883b159ffb94c2ca9548509c4fe0aafce7c24f437aa6ac3fc613aa9a758"},
]
[package.dependencies]
deprecated = ">=1.2.6"
-opentelemetry-api = "1.27.0"
+opentelemetry-api = "1.28.1"
[[package]]
name = "opentelemetry-semantic-conventions-ai"
-version = "0.4.1"
+version = "0.4.2"
description = "OpenTelemetry Semantic Conventions Extension for Large Language Models"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_semantic_conventions_ai-0.4.1-py3-none-any.whl", hash = "sha256:b6c6e3976a5ea31058faeaf0450a6a56d4576a9734c94c1a4cb82332ee635fe3"},
- {file = "opentelemetry_semantic_conventions_ai-0.4.1.tar.gz", hash = "sha256:aaf59b2f24d745692170b96d86d7c5560f42443dcf88ced49ae9d4542db1902f"},
+ {file = "opentelemetry_semantic_conventions_ai-0.4.2-py3-none-any.whl", hash = "sha256:0a5432aacd441eb7dbdf62e0de3f3d90ed4f69595b687a6dd2ccc4c5b94c5861"},
+ {file = "opentelemetry_semantic_conventions_ai-0.4.2.tar.gz", hash = "sha256:90b969c7d838e03e30a9150ffe46543d8e58e9d7370c7221fd30d4ce4d7a1b96"},
]
[[package]]
@@ -1084,9 +1115,9 @@ files = [
[package.dependencies]
numpy = [
+ {version = ">=1.26.0", markers = "python_version >= \"3.12\""},
{version = ">=1.22.4", markers = "python_version < \"3.11\""},
{version = ">=1.23.2", markers = "python_version == \"3.11\""},
- {version = ">=1.26.0", markers = "python_version >= \"3.12\""},
]
python-dateutil = ">=2.8.2"
pytz = ">=2020.1"
@@ -1192,8 +1223,8 @@ files = [
annotated-types = ">=0.6.0"
pydantic-core = "2.23.4"
typing-extensions = [
- {version = ">=4.6.1", markers = "python_version < \"3.13\""},
{version = ">=4.12.2", markers = "python_version >= \"3.13\""},
+ {version = ">=4.6.1", markers = "python_version < \"3.13\""},
]
[package.extras]
@@ -1459,105 +1490,105 @@ rpds-py = ">=0.7.0"
[[package]]
name = "regex"
-version = "2024.9.11"
+version = "2024.11.6"
description = "Alternative regular expression module, to replace re."
optional = false
python-versions = ">=3.8"
files = [
- {file = "regex-2024.9.11-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1494fa8725c285a81d01dc8c06b55287a1ee5e0e382d8413adc0a9197aac6408"},
- {file = "regex-2024.9.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0e12c481ad92d129c78f13a2a3662317e46ee7ef96c94fd332e1c29131875b7d"},
- {file = "regex-2024.9.11-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:16e13a7929791ac1216afde26f712802e3df7bf0360b32e4914dca3ab8baeea5"},
- {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:46989629904bad940bbec2106528140a218b4a36bb3042d8406980be1941429c"},
- {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a906ed5e47a0ce5f04b2c981af1c9acf9e8696066900bf03b9d7879a6f679fc8"},
- {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e9a091b0550b3b0207784a7d6d0f1a00d1d1c8a11699c1a4d93db3fbefc3ad35"},
- {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ddcd9a179c0a6fa8add279a4444015acddcd7f232a49071ae57fa6e278f1f71"},
- {file = "regex-2024.9.11-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6b41e1adc61fa347662b09398e31ad446afadff932a24807d3ceb955ed865cc8"},
- {file = "regex-2024.9.11-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ced479f601cd2f8ca1fd7b23925a7e0ad512a56d6e9476f79b8f381d9d37090a"},
- {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:635a1d96665f84b292e401c3d62775851aedc31d4f8784117b3c68c4fcd4118d"},
- {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:c0256beda696edcf7d97ef16b2a33a8e5a875affd6fa6567b54f7c577b30a137"},
- {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:3ce4f1185db3fbde8ed8aa223fc9620f276c58de8b0d4f8cc86fd1360829edb6"},
- {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:09d77559e80dcc9d24570da3745ab859a9cf91953062e4ab126ba9d5993688ca"},
- {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7a22ccefd4db3f12b526eccb129390942fe874a3a9fdbdd24cf55773a1faab1a"},
- {file = "regex-2024.9.11-cp310-cp310-win32.whl", hash = "sha256:f745ec09bc1b0bd15cfc73df6fa4f726dcc26bb16c23a03f9e3367d357eeedd0"},
- {file = "regex-2024.9.11-cp310-cp310-win_amd64.whl", hash = "sha256:01c2acb51f8a7d6494c8c5eafe3d8e06d76563d8a8a4643b37e9b2dd8a2ff623"},
- {file = "regex-2024.9.11-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:2cce2449e5927a0bf084d346da6cd5eb016b2beca10d0013ab50e3c226ffc0df"},
- {file = "regex-2024.9.11-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3b37fa423beefa44919e009745ccbf353d8c981516e807995b2bd11c2c77d268"},
- {file = "regex-2024.9.11-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:64ce2799bd75039b480cc0360907c4fb2f50022f030bf9e7a8705b636e408fad"},
- {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4cc92bb6db56ab0c1cbd17294e14f5e9224f0cc6521167ef388332604e92679"},
- {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d05ac6fa06959c4172eccd99a222e1fbf17b5670c4d596cb1e5cde99600674c4"},
- {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:040562757795eeea356394a7fb13076ad4f99d3c62ab0f8bdfb21f99a1f85664"},
- {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6113c008a7780792efc80f9dfe10ba0cd043cbf8dc9a76ef757850f51b4edc50"},
- {file = "regex-2024.9.11-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8e5fb5f77c8745a60105403a774fe2c1759b71d3e7b4ca237a5e67ad066c7199"},
- {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:54d9ff35d4515debf14bc27f1e3b38bfc453eff3220f5bce159642fa762fe5d4"},
- {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:df5cbb1fbc74a8305b6065d4ade43b993be03dbe0f8b30032cced0d7740994bd"},
- {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:7fb89ee5d106e4a7a51bce305ac4efb981536301895f7bdcf93ec92ae0d91c7f"},
- {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:a738b937d512b30bf75995c0159c0ddf9eec0775c9d72ac0202076c72f24aa96"},
- {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e28f9faeb14b6f23ac55bfbbfd3643f5c7c18ede093977f1df249f73fd22c7b1"},
- {file = "regex-2024.9.11-cp311-cp311-win32.whl", hash = "sha256:18e707ce6c92d7282dfce370cd205098384b8ee21544e7cb29b8aab955b66fa9"},
- {file = "regex-2024.9.11-cp311-cp311-win_amd64.whl", hash = "sha256:313ea15e5ff2a8cbbad96ccef6be638393041b0a7863183c2d31e0c6116688cf"},
- {file = "regex-2024.9.11-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b0d0a6c64fcc4ef9c69bd5b3b3626cc3776520a1637d8abaa62b9edc147a58f7"},
- {file = "regex-2024.9.11-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:49b0e06786ea663f933f3710a51e9385ce0cba0ea56b67107fd841a55d56a231"},
- {file = "regex-2024.9.11-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5b513b6997a0b2f10e4fd3a1313568e373926e8c252bd76c960f96fd039cd28d"},
- {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee439691d8c23e76f9802c42a95cfeebf9d47cf4ffd06f18489122dbb0a7ad64"},
- {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a8f877c89719d759e52783f7fe6e1c67121076b87b40542966c02de5503ace42"},
- {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:23b30c62d0f16827f2ae9f2bb87619bc4fba2044911e2e6c2eb1af0161cdb766"},
- {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85ab7824093d8f10d44330fe1e6493f756f252d145323dd17ab6b48733ff6c0a"},
- {file = "regex-2024.9.11-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8dee5b4810a89447151999428fe096977346cf2f29f4d5e29609d2e19e0199c9"},
- {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:98eeee2f2e63edae2181c886d7911ce502e1292794f4c5ee71e60e23e8d26b5d"},
- {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:57fdd2e0b2694ce6fc2e5ccf189789c3e2962916fb38779d3e3521ff8fe7a822"},
- {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:d552c78411f60b1fdaafd117a1fca2f02e562e309223b9d44b7de8be451ec5e0"},
- {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:a0b2b80321c2ed3fcf0385ec9e51a12253c50f146fddb2abbb10f033fe3d049a"},
- {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:18406efb2f5a0e57e3a5881cd9354c1512d3bb4f5c45d96d110a66114d84d23a"},
- {file = "regex-2024.9.11-cp312-cp312-win32.whl", hash = "sha256:e464b467f1588e2c42d26814231edecbcfe77f5ac414d92cbf4e7b55b2c2a776"},
- {file = "regex-2024.9.11-cp312-cp312-win_amd64.whl", hash = "sha256:9e8719792ca63c6b8340380352c24dcb8cd7ec49dae36e963742a275dfae6009"},
- {file = "regex-2024.9.11-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:c157bb447303070f256e084668b702073db99bbb61d44f85d811025fcf38f784"},
- {file = "regex-2024.9.11-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4db21ece84dfeefc5d8a3863f101995de646c6cb0536952c321a2650aa202c36"},
- {file = "regex-2024.9.11-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:220e92a30b426daf23bb67a7962900ed4613589bab80382be09b48896d211e92"},
- {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eb1ae19e64c14c7ec1995f40bd932448713d3c73509e82d8cd7744dc00e29e86"},
- {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f47cd43a5bfa48f86925fe26fbdd0a488ff15b62468abb5d2a1e092a4fb10e85"},
- {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9d4a76b96f398697fe01117093613166e6aa8195d63f1b4ec3f21ab637632963"},
- {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ea51dcc0835eea2ea31d66456210a4e01a076d820e9039b04ae8d17ac11dee6"},
- {file = "regex-2024.9.11-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b7aaa315101c6567a9a45d2839322c51c8d6e81f67683d529512f5bcfb99c802"},
- {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c57d08ad67aba97af57a7263c2d9006d5c404d721c5f7542f077f109ec2a4a29"},
- {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:f8404bf61298bb6f8224bb9176c1424548ee1181130818fcd2cbffddc768bed8"},
- {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:dd4490a33eb909ef5078ab20f5f000087afa2a4daa27b4c072ccb3cb3050ad84"},
- {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:eee9130eaad130649fd73e5cd92f60e55708952260ede70da64de420cdcad554"},
- {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6a2644a93da36c784e546de579ec1806bfd2763ef47babc1b03d765fe560c9f8"},
- {file = "regex-2024.9.11-cp313-cp313-win32.whl", hash = "sha256:e997fd30430c57138adc06bba4c7c2968fb13d101e57dd5bb9355bf8ce3fa7e8"},
- {file = "regex-2024.9.11-cp313-cp313-win_amd64.whl", hash = "sha256:042c55879cfeb21a8adacc84ea347721d3d83a159da6acdf1116859e2427c43f"},
- {file = "regex-2024.9.11-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:35f4a6f96aa6cb3f2f7247027b07b15a374f0d5b912c0001418d1d55024d5cb4"},
- {file = "regex-2024.9.11-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:55b96e7ce3a69a8449a66984c268062fbaa0d8ae437b285428e12797baefce7e"},
- {file = "regex-2024.9.11-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cb130fccd1a37ed894824b8c046321540263013da72745d755f2d35114b81a60"},
- {file = "regex-2024.9.11-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:323c1f04be6b2968944d730e5c2091c8c89767903ecaa135203eec4565ed2b2b"},
- {file = "regex-2024.9.11-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:be1c8ed48c4c4065ecb19d882a0ce1afe0745dfad8ce48c49586b90a55f02366"},
- {file = "regex-2024.9.11-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b5b029322e6e7b94fff16cd120ab35a253236a5f99a79fb04fda7ae71ca20ae8"},
- {file = "regex-2024.9.11-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6fff13ef6b5f29221d6904aa816c34701462956aa72a77f1f151a8ec4f56aeb"},
- {file = "regex-2024.9.11-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:587d4af3979376652010e400accc30404e6c16b7df574048ab1f581af82065e4"},
- {file = "regex-2024.9.11-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:079400a8269544b955ffa9e31f186f01d96829110a3bf79dc338e9910f794fca"},
- {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:f9268774428ec173654985ce55fc6caf4c6d11ade0f6f914d48ef4719eb05ebb"},
- {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:23f9985c8784e544d53fc2930fc1ac1a7319f5d5332d228437acc9f418f2f168"},
- {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:ae2941333154baff9838e88aa71c1d84f4438189ecc6021a12c7573728b5838e"},
- {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:e93f1c331ca8e86fe877a48ad64e77882c0c4da0097f2212873a69bbfea95d0c"},
- {file = "regex-2024.9.11-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:846bc79ee753acf93aef4184c040d709940c9d001029ceb7b7a52747b80ed2dd"},
- {file = "regex-2024.9.11-cp38-cp38-win32.whl", hash = "sha256:c94bb0a9f1db10a1d16c00880bdebd5f9faf267273b8f5bd1878126e0fbde771"},
- {file = "regex-2024.9.11-cp38-cp38-win_amd64.whl", hash = "sha256:2b08fce89fbd45664d3df6ad93e554b6c16933ffa9d55cb7e01182baaf971508"},
- {file = "regex-2024.9.11-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:07f45f287469039ffc2c53caf6803cd506eb5f5f637f1d4acb37a738f71dd066"},
- {file = "regex-2024.9.11-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4838e24ee015101d9f901988001038f7f0d90dc0c3b115541a1365fb439add62"},
- {file = "regex-2024.9.11-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6edd623bae6a737f10ce853ea076f56f507fd7726bee96a41ee3d68d347e4d16"},
- {file = "regex-2024.9.11-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c69ada171c2d0e97a4b5aa78fbb835e0ffbb6b13fc5da968c09811346564f0d3"},
- {file = "regex-2024.9.11-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:02087ea0a03b4af1ed6ebab2c54d7118127fee8d71b26398e8e4b05b78963199"},
- {file = "regex-2024.9.11-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:69dee6a020693d12a3cf892aba4808fe168d2a4cef368eb9bf74f5398bfd4ee8"},
- {file = "regex-2024.9.11-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:297f54910247508e6e5cae669f2bc308985c60540a4edd1c77203ef19bfa63ca"},
- {file = "regex-2024.9.11-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ecea58b43a67b1b79805f1a0255730edaf5191ecef84dbc4cc85eb30bc8b63b9"},
- {file = "regex-2024.9.11-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:eab4bb380f15e189d1313195b062a6aa908f5bd687a0ceccd47c8211e9cf0d4a"},
- {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0cbff728659ce4bbf4c30b2a1be040faafaa9eca6ecde40aaff86f7889f4ab39"},
- {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:54c4a097b8bc5bb0dfc83ae498061d53ad7b5762e00f4adaa23bee22b012e6ba"},
- {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:73d6d2f64f4d894c96626a75578b0bf7d9e56dcda8c3d037a2118fdfe9b1c664"},
- {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:e53b5fbab5d675aec9f0c501274c467c0f9a5d23696cfc94247e1fb56501ed89"},
- {file = "regex-2024.9.11-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:0ffbcf9221e04502fc35e54d1ce9567541979c3fdfb93d2c554f0ca583a19b35"},
- {file = "regex-2024.9.11-cp39-cp39-win32.whl", hash = "sha256:e4c22e1ac1f1ec1e09f72e6c44d8f2244173db7eb9629cc3a346a8d7ccc31142"},
- {file = "regex-2024.9.11-cp39-cp39-win_amd64.whl", hash = "sha256:faa3c142464efec496967359ca99696c896c591c56c53506bac1ad465f66e919"},
- {file = "regex-2024.9.11.tar.gz", hash = "sha256:6c188c307e8433bcb63dc1915022deb553b4203a70722fc542c363bf120a01fd"},
+ {file = "regex-2024.11.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ff590880083d60acc0433f9c3f713c51f7ac6ebb9adf889c79a261ecf541aa91"},
+ {file = "regex-2024.11.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:658f90550f38270639e83ce492f27d2c8d2cd63805c65a13a14d36ca126753f0"},
+ {file = "regex-2024.11.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:164d8b7b3b4bcb2068b97428060b2a53be050085ef94eca7f240e7947f1b080e"},
+ {file = "regex-2024.11.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3660c82f209655a06b587d55e723f0b813d3a7db2e32e5e7dc64ac2a9e86fde"},
+ {file = "regex-2024.11.6-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d22326fcdef5e08c154280b71163ced384b428343ae16a5ab2b3354aed12436e"},
+ {file = "regex-2024.11.6-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f1ac758ef6aebfc8943560194e9fd0fa18bcb34d89fd8bd2af18183afd8da3a2"},
+ {file = "regex-2024.11.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:997d6a487ff00807ba810e0f8332c18b4eb8d29463cfb7c820dc4b6e7562d0cf"},
+ {file = "regex-2024.11.6-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:02a02d2bb04fec86ad61f3ea7f49c015a0681bf76abb9857f945d26159d2968c"},
+ {file = "regex-2024.11.6-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f02f93b92358ee3f78660e43b4b0091229260c5d5c408d17d60bf26b6c900e86"},
+ {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:06eb1be98df10e81ebaded73fcd51989dcf534e3c753466e4b60c4697a003b67"},
+ {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:040df6fe1a5504eb0f04f048e6d09cd7c7110fef851d7c567a6b6e09942feb7d"},
+ {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:fdabbfc59f2c6edba2a6622c647b716e34e8e3867e0ab975412c5c2f79b82da2"},
+ {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:8447d2d39b5abe381419319f942de20b7ecd60ce86f16a23b0698f22e1b70008"},
+ {file = "regex-2024.11.6-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:da8f5fc57d1933de22a9e23eec290a0d8a5927a5370d24bda9a6abe50683fe62"},
+ {file = "regex-2024.11.6-cp310-cp310-win32.whl", hash = "sha256:b489578720afb782f6ccf2840920f3a32e31ba28a4b162e13900c3e6bd3f930e"},
+ {file = "regex-2024.11.6-cp310-cp310-win_amd64.whl", hash = "sha256:5071b2093e793357c9d8b2929dfc13ac5f0a6c650559503bb81189d0a3814519"},
+ {file = "regex-2024.11.6-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5478c6962ad548b54a591778e93cd7c456a7a29f8eca9c49e4f9a806dcc5d638"},
+ {file = "regex-2024.11.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2c89a8cc122b25ce6945f0423dc1352cb9593c68abd19223eebbd4e56612c5b7"},
+ {file = "regex-2024.11.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:94d87b689cdd831934fa3ce16cc15cd65748e6d689f5d2b8f4f4df2065c9fa20"},
+ {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1062b39a0a2b75a9c694f7a08e7183a80c63c0d62b301418ffd9c35f55aaa114"},
+ {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:167ed4852351d8a750da48712c3930b031f6efdaa0f22fa1933716bfcd6bf4a3"},
+ {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d548dafee61f06ebdb584080621f3e0c23fff312f0de1afc776e2a2ba99a74f"},
+ {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2a19f302cd1ce5dd01a9099aaa19cae6173306d1302a43b627f62e21cf18ac0"},
+ {file = "regex-2024.11.6-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bec9931dfb61ddd8ef2ebc05646293812cb6b16b60cf7c9511a832b6f1854b55"},
+ {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9714398225f299aa85267fd222f7142fcb5c769e73d7733344efc46f2ef5cf89"},
+ {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:202eb32e89f60fc147a41e55cb086db2a3f8cb82f9a9a88440dcfc5d37faae8d"},
+ {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:4181b814e56078e9b00427ca358ec44333765f5ca1b45597ec7446d3a1ef6e34"},
+ {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:068376da5a7e4da51968ce4c122a7cd31afaaec4fccc7856c92f63876e57b51d"},
+ {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ac10f2c4184420d881a3475fb2c6f4d95d53a8d50209a2500723d831036f7c45"},
+ {file = "regex-2024.11.6-cp311-cp311-win32.whl", hash = "sha256:c36f9b6f5f8649bb251a5f3f66564438977b7ef8386a52460ae77e6070d309d9"},
+ {file = "regex-2024.11.6-cp311-cp311-win_amd64.whl", hash = "sha256:02e28184be537f0e75c1f9b2f8847dc51e08e6e171c6bde130b2687e0c33cf60"},
+ {file = "regex-2024.11.6-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:52fb28f528778f184f870b7cf8f225f5eef0a8f6e3778529bdd40c7b3920796a"},
+ {file = "regex-2024.11.6-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fdd6028445d2460f33136c55eeb1f601ab06d74cb3347132e1c24250187500d9"},
+ {file = "regex-2024.11.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:805e6b60c54bf766b251e94526ebad60b7de0c70f70a4e6210ee2891acb70bf2"},
+ {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b85c2530be953a890eaffde05485238f07029600e8f098cdf1848d414a8b45e4"},
+ {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bb26437975da7dc36b7efad18aa9dd4ea569d2357ae6b783bf1118dabd9ea577"},
+ {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:abfa5080c374a76a251ba60683242bc17eeb2c9818d0d30117b4486be10c59d3"},
+ {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b7fa6606c2881c1db9479b0eaa11ed5dfa11c8d60a474ff0e095099f39d98e"},
+ {file = "regex-2024.11.6-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0c32f75920cf99fe6b6c539c399a4a128452eaf1af27f39bce8909c9a3fd8cbe"},
+ {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:982e6d21414e78e1f51cf595d7f321dcd14de1f2881c5dc6a6e23bbbbd68435e"},
+ {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a7c2155f790e2fb448faed6dd241386719802296ec588a8b9051c1f5c481bc29"},
+ {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:149f5008d286636e48cd0b1dd65018548944e495b0265b45e1bffecce1ef7f39"},
+ {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:e5364a4502efca094731680e80009632ad6624084aff9a23ce8c8c6820de3e51"},
+ {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0a86e7eeca091c09e021db8eb72d54751e527fa47b8d5787caf96d9831bd02ad"},
+ {file = "regex-2024.11.6-cp312-cp312-win32.whl", hash = "sha256:32f9a4c643baad4efa81d549c2aadefaeba12249b2adc5af541759237eee1c54"},
+ {file = "regex-2024.11.6-cp312-cp312-win_amd64.whl", hash = "sha256:a93c194e2df18f7d264092dc8539b8ffb86b45b899ab976aa15d48214138e81b"},
+ {file = "regex-2024.11.6-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a6ba92c0bcdf96cbf43a12c717eae4bc98325ca3730f6b130ffa2e3c3c723d84"},
+ {file = "regex-2024.11.6-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:525eab0b789891ac3be914d36893bdf972d483fe66551f79d3e27146191a37d4"},
+ {file = "regex-2024.11.6-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:086a27a0b4ca227941700e0b31425e7a28ef1ae8e5e05a33826e17e47fbfdba0"},
+ {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bde01f35767c4a7899b7eb6e823b125a64de314a8ee9791367c9a34d56af18d0"},
+ {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b583904576650166b3d920d2bcce13971f6f9e9a396c673187f49811b2769dc7"},
+ {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c4de13f06a0d54fa0d5ab1b7138bfa0d883220965a29616e3ea61b35d5f5fc7"},
+ {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3cde6e9f2580eb1665965ce9bf17ff4952f34f5b126beb509fee8f4e994f143c"},
+ {file = "regex-2024.11.6-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0d7f453dca13f40a02b79636a339c5b62b670141e63efd511d3f8f73fba162b3"},
+ {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:59dfe1ed21aea057a65c6b586afd2a945de04fc7db3de0a6e3ed5397ad491b07"},
+ {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b97c1e0bd37c5cd7902e65f410779d39eeda155800b65fc4d04cc432efa9bc6e"},
+ {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:f9d1e379028e0fc2ae3654bac3cbbef81bf3fd571272a42d56c24007979bafb6"},
+ {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:13291b39131e2d002a7940fb176e120bec5145f3aeb7621be6534e46251912c4"},
+ {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4f51f88c126370dcec4908576c5a627220da6c09d0bff31cfa89f2523843316d"},
+ {file = "regex-2024.11.6-cp313-cp313-win32.whl", hash = "sha256:63b13cfd72e9601125027202cad74995ab26921d8cd935c25f09c630436348ff"},
+ {file = "regex-2024.11.6-cp313-cp313-win_amd64.whl", hash = "sha256:2b3361af3198667e99927da8b84c1b010752fa4b1115ee30beaa332cabc3ef1a"},
+ {file = "regex-2024.11.6-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:3a51ccc315653ba012774efca4f23d1d2a8a8f278a6072e29c7147eee7da446b"},
+ {file = "regex-2024.11.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ad182d02e40de7459b73155deb8996bbd8e96852267879396fb274e8700190e3"},
+ {file = "regex-2024.11.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ba9b72e5643641b7d41fa1f6d5abda2c9a263ae835b917348fc3c928182ad467"},
+ {file = "regex-2024.11.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40291b1b89ca6ad8d3f2b82782cc33807f1406cf68c8d440861da6304d8ffbbd"},
+ {file = "regex-2024.11.6-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cdf58d0e516ee426a48f7b2c03a332a4114420716d55769ff7108c37a09951bf"},
+ {file = "regex-2024.11.6-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a36fdf2af13c2b14738f6e973aba563623cb77d753bbbd8d414d18bfaa3105dd"},
+ {file = "regex-2024.11.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d1cee317bfc014c2419a76bcc87f071405e3966da434e03e13beb45f8aced1a6"},
+ {file = "regex-2024.11.6-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:50153825ee016b91549962f970d6a4442fa106832e14c918acd1c8e479916c4f"},
+ {file = "regex-2024.11.6-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ea1bfda2f7162605f6e8178223576856b3d791109f15ea99a9f95c16a7636fb5"},
+ {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:df951c5f4a1b1910f1a99ff42c473ff60f8225baa1cdd3539fe2819d9543e9df"},
+ {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:072623554418a9911446278f16ecb398fb3b540147a7828c06e2011fa531e773"},
+ {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:f654882311409afb1d780b940234208a252322c24a93b442ca714d119e68086c"},
+ {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:89d75e7293d2b3e674db7d4d9b1bee7f8f3d1609428e293771d1a962617150cc"},
+ {file = "regex-2024.11.6-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:f65557897fc977a44ab205ea871b690adaef6b9da6afda4790a2484b04293a5f"},
+ {file = "regex-2024.11.6-cp38-cp38-win32.whl", hash = "sha256:6f44ec28b1f858c98d3036ad5d7d0bfc568bdd7a74f9c24e25f41ef1ebfd81a4"},
+ {file = "regex-2024.11.6-cp38-cp38-win_amd64.whl", hash = "sha256:bb8f74f2f10dbf13a0be8de623ba4f9491faf58c24064f32b65679b021ed0001"},
+ {file = "regex-2024.11.6-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5704e174f8ccab2026bd2f1ab6c510345ae8eac818b613d7d73e785f1310f839"},
+ {file = "regex-2024.11.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:220902c3c5cc6af55d4fe19ead504de80eb91f786dc102fbd74894b1551f095e"},
+ {file = "regex-2024.11.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5e7e351589da0850c125f1600a4c4ba3c722efefe16b297de54300f08d734fbf"},
+ {file = "regex-2024.11.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5056b185ca113c88e18223183aa1a50e66507769c9640a6ff75859619d73957b"},
+ {file = "regex-2024.11.6-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2e34b51b650b23ed3354b5a07aab37034d9f923db2a40519139af34f485f77d0"},
+ {file = "regex-2024.11.6-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5670bce7b200273eee1840ef307bfa07cda90b38ae56e9a6ebcc9f50da9c469b"},
+ {file = "regex-2024.11.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:08986dce1339bc932923e7d1232ce9881499a0e02925f7402fb7c982515419ef"},
+ {file = "regex-2024.11.6-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:93c0b12d3d3bc25af4ebbf38f9ee780a487e8bf6954c115b9f015822d3bb8e48"},
+ {file = "regex-2024.11.6-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:764e71f22ab3b305e7f4c21f1a97e1526a25ebdd22513e251cf376760213da13"},
+ {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:f056bf21105c2515c32372bbc057f43eb02aae2fda61052e2f7622c801f0b4e2"},
+ {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:69ab78f848845569401469da20df3e081e6b5a11cb086de3eed1d48f5ed57c95"},
+ {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:86fddba590aad9208e2fa8b43b4c098bb0ec74f15718bb6a704e3c63e2cef3e9"},
+ {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:684d7a212682996d21ca12ef3c17353c021fe9de6049e19ac8481ec35574a70f"},
+ {file = "regex-2024.11.6-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:a03e02f48cd1abbd9f3b7e3586d97c8f7a9721c436f51a5245b3b9483044480b"},
+ {file = "regex-2024.11.6-cp39-cp39-win32.whl", hash = "sha256:41758407fc32d5c3c5de163888068cfee69cb4c2be844e7ac517a52770f9af57"},
+ {file = "regex-2024.11.6-cp39-cp39-win_amd64.whl", hash = "sha256:b2837718570f95dd41675328e111345f9b7095d821bac435aac173ac80b19983"},
+ {file = "regex-2024.11.6.tar.gz", hash = "sha256:7ab159b063c52a0333c884e4679f8d7a85112ee3078fe3d9004b2dd875585519"},
]

[[package]]
@@ -1600,114 +1631,101 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]
[[package]]
name = "rpds-py"
-version = "0.20.1"
+version = "0.21.0"
description = "Python bindings to Rust's persistent data structures (rpds)"
optional = false
-python-versions = ">=3.8"
+python-versions = ">=3.9"
files = [
- {file = "rpds_py-0.20.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:a649dfd735fff086e8a9d0503a9f0c7d01b7912a333c7ae77e1515c08c146dad"},
- {file = "rpds_py-0.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f16bc1334853e91ddaaa1217045dd7be166170beec337576818461268a3de67f"},
- {file = "rpds_py-0.20.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:14511a539afee6f9ab492b543060c7491c99924314977a55c98bfa2ee29ce78c"},
- {file = "rpds_py-0.20.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3ccb8ac2d3c71cda472b75af42818981bdacf48d2e21c36331b50b4f16930163"},
- {file = "rpds_py-0.20.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c142b88039b92e7e0cb2552e8967077e3179b22359e945574f5e2764c3953dcf"},
- {file = "rpds_py-0.20.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f19169781dddae7478a32301b499b2858bc52fc45a112955e798ee307e294977"},
- {file = "rpds_py-0.20.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13c56de6518e14b9bf6edde23c4c39dac5b48dcf04160ea7bce8fca8397cdf86"},
- {file = "rpds_py-0.20.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:925d176a549f4832c6f69fa6026071294ab5910e82a0fe6c6228fce17b0706bd"},
- {file = "rpds_py-0.20.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:78f0b6877bfce7a3d1ff150391354a410c55d3cdce386f862926a4958ad5ab7e"},
- {file = "rpds_py-0.20.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3dd645e2b0dcb0fd05bf58e2e54c13875847687d0b71941ad2e757e5d89d4356"},
- {file = "rpds_py-0.20.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:4f676e21db2f8c72ff0936f895271e7a700aa1f8d31b40e4e43442ba94973899"},
- {file = "rpds_py-0.20.1-cp310-none-win32.whl", hash = "sha256:648386ddd1e19b4a6abab69139b002bc49ebf065b596119f8f37c38e9ecee8ff"},
- {file = "rpds_py-0.20.1-cp310-none-win_amd64.whl", hash = "sha256:d9ecb51120de61e4604650666d1f2b68444d46ae18fd492245a08f53ad2b7711"},
- {file = "rpds_py-0.20.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:762703bdd2b30983c1d9e62b4c88664df4a8a4d5ec0e9253b0231171f18f6d75"},
- {file = "rpds_py-0.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0b581f47257a9fce535c4567782a8976002d6b8afa2c39ff616edf87cbeff712"},
- {file = "rpds_py-0.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:842c19a6ce894493563c3bd00d81d5100e8e57d70209e84d5491940fdb8b9e3a"},
- {file = "rpds_py-0.20.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42cbde7789f5c0bcd6816cb29808e36c01b960fb5d29f11e052215aa85497c93"},
- {file = "rpds_py-0.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6c8e9340ce5a52f95fa7d3b552b35c7e8f3874d74a03a8a69279fd5fca5dc751"},
- {file = "rpds_py-0.20.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ba6f89cac95c0900d932c9efb7f0fb6ca47f6687feec41abcb1bd5e2bd45535"},
- {file = "rpds_py-0.20.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a916087371afd9648e1962e67403c53f9c49ca47b9680adbeef79da3a7811b0"},
- {file = "rpds_py-0.20.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:200a23239781f46149e6a415f1e870c5ef1e712939fe8fa63035cd053ac2638e"},
- {file = "rpds_py-0.20.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:58b1d5dd591973d426cbb2da5e27ba0339209832b2f3315928c9790e13f159e8"},
- {file = "rpds_py-0.20.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:6b73c67850ca7cae0f6c56f71e356d7e9fa25958d3e18a64927c2d930859b8e4"},
- {file = "rpds_py-0.20.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:d8761c3c891cc51e90bc9926d6d2f59b27beaf86c74622c8979380a29cc23ac3"},
- {file = "rpds_py-0.20.1-cp311-none-win32.whl", hash = "sha256:cd945871335a639275eee904caef90041568ce3b42f402c6959b460d25ae8732"},
- {file = "rpds_py-0.20.1-cp311-none-win_amd64.whl", hash = "sha256:7e21b7031e17c6b0e445f42ccc77f79a97e2687023c5746bfb7a9e45e0921b84"},
- {file = "rpds_py-0.20.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:36785be22066966a27348444b40389f8444671630063edfb1a2eb04318721e17"},
- {file = "rpds_py-0.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:142c0a5124d9bd0e2976089484af5c74f47bd3298f2ed651ef54ea728d2ea42c"},
- {file = "rpds_py-0.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dbddc10776ca7ebf2a299c41a4dde8ea0d8e3547bfd731cb87af2e8f5bf8962d"},
- {file = "rpds_py-0.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:15a842bb369e00295392e7ce192de9dcbf136954614124a667f9f9f17d6a216f"},
- {file = "rpds_py-0.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:be5ef2f1fc586a7372bfc355986226484e06d1dc4f9402539872c8bb99e34b01"},
- {file = "rpds_py-0.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dbcf360c9e3399b056a238523146ea77eeb2a596ce263b8814c900263e46031a"},
- {file = "rpds_py-0.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ecd27a66740ffd621d20b9a2f2b5ee4129a56e27bfb9458a3bcc2e45794c96cb"},
- {file = "rpds_py-0.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d0b937b2a1988f184a3e9e577adaa8aede21ec0b38320d6009e02bd026db04fa"},
- {file = "rpds_py-0.20.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6889469bfdc1eddf489729b471303739bf04555bb151fe8875931f8564309afc"},
- {file = "rpds_py-0.20.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:19b73643c802f4eaf13d97f7855d0fb527fbc92ab7013c4ad0e13a6ae0ed23bd"},
- {file = "rpds_py-0.20.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3c6afcf2338e7f374e8edc765c79fbcb4061d02b15dd5f8f314a4af2bdc7feb5"},
- {file = "rpds_py-0.20.1-cp312-none-win32.whl", hash = "sha256:dc73505153798c6f74854aba69cc75953888cf9866465196889c7cdd351e720c"},
- {file = "rpds_py-0.20.1-cp312-none-win_amd64.whl", hash = "sha256:8bbe951244a838a51289ee53a6bae3a07f26d4e179b96fc7ddd3301caf0518eb"},
- {file = "rpds_py-0.20.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:6ca91093a4a8da4afae7fe6a222c3b53ee4eef433ebfee4d54978a103435159e"},
- {file = "rpds_py-0.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:b9c2fe36d1f758b28121bef29ed1dee9b7a2453e997528e7d1ac99b94892527c"},
- {file = "rpds_py-0.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f009c69bc8c53db5dfab72ac760895dc1f2bc1b62ab7408b253c8d1ec52459fc"},
- {file = "rpds_py-0.20.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6740a3e8d43a32629bb9b009017ea5b9e713b7210ba48ac8d4cb6d99d86c8ee8"},
- {file = "rpds_py-0.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:32b922e13d4c0080d03e7b62991ad7f5007d9cd74e239c4b16bc85ae8b70252d"},
- {file = "rpds_py-0.20.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fe00a9057d100e69b4ae4a094203a708d65b0f345ed546fdef86498bf5390982"},
- {file = "rpds_py-0.20.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49fe9b04b6fa685bd39237d45fad89ba19e9163a1ccaa16611a812e682913496"},
- {file = "rpds_py-0.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:aa7ac11e294304e615b43f8c441fee5d40094275ed7311f3420d805fde9b07b4"},
- {file = "rpds_py-0.20.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6aa97af1558a9bef4025f8f5d8c60d712e0a3b13a2fe875511defc6ee77a1ab7"},
- {file = "rpds_py-0.20.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:483b29f6f7ffa6af845107d4efe2e3fa8fb2693de8657bc1849f674296ff6a5a"},
- {file = "rpds_py-0.20.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:37fe0f12aebb6a0e3e17bb4cd356b1286d2d18d2e93b2d39fe647138458b4bcb"},
- {file = "rpds_py-0.20.1-cp313-none-win32.whl", hash = "sha256:a624cc00ef2158e04188df5e3016385b9353638139a06fb77057b3498f794782"},
- {file = "rpds_py-0.20.1-cp313-none-win_amd64.whl", hash = "sha256:b71b8666eeea69d6363248822078c075bac6ed135faa9216aa85f295ff009b1e"},
- {file = "rpds_py-0.20.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:5b48e790e0355865197ad0aca8cde3d8ede347831e1959e158369eb3493d2191"},
- {file = "rpds_py-0.20.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3e310838a5801795207c66c73ea903deda321e6146d6f282e85fa7e3e4854804"},
- {file = "rpds_py-0.20.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2249280b870e6a42c0d972339e9cc22ee98730a99cd7f2f727549af80dd5a963"},
- {file = "rpds_py-0.20.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e79059d67bea28b53d255c1437b25391653263f0e69cd7dec170d778fdbca95e"},
- {file = "rpds_py-0.20.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2b431c777c9653e569986ecf69ff4a5dba281cded16043d348bf9ba505486f36"},
- {file = "rpds_py-0.20.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:da584ff96ec95e97925174eb8237e32f626e7a1a97888cdd27ee2f1f24dd0ad8"},
- {file = "rpds_py-0.20.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02a0629ec053fc013808a85178524e3cb63a61dbc35b22499870194a63578fb9"},
- {file = "rpds_py-0.20.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fbf15aff64a163db29a91ed0868af181d6f68ec1a3a7d5afcfe4501252840bad"},
- {file = "rpds_py-0.20.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:07924c1b938798797d60c6308fa8ad3b3f0201802f82e4a2c41bb3fafb44cc28"},
- {file = "rpds_py-0.20.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:4a5a844f68776a7715ecb30843b453f07ac89bad393431efbf7accca3ef599c1"},
- {file = "rpds_py-0.20.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:518d2ca43c358929bf08f9079b617f1c2ca6e8848f83c1225c88caeac46e6cbc"},
- {file = "rpds_py-0.20.1-cp38-none-win32.whl", hash = "sha256:3aea7eed3e55119635a74bbeb80b35e776bafccb70d97e8ff838816c124539f1"},
- {file = "rpds_py-0.20.1-cp38-none-win_amd64.whl", hash = "sha256:7dca7081e9a0c3b6490a145593f6fe3173a94197f2cb9891183ef75e9d64c425"},
- {file = "rpds_py-0.20.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:b41b6321805c472f66990c2849e152aff7bc359eb92f781e3f606609eac877ad"},
- {file = "rpds_py-0.20.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0a90c373ea2975519b58dece25853dbcb9779b05cc46b4819cb1917e3b3215b6"},
- {file = "rpds_py-0.20.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:16d4477bcb9fbbd7b5b0e4a5d9b493e42026c0bf1f06f723a9353f5153e75d30"},
- {file = "rpds_py-0.20.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:84b8382a90539910b53a6307f7c35697bc7e6ffb25d9c1d4e998a13e842a5e83"},
- {file = "rpds_py-0.20.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4888e117dd41b9d34194d9e31631af70d3d526efc363085e3089ab1a62c32ed1"},
- {file = "rpds_py-0.20.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5265505b3d61a0f56618c9b941dc54dc334dc6e660f1592d112cd103d914a6db"},
- {file = "rpds_py-0.20.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e75ba609dba23f2c95b776efb9dd3f0b78a76a151e96f96cc5b6b1b0004de66f"},
- {file = "rpds_py-0.20.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1791ff70bc975b098fe6ecf04356a10e9e2bd7dc21fa7351c1742fdeb9b4966f"},
- {file = "rpds_py-0.20.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:d126b52e4a473d40232ec2052a8b232270ed1f8c9571aaf33f73a14cc298c24f"},
- {file = "rpds_py-0.20.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:c14937af98c4cc362a1d4374806204dd51b1e12dded1ae30645c298e5a5c4cb1"},
- {file = "rpds_py-0.20.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:3d089d0b88996df627693639d123c8158cff41c0651f646cd8fd292c7da90eaf"},
- {file = "rpds_py-0.20.1-cp39-none-win32.whl", hash = "sha256:653647b8838cf83b2e7e6a0364f49af96deec64d2a6578324db58380cff82aca"},
- {file = "rpds_py-0.20.1-cp39-none-win_amd64.whl", hash = "sha256:fa41a64ac5b08b292906e248549ab48b69c5428f3987b09689ab2441f267d04d"},
- {file = "rpds_py-0.20.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:7a07ced2b22f0cf0b55a6a510078174c31b6d8544f3bc00c2bcee52b3d613f74"},
- {file = "rpds_py-0.20.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:68cb0a499f2c4a088fd2f521453e22ed3527154136a855c62e148b7883b99f9a"},
- {file = "rpds_py-0.20.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fa3060d885657abc549b2a0f8e1b79699290e5d83845141717c6c90c2df38311"},
- {file = "rpds_py-0.20.1-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:95f3b65d2392e1c5cec27cff08fdc0080270d5a1a4b2ea1d51d5f4a2620ff08d"},
- {file = "rpds_py-0.20.1-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2cc3712a4b0b76a1d45a9302dd2f53ff339614b1c29603a911318f2357b04dd2"},
- {file = "rpds_py-0.20.1-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d4eea0761e37485c9b81400437adb11c40e13ef513375bbd6973e34100aeb06"},
- {file = "rpds_py-0.20.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f5179583d7a6cdb981151dd349786cbc318bab54963a192692d945dd3f6435d"},
- {file = "rpds_py-0.20.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2fbb0ffc754490aff6dabbf28064be47f0f9ca0b9755976f945214965b3ace7e"},
- {file = "rpds_py-0.20.1-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:a94e52537a0e0a85429eda9e49f272ada715506d3b2431f64b8a3e34eb5f3e75"},
- {file = "rpds_py-0.20.1-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:92b68b79c0da2a980b1c4197e56ac3dd0c8a149b4603747c4378914a68706979"},
- {file = "rpds_py-0.20.1-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:93da1d3db08a827eda74356f9f58884adb254e59b6664f64cc04cdff2cc19b0d"},
- {file = "rpds_py-0.20.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:754bbed1a4ca48479e9d4182a561d001bbf81543876cdded6f695ec3d465846b"},
- {file = "rpds_py-0.20.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:ca449520e7484534a2a44faf629362cae62b660601432d04c482283c47eaebab"},
- {file = "rpds_py-0.20.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:9c4cb04a16b0f199a8c9bf807269b2f63b7b5b11425e4a6bd44bd6961d28282c"},
- {file = "rpds_py-0.20.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb63804105143c7e24cee7db89e37cb3f3941f8e80c4379a0b355c52a52b6780"},
- {file = "rpds_py-0.20.1-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:55cd1fa4ecfa6d9f14fbd97ac24803e6f73e897c738f771a9fe038f2f11ff07c"},
- {file = "rpds_py-0.20.1-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0f8f741b6292c86059ed175d80eefa80997125b7c478fb8769fd9ac8943a16c0"},
- {file = "rpds_py-0.20.1-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fc212779bf8411667234b3cdd34d53de6c2b8b8b958e1e12cb473a5f367c338"},
- {file = "rpds_py-0.20.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ad56edabcdb428c2e33bbf24f255fe2b43253b7d13a2cdbf05de955217313e6"},
- {file = "rpds_py-0.20.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0a3a1e9ee9728b2c1734f65d6a1d376c6f2f6fdcc13bb007a08cc4b1ff576dc5"},
- {file = "rpds_py-0.20.1-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:e13de156137b7095442b288e72f33503a469aa1980ed856b43c353ac86390519"},
- {file = "rpds_py-0.20.1-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:07f59760ef99f31422c49038964b31c4dfcfeb5d2384ebfc71058a7c9adae2d2"},
- {file = "rpds_py-0.20.1-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:59240685e7da61fb78f65a9f07f8108e36a83317c53f7b276b4175dc44151684"},
- {file = "rpds_py-0.20.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:83cba698cfb3c2c5a7c3c6bac12fe6c6a51aae69513726be6411076185a8b24a"},
- {file = "rpds_py-0.20.1.tar.gz", hash = "sha256:e1791c4aabd117653530dccd24108fa03cc6baf21f58b950d0a73c3b3b29a350"},
+ {file = "rpds_py-0.21.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:a017f813f24b9df929674d0332a374d40d7f0162b326562daae8066b502d0590"},
+ {file = "rpds_py-0.21.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:20cc1ed0bcc86d8e1a7e968cce15be45178fd16e2ff656a243145e0b439bd250"},
+ {file = "rpds_py-0.21.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad116dda078d0bc4886cb7840e19811562acdc7a8e296ea6ec37e70326c1b41c"},
+ {file = "rpds_py-0.21.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:808f1ac7cf3b44f81c9475475ceb221f982ef548e44e024ad5f9e7060649540e"},
+ {file = "rpds_py-0.21.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de552f4a1916e520f2703ec474d2b4d3f86d41f353e7680b597512ffe7eac5d0"},
+ {file = "rpds_py-0.21.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:efec946f331349dfc4ae9d0e034c263ddde19414fe5128580f512619abed05f1"},
+ {file = "rpds_py-0.21.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b80b4690bbff51a034bfde9c9f6bf9357f0a8c61f548942b80f7b66356508bf5"},
+ {file = "rpds_py-0.21.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:085ed25baac88953d4283e5b5bd094b155075bb40d07c29c4f073e10623f9f2e"},
+ {file = "rpds_py-0.21.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:daa8efac2a1273eed2354397a51216ae1e198ecbce9036fba4e7610b308b6153"},
+ {file = "rpds_py-0.21.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:95a5bad1ac8a5c77b4e658671642e4af3707f095d2b78a1fdd08af0dfb647624"},
+ {file = "rpds_py-0.21.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3e53861b29a13d5b70116ea4230b5f0f3547b2c222c5daa090eb7c9c82d7f664"},
+ {file = "rpds_py-0.21.0-cp310-none-win32.whl", hash = "sha256:ea3a6ac4d74820c98fcc9da4a57847ad2cc36475a8bd9683f32ab6d47a2bd682"},
+ {file = "rpds_py-0.21.0-cp310-none-win_amd64.whl", hash = "sha256:b8f107395f2f1d151181880b69a2869c69e87ec079c49c0016ab96860b6acbe5"},
+ {file = "rpds_py-0.21.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:5555db3e618a77034954b9dc547eae94166391a98eb867905ec8fcbce1308d95"},
+ {file = "rpds_py-0.21.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:97ef67d9bbc3e15584c2f3c74bcf064af36336c10d2e21a2131e123ce0f924c9"},
+ {file = "rpds_py-0.21.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ab2c2a26d2f69cdf833174f4d9d86118edc781ad9a8fa13970b527bf8236027"},
+ {file = "rpds_py-0.21.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4e8921a259f54bfbc755c5bbd60c82bb2339ae0324163f32868f63f0ebb873d9"},
+ {file = "rpds_py-0.21.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8a7ff941004d74d55a47f916afc38494bd1cfd4b53c482b77c03147c91ac0ac3"},
+ {file = "rpds_py-0.21.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5145282a7cd2ac16ea0dc46b82167754d5e103a05614b724457cffe614f25bd8"},
+ {file = "rpds_py-0.21.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de609a6f1b682f70bb7163da745ee815d8f230d97276db049ab447767466a09d"},
+ {file = "rpds_py-0.21.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:40c91c6e34cf016fa8e6b59d75e3dbe354830777fcfd74c58b279dceb7975b75"},
+ {file = "rpds_py-0.21.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d2132377f9deef0c4db89e65e8bb28644ff75a18df5293e132a8d67748397b9f"},
+ {file = "rpds_py-0.21.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:0a9e0759e7be10109645a9fddaaad0619d58c9bf30a3f248a2ea57a7c417173a"},
+ {file = "rpds_py-0.21.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9e20da3957bdf7824afdd4b6eeb29510e83e026473e04952dca565170cd1ecc8"},
+ {file = "rpds_py-0.21.0-cp311-none-win32.whl", hash = "sha256:f71009b0d5e94c0e86533c0b27ed7cacc1239cb51c178fd239c3cfefefb0400a"},
+ {file = "rpds_py-0.21.0-cp311-none-win_amd64.whl", hash = "sha256:e168afe6bf6ab7ab46c8c375606298784ecbe3ba31c0980b7dcbb9631dcba97e"},
+ {file = "rpds_py-0.21.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:30b912c965b2aa76ba5168fd610087bad7fcde47f0a8367ee8f1876086ee6d1d"},
+ {file = "rpds_py-0.21.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ca9989d5d9b1b300bc18e1801c67b9f6d2c66b8fd9621b36072ed1df2c977f72"},
+ {file = "rpds_py-0.21.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f54e7106f0001244a5f4cf810ba8d3f9c542e2730821b16e969d6887b664266"},
+ {file = "rpds_py-0.21.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fed5dfefdf384d6fe975cc026886aece4f292feaf69d0eeb716cfd3c5a4dd8be"},
+ {file = "rpds_py-0.21.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:590ef88db231c9c1eece44dcfefd7515d8bf0d986d64d0caf06a81998a9e8cab"},
+ {file = "rpds_py-0.21.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f983e4c2f603c95dde63df633eec42955508eefd8d0f0e6d236d31a044c882d7"},
+ {file = "rpds_py-0.21.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b229ce052ddf1a01c67d68166c19cb004fb3612424921b81c46e7ea7ccf7c3bf"},
+ {file = "rpds_py-0.21.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ebf64e281a06c904a7636781d2e973d1f0926a5b8b480ac658dc0f556e7779f4"},
+ {file = "rpds_py-0.21.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:998a8080c4495e4f72132f3d66ff91f5997d799e86cec6ee05342f8f3cda7dca"},
+ {file = "rpds_py-0.21.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:98486337f7b4f3c324ab402e83453e25bb844f44418c066623db88e4c56b7c7b"},
+ {file = "rpds_py-0.21.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a78d8b634c9df7f8d175451cfeac3810a702ccb85f98ec95797fa98b942cea11"},
+ {file = "rpds_py-0.21.0-cp312-none-win32.whl", hash = "sha256:a58ce66847711c4aa2ecfcfaff04cb0327f907fead8945ffc47d9407f41ff952"},
+ {file = "rpds_py-0.21.0-cp312-none-win_amd64.whl", hash = "sha256:e860f065cc4ea6f256d6f411aba4b1251255366e48e972f8a347cf88077b24fd"},
+ {file = "rpds_py-0.21.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:ee4eafd77cc98d355a0d02f263efc0d3ae3ce4a7c24740010a8b4012bbb24937"},
+ {file = "rpds_py-0.21.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:688c93b77e468d72579351a84b95f976bd7b3e84aa6686be6497045ba84be560"},
+ {file = "rpds_py-0.21.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c38dbf31c57032667dd5a2f0568ccde66e868e8f78d5a0d27dcc56d70f3fcd3b"},
+ {file = "rpds_py-0.21.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2d6129137f43f7fa02d41542ffff4871d4aefa724a5fe38e2c31a4e0fd343fb0"},
+ {file = "rpds_py-0.21.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:520ed8b99b0bf86a176271f6fe23024323862ac674b1ce5b02a72bfeff3fff44"},
+ {file = "rpds_py-0.21.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aaeb25ccfb9b9014a10eaf70904ebf3f79faaa8e60e99e19eef9f478651b9b74"},
+ {file = "rpds_py-0.21.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af04ac89c738e0f0f1b913918024c3eab6e3ace989518ea838807177d38a2e94"},
+ {file = "rpds_py-0.21.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b9b76e2afd585803c53c5b29e992ecd183f68285b62fe2668383a18e74abe7a3"},
+ {file = "rpds_py-0.21.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5afb5efde74c54724e1a01118c6e5c15e54e642c42a1ba588ab1f03544ac8c7a"},
+ {file = "rpds_py-0.21.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:52c041802a6efa625ea18027a0723676a778869481d16803481ef6cc02ea8cb3"},
+ {file = "rpds_py-0.21.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ee1e4fc267b437bb89990b2f2abf6c25765b89b72dd4a11e21934df449e0c976"},
+ {file = "rpds_py-0.21.0-cp313-none-win32.whl", hash = "sha256:0c025820b78817db6a76413fff6866790786c38f95ea3f3d3c93dbb73b632202"},
+ {file = "rpds_py-0.21.0-cp313-none-win_amd64.whl", hash = "sha256:320c808df533695326610a1b6a0a6e98f033e49de55d7dc36a13c8a30cfa756e"},
+ {file = "rpds_py-0.21.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:2c51d99c30091f72a3c5d126fad26236c3f75716b8b5e5cf8effb18889ced928"},
+ {file = "rpds_py-0.21.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cbd7504a10b0955ea287114f003b7ad62330c9e65ba012c6223dba646f6ffd05"},
+ {file = "rpds_py-0.21.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6dcc4949be728ede49e6244eabd04064336012b37f5c2200e8ec8eb2988b209c"},
+ {file = "rpds_py-0.21.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f414da5c51bf350e4b7960644617c130140423882305f7574b6cf65a3081cecb"},
+ {file = "rpds_py-0.21.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9afe42102b40007f588666bc7de82451e10c6788f6f70984629db193849dced1"},
+ {file = "rpds_py-0.21.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b929c2bb6e29ab31f12a1117c39f7e6d6450419ab7464a4ea9b0b417174f044"},
+ {file = "rpds_py-0.21.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8404b3717da03cbf773a1d275d01fec84ea007754ed380f63dfc24fb76ce4592"},
+ {file = "rpds_py-0.21.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e12bb09678f38b7597b8346983d2323a6482dcd59e423d9448108c1be37cac9d"},
+ {file = "rpds_py-0.21.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:58a0e345be4b18e6b8501d3b0aa540dad90caeed814c515e5206bb2ec26736fd"},
+ {file = "rpds_py-0.21.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:c3761f62fcfccf0864cc4665b6e7c3f0c626f0380b41b8bd1ce322103fa3ef87"},
+ {file = "rpds_py-0.21.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:c2b2f71c6ad6c2e4fc9ed9401080badd1469fa9889657ec3abea42a3d6b2e1ed"},
+ {file = "rpds_py-0.21.0-cp39-none-win32.whl", hash = "sha256:b21747f79f360e790525e6f6438c7569ddbfb1b3197b9e65043f25c3c9b489d8"},
+ {file = "rpds_py-0.21.0-cp39-none-win_amd64.whl", hash = "sha256:0626238a43152918f9e72ede9a3b6ccc9e299adc8ade0d67c5e142d564c9a83d"},
+ {file = "rpds_py-0.21.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:6b4ef7725386dc0762857097f6b7266a6cdd62bfd209664da6712cb26acef035"},
+ {file = "rpds_py-0.21.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:6bc0e697d4d79ab1aacbf20ee5f0df80359ecf55db33ff41481cf3e24f206919"},
+ {file = "rpds_py-0.21.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da52d62a96e61c1c444f3998c434e8b263c384f6d68aca8274d2e08d1906325c"},
+ {file = "rpds_py-0.21.0-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:98e4fe5db40db87ce1c65031463a760ec7906ab230ad2249b4572c2fc3ef1f9f"},
+ {file = "rpds_py-0.21.0-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:30bdc973f10d28e0337f71d202ff29345320f8bc49a31c90e6c257e1ccef4333"},
+ {file = "rpds_py-0.21.0-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:faa5e8496c530f9c71f2b4e1c49758b06e5f4055e17144906245c99fa6d45356"},
+ {file = "rpds_py-0.21.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:32eb88c30b6a4f0605508023b7141d043a79b14acb3b969aa0b4f99b25bc7d4a"},
+ {file = "rpds_py-0.21.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a89a8ce9e4e75aeb7fa5d8ad0f3fecdee813802592f4f46a15754dcb2fd6b061"},
+ {file = "rpds_py-0.21.0-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:241e6c125568493f553c3d0fdbb38c74babf54b45cef86439d4cd97ff8feb34d"},
+ {file = "rpds_py-0.21.0-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:3b766a9f57663396e4f34f5140b3595b233a7b146e94777b97a8413a1da1be18"},
+ {file = "rpds_py-0.21.0-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:af4a644bf890f56e41e74be7d34e9511e4954894d544ec6b8efe1e21a1a8da6c"},
+ {file = "rpds_py-0.21.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:3e30a69a706e8ea20444b98a49f386c17b26f860aa9245329bab0851ed100677"},
+ {file = "rpds_py-0.21.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:031819f906bb146561af051c7cef4ba2003d28cff07efacef59da973ff7969ba"},
+ {file = "rpds_py-0.21.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:b876f2bc27ab5954e2fd88890c071bd0ed18b9c50f6ec3de3c50a5ece612f7a6"},
+ {file = "rpds_py-0.21.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dc5695c321e518d9f03b7ea6abb5ea3af4567766f9852ad1560f501b17588c7b"},
+ {file = "rpds_py-0.21.0-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b4de1da871b5c0fd5537b26a6fc6814c3cc05cabe0c941db6e9044ffbb12f04a"},
+ {file = "rpds_py-0.21.0-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:878f6fea96621fda5303a2867887686d7a198d9e0f8a40be100a63f5d60c88c9"},
+ {file = "rpds_py-0.21.0-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8eeec67590e94189f434c6d11c426892e396ae59e4801d17a93ac96b8c02a6c"},
+ {file = "rpds_py-0.21.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ff2eba7f6c0cb523d7e9cff0903f2fe1feff8f0b2ceb6bd71c0e20a4dcee271"},
+ {file = "rpds_py-0.21.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a429b99337062877d7875e4ff1a51fe788424d522bd64a8c0a20ef3021fdb6ed"},
+ {file = "rpds_py-0.21.0-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:d167e4dbbdac48bd58893c7e446684ad5d425b407f9336e04ab52e8b9194e2ed"},
+ {file = "rpds_py-0.21.0-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:4eb2de8a147ffe0626bfdc275fc6563aa7bf4b6db59cf0d44f0ccd6ca625a24e"},
+ {file = "rpds_py-0.21.0-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:e78868e98f34f34a88e23ee9ccaeeec460e4eaf6db16d51d7a9b883e5e785a5e"},
+ {file = "rpds_py-0.21.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:4991ca61656e3160cdaca4851151fd3f4a92e9eba5c7a530ab030d6aee96ec89"},
+ {file = "rpds_py-0.21.0.tar.gz", hash = "sha256:ed6378c9d66d0de903763e7706383d60c33829581f0adff47b6535f1802fa6db"},
]

[[package]]
@@ -1737,26 +1755,6 @@ files = [
{file = "ruff-0.5.7.tar.gz", hash = "sha256:8dfc0a458797f5d9fb622dd0efc52d796f23f0a1493a9527f4e49a550ae9a7e5"},
]
-[[package]]
-name = "setuptools"
-version = "75.3.0"
-description = "Easily download, build, install, upgrade, and uninstall Python packages"
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "setuptools-75.3.0-py3-none-any.whl", hash = "sha256:f2504966861356aa38616760c0f66568e535562374995367b4e69c7143cf6bcd"},
- {file = "setuptools-75.3.0.tar.gz", hash = "sha256:fba5dd4d766e97be1b1681d98712680ae8f2f26d7881245f2ce9e40714f1a686"},
-]
-
-[package.extras]
-check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)", "ruff (>=0.5.2)"]
-core = ["importlib-metadata (>=6)", "importlib-resources (>=5.10.2)", "jaraco.collections", "jaraco.functools", "jaraco.text (>=3.7)", "more-itertools", "more-itertools (>=8.8)", "packaging", "packaging (>=24)", "platformdirs (>=4.2.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"]
-cover = ["pytest-cov"]
-doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"]
-enabler = ["pytest-enabler (>=2.2)"]
-test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test (>=5.5)", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"]
-type = ["importlib-metadata (>=7.0.2)", "jaraco.develop (>=7.21)", "mypy (==1.12.*)", "pytest-mypy"]
-
[[package]]
name = "six"
version = "1.16.0"
@@ -1828,111 +1826,123 @@ blobfile = ["blobfile (>=2)"]
[[package]]
name = "tokenizers"
-version = "0.20.1"
+version = "0.20.3"
description = ""
optional = false
python-versions = ">=3.7"
files = [
- {file = "tokenizers-0.20.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:439261da7c0a5c88bda97acb284d49fbdaf67e9d3b623c0bfd107512d22787a9"},
- {file = "tokenizers-0.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:03dae629d99068b1ea5416d50de0fea13008f04129cc79af77a2a6392792d93c"},
- {file = "tokenizers-0.20.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b61f561f329ffe4b28367798b89d60c4abf3f815d37413b6352bc6412a359867"},
- {file = "tokenizers-0.20.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ec870fce1ee5248a10be69f7a8408a234d6f2109f8ea827b4f7ecdbf08c9fd15"},
- {file = "tokenizers-0.20.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d388d1ea8b7447da784e32e3b86a75cce55887e3b22b31c19d0b186b1c677800"},
- {file = "tokenizers-0.20.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:299c85c1d21135bc01542237979bf25c32efa0d66595dd0069ae259b97fb2dbe"},
- {file = "tokenizers-0.20.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e96f6c14c9752bb82145636b614d5a78e9cde95edfbe0a85dad0dd5ddd6ec95c"},
- {file = "tokenizers-0.20.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc9e95ad49c932b80abfbfeaf63b155761e695ad9f8a58c52a47d962d76e310f"},
- {file = "tokenizers-0.20.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:f22dee205329a636148c325921c73cf3e412e87d31f4d9c3153b302a0200057b"},
- {file = "tokenizers-0.20.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a2ffd9a8895575ac636d44500c66dffaef133823b6b25067604fa73bbc5ec09d"},
- {file = "tokenizers-0.20.1-cp310-none-win32.whl", hash = "sha256:2847843c53f445e0f19ea842a4e48b89dd0db4e62ba6e1e47a2749d6ec11f50d"},
- {file = "tokenizers-0.20.1-cp310-none-win_amd64.whl", hash = "sha256:f9aa93eacd865f2798b9e62f7ce4533cfff4f5fbd50c02926a78e81c74e432cd"},
- {file = "tokenizers-0.20.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:4a717dcb08f2dabbf27ae4b6b20cbbb2ad7ed78ce05a829fae100ff4b3c7ff15"},
- {file = "tokenizers-0.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3f84dad1ff1863c648d80628b1b55353d16303431283e4efbb6ab1af56a75832"},
- {file = "tokenizers-0.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:929c8f3afa16a5130a81ab5079c589226273ec618949cce79b46d96e59a84f61"},
- {file = "tokenizers-0.20.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d10766473954397e2d370f215ebed1cc46dcf6fd3906a2a116aa1d6219bfedc3"},
- {file = "tokenizers-0.20.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9300fac73ddc7e4b0330acbdda4efaabf74929a4a61e119a32a181f534a11b47"},
- {file = "tokenizers-0.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0ecaf7b0e39caeb1aa6dd6e0975c405716c82c1312b55ac4f716ef563a906969"},
- {file = "tokenizers-0.20.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5170be9ec942f3d1d317817ced8d749b3e1202670865e4fd465e35d8c259de83"},
- {file = "tokenizers-0.20.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ef3f1ae08fa9aea5891cbd69df29913e11d3841798e0bfb1ff78b78e4e7ea0a4"},
- {file = "tokenizers-0.20.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ee86d4095d3542d73579e953c2e5e07d9321af2ffea6ecc097d16d538a2dea16"},
- {file = "tokenizers-0.20.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:86dcd08da163912e17b27bbaba5efdc71b4fbffb841530fdb74c5707f3c49216"},
- {file = "tokenizers-0.20.1-cp311-none-win32.whl", hash = "sha256:9af2dc4ee97d037bc6b05fa4429ddc87532c706316c5e11ce2f0596dfcfa77af"},
- {file = "tokenizers-0.20.1-cp311-none-win_amd64.whl", hash = "sha256:899152a78b095559c287b4c6d0099469573bb2055347bb8154db106651296f39"},
- {file = "tokenizers-0.20.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:407ab666b38e02228fa785e81f7cf79ef929f104bcccf68a64525a54a93ceac9"},
- {file = "tokenizers-0.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2f13a2d16032ebc8bd812eb8099b035ac65887d8f0c207261472803b9633cf3e"},
- {file = "tokenizers-0.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e98eee4dca22849fbb56a80acaa899eec5b72055d79637dd6aa15d5e4b8628c9"},
- {file = "tokenizers-0.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:47c1bcdd61e61136087459cb9e0b069ff23b5568b008265e5cbc927eae3387ce"},
- {file = "tokenizers-0.20.1-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:128c1110e950534426e2274837fc06b118ab5f2fa61c3436e60e0aada0ccfd67"},
- {file = "tokenizers-0.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e2e2d47a819d2954f2c1cd0ad51bb58ffac6f53a872d5d82d65d79bf76b9896d"},
- {file = "tokenizers-0.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bdd67a0e3503a9a7cf8bc5a4a49cdde5fa5bada09a51e4c7e1c73900297539bd"},
- {file = "tokenizers-0.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:689b93d2e26d04da337ac407acec8b5d081d8d135e3e5066a88edd5bdb5aff89"},
- {file = "tokenizers-0.20.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0c6a796ddcd9a19ad13cf146997cd5895a421fe6aec8fd970d69f9117bddb45c"},
- {file = "tokenizers-0.20.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3ea919687aa7001a8ff1ba36ac64f165c4e89035f57998fa6cedcfd877be619d"},
- {file = "tokenizers-0.20.1-cp312-none-win32.whl", hash = "sha256:6d3ac5c1f48358ffe20086bf065e843c0d0a9fce0d7f0f45d5f2f9fba3609ca5"},
- {file = "tokenizers-0.20.1-cp312-none-win_amd64.whl", hash = "sha256:b0874481aea54a178f2bccc45aa2d0c99cd3f79143a0948af6a9a21dcc49173b"},
- {file = "tokenizers-0.20.1-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:96af92e833bd44760fb17f23f402e07a66339c1dcbe17d79a9b55bb0cc4f038e"},
- {file = "tokenizers-0.20.1-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:65f34e5b731a262dfa562820818533c38ce32a45864437f3d9c82f26c139ca7f"},
- {file = "tokenizers-0.20.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:17f98fccb5c12ab1ce1f471731a9cd86df5d4bd2cf2880c5a66b229802d96145"},
- {file = "tokenizers-0.20.1-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b8c0fc3542cf9370bf92c932eb71bdeb33d2d4aeeb4126d9fd567b60bd04cb30"},
- {file = "tokenizers-0.20.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4b39356df4575d37f9b187bb623aab5abb7b62c8cb702867a1768002f814800c"},
- {file = "tokenizers-0.20.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bfdad27b0e50544f6b838895a373db6114b85112ba5c0cefadffa78d6daae563"},
- {file = "tokenizers-0.20.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:094663dd0e85ee2e573126918747bdb40044a848fde388efb5b09d57bc74c680"},
- {file = "tokenizers-0.20.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14e4cf033a2aa207d7ac790e91adca598b679999710a632c4a494aab0fc3a1b2"},
- {file = "tokenizers-0.20.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:9310951c92c9fb91660de0c19a923c432f110dbfad1a2d429fbc44fa956bf64f"},
- {file = "tokenizers-0.20.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:05e41e302c315bd2ed86c02e917bf03a6cf7d2f652c9cee1a0eb0d0f1ca0d32c"},
- {file = "tokenizers-0.20.1-cp37-none-win32.whl", hash = "sha256:212231ab7dfcdc879baf4892ca87c726259fa7c887e1688e3f3cead384d8c305"},
- {file = "tokenizers-0.20.1-cp37-none-win_amd64.whl", hash = "sha256:896195eb9dfdc85c8c052e29947169c1fcbe75a254c4b5792cdbd451587bce85"},
- {file = "tokenizers-0.20.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:741fb22788482d09d68e73ece1495cfc6d9b29a06c37b3df90564a9cfa688e6d"},
- {file = "tokenizers-0.20.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:10be14ebd8082086a342d969e17fc2d6edc856c59dbdbddd25f158fa40eaf043"},
- {file = "tokenizers-0.20.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:514cf279b22fa1ae0bc08e143458c74ad3b56cd078b319464959685a35c53d5e"},
- {file = "tokenizers-0.20.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a647c5b7cb896d6430cf3e01b4e9a2d77f719c84cefcef825d404830c2071da2"},
- {file = "tokenizers-0.20.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7cdf379219e1e1dd432091058dab325a2e6235ebb23e0aec8d0508567c90cd01"},
- {file = "tokenizers-0.20.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ba72260449e16c4c2f6f3252823b059fbf2d31b32617e582003f2b18b415c39"},
- {file = "tokenizers-0.20.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:910b96ed87316e4277b23c7bcaf667ce849c7cc379a453fa179e7e09290eeb25"},
- {file = "tokenizers-0.20.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e53975a6694428a0586534cc1354b2408d4e010a3103117f617cbb550299797c"},
- {file = "tokenizers-0.20.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:07c4b7be58da142b0730cc4e5fd66bb7bf6f57f4986ddda73833cd39efef8a01"},
- {file = "tokenizers-0.20.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b605c540753e62199bf15cf69c333e934077ef2350262af2ccada46026f83d1c"},
- {file = "tokenizers-0.20.1-cp38-none-win32.whl", hash = "sha256:88b3bc76ab4db1ab95ead623d49c95205411e26302cf9f74203e762ac7e85685"},
- {file = "tokenizers-0.20.1-cp38-none-win_amd64.whl", hash = "sha256:d412a74cf5b3f68a90c615611a5aa4478bb303d1c65961d22db45001df68afcb"},
- {file = "tokenizers-0.20.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a25dcb2f41a0a6aac31999e6c96a75e9152fa0127af8ece46c2f784f23b8197a"},
- {file = "tokenizers-0.20.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a12c3cebb8c92e9c35a23ab10d3852aee522f385c28d0b4fe48c0b7527d59762"},
- {file = "tokenizers-0.20.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:02e18da58cf115b7c40de973609c35bde95856012ba42a41ee919c77935af251"},
- {file = "tokenizers-0.20.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f326a1ac51ae909b9760e34671c26cd0dfe15662f447302a9d5bb2d872bab8ab"},
- {file = "tokenizers-0.20.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0b4872647ea6f25224e2833b044b0b19084e39400e8ead3cfe751238b0802140"},
- {file = "tokenizers-0.20.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce6238a3311bb8e4c15b12600927d35c267b92a52c881ef5717a900ca14793f7"},
- {file = "tokenizers-0.20.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57b7a8880b208866508b06ce365dc631e7a2472a3faa24daa430d046fb56c885"},
- {file = "tokenizers-0.20.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a908c69c2897a68f412aa05ba38bfa87a02980df70f5a72fa8490479308b1f2d"},
- {file = "tokenizers-0.20.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:da1001aa46f4490099c82e2facc4fbc06a6a32bf7de3918ba798010954b775e0"},
- {file = "tokenizers-0.20.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:42c097390e2f0ed0a5c5d569e6669dd4e9fff7b31c6a5ce6e9c66a61687197de"},
- {file = "tokenizers-0.20.1-cp39-none-win32.whl", hash = "sha256:3d4d218573a3d8b121a1f8c801029d70444ffb6d8f129d4cca1c7b672ee4a24c"},
- {file = "tokenizers-0.20.1-cp39-none-win_amd64.whl", hash = "sha256:37d1e6f616c84fceefa7c6484a01df05caf1e207669121c66213cb5b2911d653"},
- {file = "tokenizers-0.20.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:48689da7a395df41114f516208d6550e3e905e1239cc5ad386686d9358e9cef0"},
- {file = "tokenizers-0.20.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:712f90ea33f9bd2586b4a90d697c26d56d0a22fd3c91104c5858c4b5b6489a79"},
- {file = "tokenizers-0.20.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:359eceb6a620c965988fc559cebc0a98db26713758ec4df43fb76d41486a8ed5"},
- {file = "tokenizers-0.20.1-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0d3caf244ce89d24c87545aafc3448be15870096e796c703a0d68547187192e1"},
- {file = "tokenizers-0.20.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03b03cf8b9a32254b1bf8a305fb95c6daf1baae0c1f93b27f2b08c9759f41dee"},
- {file = "tokenizers-0.20.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:218e5a3561561ea0f0ef1559c6d95b825308dbec23fb55b70b92589e7ff2e1e8"},
- {file = "tokenizers-0.20.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f40df5e0294a95131cc5f0e0eb91fe86d88837abfbee46b9b3610b09860195a7"},
- {file = "tokenizers-0.20.1-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:08aaa0d72bb65058e8c4b0455f61b840b156c557e2aca57627056624c3a93976"},
- {file = "tokenizers-0.20.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:998700177b45f70afeb206ad22c08d9e5f3a80639dae1032bf41e8cbc4dada4b"},
- {file = "tokenizers-0.20.1-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62f7fbd3c2c38b179556d879edae442b45f68312019c3a6013e56c3947a4e648"},
- {file = "tokenizers-0.20.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:31e87fca4f6bbf5cc67481b562147fe932f73d5602734de7dd18a8f2eee9c6dd"},
- {file = "tokenizers-0.20.1-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:956f21d359ae29dd51ca5726d2c9a44ffafa041c623f5aa33749da87cfa809b9"},
- {file = "tokenizers-0.20.1-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:1fbbaf17a393c78d8aedb6a334097c91cb4119a9ced4764ab8cfdc8d254dc9f9"},
- {file = "tokenizers-0.20.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:ebe63e31f9c1a970c53866d814e35ec2ec26fda03097c486f82f3891cee60830"},
- {file = "tokenizers-0.20.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:81970b80b8ac126910295f8aab2d7ef962009ea39e0d86d304769493f69aaa1e"},
- {file = "tokenizers-0.20.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:130e35e76f9337ed6c31be386e75d4925ea807055acf18ca1a9b0eec03d8fe23"},
- {file = "tokenizers-0.20.1-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cd28a8614f5c82a54ab2463554e84ad79526c5184cf4573bbac2efbbbcead457"},
- {file = "tokenizers-0.20.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9041ee665d0fa7f5c4ccf0f81f5e6b7087f797f85b143c094126fc2611fec9d0"},
- {file = "tokenizers-0.20.1-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:62eb9daea2a2c06bcd8113a5824af8ef8ee7405d3a71123ba4d52c79bb3d9f1a"},
- {file = "tokenizers-0.20.1-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f861889707b54a9ab1204030b65fd6c22bdd4a95205deec7994dc22a8baa2ea4"},
- {file = "tokenizers-0.20.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:89d5c337d74ea6e5e7dc8af124cf177be843bbb9ca6e58c01f75ea103c12c8a9"},
- {file = "tokenizers-0.20.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:0b7f515c83397e73292accdbbbedc62264e070bae9682f06061e2ddce67cacaf"},
- {file = "tokenizers-0.20.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e0305fc1ec6b1e5052d30d9c1d5c807081a7bd0cae46a33d03117082e91908c"},
- {file = "tokenizers-0.20.1-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5dc611e6ac0fa00a41de19c3bf6391a05ea201d2d22b757d63f5491ec0e67faa"},
- {file = "tokenizers-0.20.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c5ffe0d7f7bfcfa3b2585776ecf11da2e01c317027c8573c78ebcb8985279e23"},
- {file = "tokenizers-0.20.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:e7edb8ec12c100d5458d15b1e47c0eb30ad606a05641f19af7563bc3d1608c14"},
- {file = "tokenizers-0.20.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:de291633fb9303555793cc544d4a86e858da529b7d0b752bcaf721ae1d74b2c9"},
- {file = "tokenizers-0.20.1.tar.gz", hash = "sha256:84edcc7cdeeee45ceedb65d518fffb77aec69311c9c8e30f77ad84da3025f002"},
+ {file = "tokenizers-0.20.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:31ccab28dbb1a9fe539787210b0026e22debeab1662970f61c2d921f7557f7e4"},
+ {file = "tokenizers-0.20.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c6361191f762bda98c773da418cf511cbaa0cb8d0a1196f16f8c0119bde68ff8"},
+ {file = "tokenizers-0.20.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f128d5da1202b78fa0a10d8d938610472487da01b57098d48f7e944384362514"},
+ {file = "tokenizers-0.20.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:79c4121a2e9433ad7ef0769b9ca1f7dd7fa4c0cd501763d0a030afcbc6384481"},
+ {file = "tokenizers-0.20.3-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b7850fde24197fe5cd6556e2fdba53a6d3bae67c531ea33a3d7c420b90904141"},
+ {file = "tokenizers-0.20.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b357970c095dc134978a68c67d845a1e3803ab7c4fbb39195bde914e7e13cf8b"},
+ {file = "tokenizers-0.20.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a333d878c4970b72d6c07848b90c05f6b045cf9273fc2bc04a27211721ad6118"},
+ {file = "tokenizers-0.20.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1fd9fee817f655a8f50049f685e224828abfadd436b8ff67979fc1d054b435f1"},
+ {file = "tokenizers-0.20.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9e7816808b402129393a435ea2a509679b41246175d6e5e9f25b8692bfaa272b"},
+ {file = "tokenizers-0.20.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ba96367db9d8a730d3a1d5996b4b7babb846c3994b8ef14008cd8660f55db59d"},
+ {file = "tokenizers-0.20.3-cp310-none-win32.whl", hash = "sha256:ee31ba9d7df6a98619426283e80c6359f167e2e9882d9ce1b0254937dbd32f3f"},
+ {file = "tokenizers-0.20.3-cp310-none-win_amd64.whl", hash = "sha256:a845c08fdad554fe0871d1255df85772f91236e5fd6b9287ef8b64f5807dbd0c"},
+ {file = "tokenizers-0.20.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:585b51e06ca1f4839ce7759941e66766d7b060dccfdc57c4ca1e5b9a33013a90"},
+ {file = "tokenizers-0.20.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:61cbf11954f3b481d08723ebd048ba4b11e582986f9be74d2c3bdd9293a4538d"},
+ {file = "tokenizers-0.20.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ef820880d5e4e8484e2fa54ff8d297bb32519eaa7815694dc835ace9130a3eea"},
+ {file = "tokenizers-0.20.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:67ef4dcb8841a4988cd00dd288fb95dfc8e22ed021f01f37348fd51c2b055ba9"},
+ {file = "tokenizers-0.20.3-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ff1ef8bd47a02b0dc191688ccb4da53600df5d4c9a05a4b68e1e3de4823e78eb"},
+ {file = "tokenizers-0.20.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:444d188186eab3148baf0615b522461b41b1f0cd58cd57b862ec94b6ac9780f1"},
+ {file = "tokenizers-0.20.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:37c04c032c1442740b2c2d925f1857885c07619224a533123ac7ea71ca5713da"},
+ {file = "tokenizers-0.20.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:453c7769d22231960ee0e883d1005c93c68015025a5e4ae56275406d94a3c907"},
+ {file = "tokenizers-0.20.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4bb31f7b2847e439766aaa9cc7bccf7ac7088052deccdb2275c952d96f691c6a"},
+ {file = "tokenizers-0.20.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:843729bf0f991b29655a069a2ff58a4c24375a553c70955e15e37a90dd4e045c"},
+ {file = "tokenizers-0.20.3-cp311-none-win32.whl", hash = "sha256:efcce3a927b1e20ca694ba13f7a68c59b0bd859ef71e441db68ee42cf20c2442"},
+ {file = "tokenizers-0.20.3-cp311-none-win_amd64.whl", hash = "sha256:88301aa0801f225725b6df5dea3d77c80365ff2362ca7e252583f2b4809c4cc0"},
+ {file = "tokenizers-0.20.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:49d12a32e190fad0e79e5bdb788d05da2f20d8e006b13a70859ac47fecf6ab2f"},
+ {file = "tokenizers-0.20.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:282848cacfb9c06d5e51489f38ec5aa0b3cd1e247a023061945f71f41d949d73"},
+ {file = "tokenizers-0.20.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:abe4e08c7d0cd6154c795deb5bf81d2122f36daf075e0c12a8b050d824ef0a64"},
+ {file = "tokenizers-0.20.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ca94fc1b73b3883c98f0c88c77700b13d55b49f1071dfd57df2b06f3ff7afd64"},
+ {file = "tokenizers-0.20.3-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef279c7e239f95c8bdd6ff319d9870f30f0d24915b04895f55b1adcf96d6c60d"},
+ {file = "tokenizers-0.20.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:16384073973f6ccbde9852157a4fdfe632bb65208139c9d0c0bd0176a71fd67f"},
+ {file = "tokenizers-0.20.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:312d522caeb8a1a42ebdec87118d99b22667782b67898a76c963c058a7e41d4f"},
+ {file = "tokenizers-0.20.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2b7cb962564785a83dafbba0144ecb7f579f1d57d8c406cdaa7f32fe32f18ad"},
+ {file = "tokenizers-0.20.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:124c5882ebb88dadae1fc788a582299fcd3a8bd84fc3e260b9918cf28b8751f5"},
+ {file = "tokenizers-0.20.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2b6e54e71f84c4202111a489879005cb14b92616a87417f6c102c833af961ea2"},
+ {file = "tokenizers-0.20.3-cp312-none-win32.whl", hash = "sha256:83d9bfbe9af86f2d9df4833c22e94d94750f1d0cd9bfb22a7bb90a86f61cdb1c"},
+ {file = "tokenizers-0.20.3-cp312-none-win_amd64.whl", hash = "sha256:44def74cee574d609a36e17c8914311d1b5dbcfe37c55fd29369d42591b91cf2"},
+ {file = "tokenizers-0.20.3-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:e0b630e0b536ef0e3c8b42c685c1bc93bd19e98c0f1543db52911f8ede42cf84"},
+ {file = "tokenizers-0.20.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a02d160d2b19bcbfdf28bd9a4bf11be4cb97d0499c000d95d4c4b1a4312740b6"},
+ {file = "tokenizers-0.20.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e3d80d89b068bc30034034b5319218c7c0a91b00af19679833f55f3becb6945"},
+ {file = "tokenizers-0.20.3-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:174a54910bed1b089226512b4458ea60d6d6fd93060254734d3bc3540953c51c"},
+ {file = "tokenizers-0.20.3-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:098b8a632b8656aa5802c46689462c5c48f02510f24029d71c208ec2c822e771"},
+ {file = "tokenizers-0.20.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:78c8c143e3ae41e718588281eb3e212c2b31623c9d6d40410ec464d7d6221fb5"},
+ {file = "tokenizers-0.20.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b26b0aadb18cd8701077362ba359a06683662d5cafe3e8e8aba10eb05c037f1"},
+ {file = "tokenizers-0.20.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07d7851a72717321022f3774e84aa9d595a041d643fafa2e87fbc9b18711dac0"},
+ {file = "tokenizers-0.20.3-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:bd44e48a430ada902c6266a8245f5036c4fe744fcb51f699999fbe82aa438797"},
+ {file = "tokenizers-0.20.3-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:a4c186bb006ccbe1f5cc4e0380d1ce7806f5955c244074fd96abc55e27b77f01"},
+ {file = "tokenizers-0.20.3-cp313-none-win32.whl", hash = "sha256:6e19e0f1d854d6ab7ea0c743d06e764d1d9a546932be0a67f33087645f00fe13"},
+ {file = "tokenizers-0.20.3-cp313-none-win_amd64.whl", hash = "sha256:d50ede425c7e60966a9680d41b58b3a0950afa1bb570488e2972fa61662c4273"},
+ {file = "tokenizers-0.20.3-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:9adda1ff5fb9dcdf899ceca672a4e2ce9e797adb512a6467305ca3d8bfcfbdd0"},
+ {file = "tokenizers-0.20.3-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:6dde2cae6004ba7a3badff4a11911cae03ebf23e97eebfc0e71fef2530e5074f"},
+ {file = "tokenizers-0.20.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c4a7fd678b35614fca708579eb95b7587a5e8a6d328171bd2488fd9f27d82be4"},
+ {file = "tokenizers-0.20.3-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1b80e3c7283a01a356bd2210f53d1a4a5d32b269c2024389ed0173137708d50e"},
+ {file = "tokenizers-0.20.3-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a8cc0e8176b762973758a77f0d9c4467d310e33165fb74173418ca3734944da4"},
+ {file = "tokenizers-0.20.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d5634b2e2f5f3d2b4439d2d74066e22eb4b1f04f3fea05cb2a3c12d89b5a3bcd"},
+ {file = "tokenizers-0.20.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b4ba635165bc1ea46f2da8e5d80b5f70f6ec42161e38d96dbef33bb39df73964"},
+ {file = "tokenizers-0.20.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18e4c7c64172e7789bd8b07aa3087ea87c4c4de7e90937a2aa036b5d92332536"},
+ {file = "tokenizers-0.20.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1f74909ef7675c26d4095a817ec3393d67f3158ca4836c233212e5613ef640c4"},
+ {file = "tokenizers-0.20.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:0e9b81321a1e05b16487d312b4264984513f8b4a7556229cafac6e88c2036b09"},
+ {file = "tokenizers-0.20.3-cp37-none-win32.whl", hash = "sha256:ab48184cd58b4a03022a2ec75b54c9f600ffea9a733612c02325ed636f353729"},
+ {file = "tokenizers-0.20.3-cp37-none-win_amd64.whl", hash = "sha256:60ac483cebee1c12c71878523e768df02fa17e4c54412966cb3ac862c91b36c1"},
+ {file = "tokenizers-0.20.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:3229ef103c89583d10b9378afa5d601b91e6337530a0988e17ca8d635329a996"},
+ {file = "tokenizers-0.20.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6ac52cc24bad3de865c7e65b1c4e7b70d00938a8ae09a92a453b8f676e714ad5"},
+ {file = "tokenizers-0.20.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:04627b7b502fa6a2a005e1bd446fa4247d89abcb1afaa1b81eb90e21aba9a60f"},
+ {file = "tokenizers-0.20.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c27ceb887f0e81a3c377eb4605dca7a95a81262761c0fba308d627b2abb98f2b"},
+ {file = "tokenizers-0.20.3-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:65ab780194da4e1fcf5670523a2f377c4838ebf5249efe41fa1eddd2a84fb49d"},
+ {file = "tokenizers-0.20.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:98d343134f47159e81f7f242264b0eb222e6b802f37173c8d7d7b64d5c9d1388"},
+ {file = "tokenizers-0.20.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2475bb004ab2009d29aff13b5047bfdb3d4b474f0aa9d4faa13a7f34dbbbb43"},
+ {file = "tokenizers-0.20.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b6583a65c01db1197c1eb36857ceba8ec329d53afadd268b42a6b04f4965724"},
+ {file = "tokenizers-0.20.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:62d00ba208358c037eeab7bfc00a905adc67b2d31b68ab40ed09d75881e114ea"},
+ {file = "tokenizers-0.20.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:0fc7a39e5bedc817bda395a798dfe2d9c5f7c71153c90d381b5135a0328d9520"},
+ {file = "tokenizers-0.20.3-cp38-none-win32.whl", hash = "sha256:84d40ee0f8550d64d3ea92dd7d24a8557a9172165bdb986c9fb2503b4fe4e3b6"},
+ {file = "tokenizers-0.20.3-cp38-none-win_amd64.whl", hash = "sha256:205a45246ed7f1718cf3785cff88450ba603352412aaf220ace026384aa3f1c0"},
+ {file = "tokenizers-0.20.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:93e37f0269a11dc3b1a953f1fca9707f0929ebf8b4063c591c71a0664219988e"},
+ {file = "tokenizers-0.20.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f4cb0c614b0135e781de96c2af87e73da0389ac1458e2a97562ed26e29490d8d"},
+ {file = "tokenizers-0.20.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7eb2fb1c432f5746b22f8a7f09fc18c4156cb0031c77f53cb19379d82d43297a"},
+ {file = "tokenizers-0.20.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bfa8d029bb156181b006643309d6b673615a24e4ed24cf03aa191d599b996f51"},
+ {file = "tokenizers-0.20.3-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f90549622de3bf476ad9f1dd6f3f952ec3ed6ab8615ae88ef060d0c5bfad55d"},
+ {file = "tokenizers-0.20.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a1d469c74eebf5c43fd61cd9b030e271d17198edd7bd45392e03a3c091d7d6d4"},
+ {file = "tokenizers-0.20.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bee8f53b2594749f4460d53253bae55d718f04e9b633efa0f5df8938bd98e4f0"},
+ {file = "tokenizers-0.20.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:938441babf3e5720e4459e306ef2809fb267680df9d1ff2873458b22aef60248"},
+ {file = "tokenizers-0.20.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7310ab23d7b0caebecc0e8be11a1146f320f5f07284000f6ea54793e83de1b75"},
+ {file = "tokenizers-0.20.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:16121eb030a2b13094cfec936b0c12e8b4063c5f839591ea7d0212336d8f9921"},
+ {file = "tokenizers-0.20.3-cp39-none-win32.whl", hash = "sha256:401cc21ef642ee235985d747f65e18f639464d377c70836c9003df208d582064"},
+ {file = "tokenizers-0.20.3-cp39-none-win_amd64.whl", hash = "sha256:7498f3ea7746133335a6adb67a77cf77227a8b82c8483f644a2e5f86fea42b8d"},
+ {file = "tokenizers-0.20.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e919f2e3e68bb51dc31de4fcbbeff3bdf9c1cad489044c75e2b982a91059bd3c"},
+ {file = "tokenizers-0.20.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b8e9608f2773996cc272156e305bd79066163a66b0390fe21750aff62df1ac07"},
+ {file = "tokenizers-0.20.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:39270a7050deaf50f7caff4c532c01b3c48f6608d42b3eacdebdc6795478c8df"},
+ {file = "tokenizers-0.20.3-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e005466632b1c5d2d2120f6de8aa768cc9d36cd1ab7d51d0c27a114c91a1e6ee"},
+ {file = "tokenizers-0.20.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a07962340b36189b6c8feda552ea1bfeee6cf067ff922a1d7760662c2ee229e5"},
+ {file = "tokenizers-0.20.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:55046ad3dd5f2b3c67501fcc8c9cbe3e901d8355f08a3b745e9b57894855f85b"},
+ {file = "tokenizers-0.20.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:efcf0eb939988b627558aaf2b9dc3e56d759cad2e0cfa04fcab378e4b48fc4fd"},
+ {file = "tokenizers-0.20.3-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f3558a7ae6a6d38a77dfce12172a1e2e1bf3e8871e744a1861cd7591ea9ebe24"},
+ {file = "tokenizers-0.20.3-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d53029fe44bc70c3ff14ef512460a0cf583495a0f8e2f4b70e26eb9438e38a9"},
+ {file = "tokenizers-0.20.3-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57a2a56397b2bec5a629b516b23f0f8a3e4f978c7488d4a299980f8375954b85"},
+ {file = "tokenizers-0.20.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1e5bfaae740ef9ece000f8a07e78ac0e2b085c5ce9648f8593ddf0243c9f76d"},
+ {file = "tokenizers-0.20.3-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:fbaf3ea28fedfb2283da60e710aff25492e795a7397cad8a50f1e079b65a5a70"},
+ {file = "tokenizers-0.20.3-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:c47c037116310dc976eb96b008e41b9cfaba002ed8005848d4d632ee0b7ba9ae"},
+ {file = "tokenizers-0.20.3-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c31751f0721f58f5e19bb27c1acc259aeff860d8629c4e1a900b26a1979ada8e"},
+ {file = "tokenizers-0.20.3-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:c697cbd3be7a79ea250ea5f380d6f12e534c543cfb137d5c734966b3ee4f34cc"},
+ {file = "tokenizers-0.20.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b48971b88ef9130bf35b41b35fd857c3c4dae4a9cd7990ebc7fc03e59cc92438"},
+ {file = "tokenizers-0.20.3-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4e615de179bbe060ab33773f0d98a8a8572b5883dd7dac66c1de8c056c7e748c"},
+ {file = "tokenizers-0.20.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da1ec842035ed9999c62e45fbe0ff14b7e8a7e02bb97688cc6313cf65e5cd755"},
+ {file = "tokenizers-0.20.3-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:6ee4954c1dd23aadc27958dad759006e71659d497dcb0ef0c7c87ea992c16ebd"},
+ {file = "tokenizers-0.20.3-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:3eda46ca402751ec82553a321bf35a617b76bbed7586e768c02ccacbdda94d6d"},
+ {file = "tokenizers-0.20.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:de082392a85eb0055cc055c535bff2f0cc15d7a000bdc36fbf601a0f3cf8507a"},
+ {file = "tokenizers-0.20.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:c3db46cc0647bfd88263afdb739b92017a02a87ee30945cb3e86c7e25c7c9917"},
+ {file = "tokenizers-0.20.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a292392f24ab9abac5cfa8197e5a6208f2e43723420217e1ceba0b4ec77816ac"},
+ {file = "tokenizers-0.20.3-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8dcd91f4e60f62b20d83a87a84fe062035a1e3ff49a8c2bbdeb2d441c8e311f4"},
+ {file = "tokenizers-0.20.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:900991a2b8ee35961b1095db7e265342e0e42a84c1a594823d5ee9f8fb791958"},
+ {file = "tokenizers-0.20.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:5a8d8261ca2133d4f98aa9627c748189502b3787537ba3d7e2beb4f7cfc5d627"},
+ {file = "tokenizers-0.20.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:c4fd4d71e6deb6ddf99d8d0eab87d1d16f635898906e631914a9bae8ae9f2cfb"},
+ {file = "tokenizers-0.20.3.tar.gz", hash = "sha256:2278b34c5d0dd78e087e1ca7f9b1dcbf129d80211afa645f214bd6e051037539"},
]
[package.dependencies]
@@ -1945,24 +1955,24 @@ testing = ["black (==22.3)", "datasets", "numpy", "pytest", "requests", "ruff"]
[[package]]
name = "tomli"
-version = "2.0.2"
+version = "2.1.0"
description = "A lil' TOML parser"
optional = false
python-versions = ">=3.8"
files = [
- {file = "tomli-2.0.2-py3-none-any.whl", hash = "sha256:2ebe24485c53d303f690b0ec092806a085f07af5a5aa1464f3931eec36caaa38"},
- {file = "tomli-2.0.2.tar.gz", hash = "sha256:d46d457a85337051c36524bc5349dd91b1877838e2979ac5ced3e710ed8a60ed"},
+ {file = "tomli-2.1.0-py3-none-any.whl", hash = "sha256:a5c57c3d1c56f5ccdf89f6523458f60ef716e210fc47c4cfb188c5ba473e0391"},
+ {file = "tomli-2.1.0.tar.gz", hash = "sha256:3f646cae2aec94e17d04973e4249548320197cfabdf130015d023de4b74d8ab8"},
]
[[package]]
name = "tqdm"
-version = "4.66.6"
+version = "4.67.0"
description = "Fast, Extensible Progress Meter"
optional = false
python-versions = ">=3.7"
files = [
- {file = "tqdm-4.66.6-py3-none-any.whl", hash = "sha256:223e8b5359c2efc4b30555531f09e9f2f3589bcd7fdd389271191031b49b7a63"},
- {file = "tqdm-4.66.6.tar.gz", hash = "sha256:4bdd694238bef1485ce839d67967ab50af8f9272aab687c0d7702a01da0be090"},
+ {file = "tqdm-4.67.0-py3-none-any.whl", hash = "sha256:0cd8af9d56911acab92182e88d763100d4788bdf421d251616040cc4d44863be"},
+ {file = "tqdm-4.67.0.tar.gz", hash = "sha256:fe5a6f95e6fe0b9755e9469b77b9c3cf850048224ecaa8293d7d2d31f97d869a"},
]
[package.dependencies]
@@ -1970,6 +1980,7 @@ colorama = {version = "*", markers = "platform_system == \"Windows\""}
[package.extras]
dev = ["pytest (>=6)", "pytest-cov", "pytest-timeout", "pytest-xdist"]
+discord = ["requests"]
notebook = ["ipywidgets (>=6)"]
slack = ["slack-sdk"]
telegram = ["requests"]
@@ -2133,13 +2144,13 @@ files = [
[[package]]
name = "zipp"
-version = "3.20.2"
+version = "3.21.0"
description = "Backport of pathlib-compatible object wrapper for zip files"
optional = false
-python-versions = ">=3.8"
+python-versions = ">=3.9"
files = [
- {file = "zipp-3.20.2-py3-none-any.whl", hash = "sha256:a817ac80d6cf4b23bf7f2828b7cabf326f15a001bea8b1f9b49631780ba28350"},
- {file = "zipp-3.20.2.tar.gz", hash = "sha256:bc9eb26f4506fda01b81bcde0ca78103b6e62f991b381fec825435c836edbc29"},
+ {file = "zipp-3.21.0-py3-none-any.whl", hash = "sha256:ac1bbe05fd2991f160ebce24ffbac5f6d11d83dc90891255885223d42b3cd931"},
+ {file = "zipp-3.21.0.tar.gz", hash = "sha256:2c9958f6430a2040341a52eb608ed6dd93ef4392e02ffe219417c1b28b5dd1f4"},
]
[package.extras]
@@ -2152,5 +2163,5 @@ type = ["pytest-mypy"]
[metadata]
lock-version = "2.0"
-python-versions = "^3.9"
-content-hash = "7885364fa002fb5d2dbb37bc97ebd71bf409573eddec0a520d5c370f89d8b9aa"
+python-versions = ">=3.9,<4"
+content-hash = "8c31e20928f328e20d6f769647f8bb3a064ca9ec1a7b53ed7111f1d9dcff5eb0"
diff --git a/pyproject.toml b/pyproject.toml
index 4721487f..ff1effa0 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -31,27 +31,28 @@ packages = [
Repository = 'https://github.com/humanloop/humanloop-python'
[tool.poetry.dependencies]
-python = "^3.9"
+python = ">=3.9,<4"
httpx = ">=0.21.2"
httpx-sse = "0.4.0"
pydantic = ">= 1.9.2"
pydantic-core = "^2.18.2"
typing_extensions = ">= 4.0.0"
parse = "^1.20.2"
-opentelemetry-sdk = "<=1.27.0"
-opentelemetry-api = "<=1.27.0"
-opentelemetry-instrumentation-openai = "<=0.33.3"
-opentelemetry-instrumentation-cohere = "<=0.33.3"
-opentelemetry-instrumentation-anthropic = "<=0.33.3"
-opentelemetry-instrumentation-groq = "<=0.33.3"
-opentelemetry-instrumentation-replicate = "<=0.33.3"
+opentelemetry-sdk = ">=1.20.0"
+opentelemetry-api = ">=1.20.0"
+opentelemetry-instrumentation-openai = ">=0.30"
+opentelemetry-instrumentation-cohere = ">=0.30"
+opentelemetry-instrumentation-anthropic = ">=0.30"
+opentelemetry-instrumentation-groq = ">=0.33.11"
+opentelemetry-instrumentation-replicate = ">=0.30"
+opentelemetry-instrumentation-bedrock = ">=0.0.1"
[tool.poetry.group.dev.dependencies]
-parse-type = "^0.6.4"
-anthropic = "^0.37.1"
-groq = "^0.11.0"
-cohere = "^5.11.2"
-replicate = "^1.0.3"
+parse-type = ">=0.6.4"
+anthropic = ">=0.37.1"
+groq = ">=0.11.0"
+cohere = ">=3.0"
+replicate = ">=1.0.3"
jsonschema = "^4.23.0"
types-jsonschema = "^4.23.0.20240813"
mypy = "^1.0.1"
@@ -62,7 +63,7 @@ types-python-dateutil = "^2.9.0.20240316"
ruff = "^0.5.6"
python-dotenv = "^1.0.1"
openai = "^1.52.2"
-pandas = "^2.2.3"
+pandas = ">=1.3.2"
[tool.pytest.ini_options]
testpaths = [ "tests" ]
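Note: the pyproject.toml hunk above relaxes Poetry's caret pins to plain lower bounds so the SDK installs cleanly next to applications that resolve these packages themselves. The `python` constraint is unchanged in effect, since `^3.9` and `>=3.9,<4` describe the same range. A quick sketch using the `packaging` library (an assumption, not a dependency of this repo; any PEP 440 evaluator behaves the same) shows the equivalence:

```python
# Sketch: Poetry's caret "^3.9" expands to ">=3.9,<4" in PEP 440 terms.
from packaging.specifiers import SpecifierSet

spec = SpecifierSet(">=3.9,<4")
for version in ("3.8.10", "3.9.0", "3.12.1", "4.0.0"):
    print(version, spec.contains(version))
# 3.8.10 False, 3.9.0 True, 3.12.1 True, 4.0.0 False
```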
diff --git a/src/humanloop/decorators/flow.py b/src/humanloop/decorators/flow.py
index e39a61e7..d5637fcf 100644
--- a/src/humanloop/decorators/flow.py
+++ b/src/humanloop/decorators/flow.py
@@ -1,4 +1,3 @@
-import json
import logging
from functools import wraps
from typing import Any, Callable, Mapping, Optional, Sequence
@@ -64,7 +63,7 @@ def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any:
# Call the decorated function
try:
output = func(*args, **kwargs)
- output = jsonify_if_not_string(
+ output_stringified = jsonify_if_not_string(
func=func,
output=output,
)
@@ -72,11 +71,15 @@ def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any:
except Exception as e:
logger.error(f"Error calling {func.__name__}: {e}")
output = None
+ output_stringified = jsonify_if_not_string(
+ func=func,
+ output=None,
+ )
error = str(e)
flow_log = {
"inputs": inputs,
- "output": output,
+ "output": output_stringified,
"error": error,
}
if inputs:
diff --git a/src/humanloop/decorators/prompt.py b/src/humanloop/decorators/prompt.py
index 2d1eb569..caffb512 100644
--- a/src/humanloop/decorators/prompt.py
+++ b/src/humanloop/decorators/prompt.py
@@ -54,7 +54,7 @@ def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any:
# Call the decorated function
try:
output = func(*args, **kwargs)
- output = jsonify_if_not_string(
+ output_stringified = jsonify_if_not_string(
func=func,
output=output,
)
@@ -62,11 +62,15 @@ def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any:
except Exception as e:
logger.error(f"Error calling {func.__name__}: {e}")
output = None
+ output_stringified = jsonify_if_not_string(
+ func=func,
+ output=output,
+ )
error = str(e)
prompt_log = {
"inputs": args_to_inputs(func, args, kwargs),
- "output": output,
+ "output": output_stringified,
"error": error,
}
write_to_opentelemetry_span(
diff --git a/src/humanloop/decorators/tool.py b/src/humanloop/decorators/tool.py
index bfc3e786..c7e38293 100644
--- a/src/humanloop/decorators/tool.py
+++ b/src/humanloop/decorators/tool.py
@@ -72,7 +72,7 @@ def wrapper(*args, **kwargs):
# Call the decorated function
try:
output = func(*args, **kwargs)
- output = jsonify_if_not_string(
+ output_stringified = jsonify_if_not_string(
func=func,
output=output,
)
@@ -80,12 +80,16 @@ def wrapper(*args, **kwargs):
except Exception as e:
logger.error(f"Error calling {func.__name__}: {e}")
output = None
+ output_stringified = jsonify_if_not_string(
+ func=func,
+ output=output,
+ )
error = str(e)
# Populate known Tool Log attributes
tool_log = {
"inputs": args_to_inputs(func, args, kwargs),
- "output": output,
+ "output": output_stringified,
"error": error,
}
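Note: the same fix lands in the flow, prompt, and tool decorators. The raw `output` returned to the caller is kept apart from `output_stringified`, which is the value written to the log, so decorating a function no longer alters what it returns; the updated test in tests/decorators/test_tool_decorator.py below accordingly asserts `hl_log["output"] == str(result)`. A minimal sketch of the pattern follows; the body of `jsonify_if_not_string` here is an assumption based on its name and the test expectations:

```python
import json
from typing import Any, Callable

def jsonify_if_not_string(func: Callable, output: Any) -> str:
    # Assumed behavior: strings pass through untouched, anything else is
    # JSON-encoded so the logged value is always a string.
    if isinstance(output, str):
        return output
    return json.dumps(output)

def wrapper_sketch(func: Callable, *args: Any, **kwargs: Any) -> Any:
    error = None
    try:
        output = func(*args, **kwargs)  # raw value handed back to the caller
        output_stringified = jsonify_if_not_string(func=func, output=output)
    except Exception as e:
        output = None
        output_stringified = jsonify_if_not_string(func=func, output=None)
        error = str(e)
    log = {"output": output_stringified, "error": error}  # string form is logged
    return output  # the caller still receives the untouched return value
```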
diff --git a/src/humanloop/otel/__init__.py b/src/humanloop/otel/__init__.py
index f0c4cb1d..0a1eab92 100644
--- a/src/humanloop/otel/__init__.py
+++ b/src/humanloop/otel/__init__.py
@@ -37,6 +37,11 @@ def instrument_provider(provider: TracerProvider):
ReplicateInstrumentor().instrument(tracer_provider=provider)
+ if module_is_installed("boto3"):
+ from opentelemetry.instrumentation.bedrock import BedrockInstrumentor
+
+ BedrockInstrumentor().instrument(tracer_provider=provider)
+
class FlowContext(TypedDict):
trace_id: NotRequired[str]
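Note: `instrument_provider` only activates an instrumentor when the matching client library can be imported, and the new Bedrock hook follows that pattern: guard on `module_is_installed("boto3")`, then import and instrument lazily. A standalone sketch of the guard; `module_is_installed` is the repo's own helper, and this stand-in built on `importlib.util.find_spec` is an assumption:

```python
import importlib.util

from opentelemetry.sdk.trace import TracerProvider

def module_is_installed(name: str) -> bool:
    # find_spec returns None for a top-level module that is not installed,
    # so the package itself is never imported just to check for it.
    return importlib.util.find_spec(name) is not None

def instrument_bedrock(provider: TracerProvider) -> None:
    # Import the instrumentor only when boto3 is present, mirroring the
    # lazy guards already used for the other provider libraries.
    if module_is_installed("boto3"):
        from opentelemetry.instrumentation.bedrock import BedrockInstrumentor

        BedrockInstrumentor().instrument(tracer_provider=provider)
```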
diff --git a/tests/decorators/test_tool_decorator.py b/tests/decorators/test_tool_decorator.py
index d9f133ee..340fbf75 100644
--- a/tests/decorators/test_tool_decorator.py
+++ b/tests/decorators/test_tool_decorator.py
@@ -31,12 +31,13 @@ def calculator(operation: str, num1: float, num2: float) -> float:
# WHEN calling the @tool decorated function
result = calculator(operation="add", num1=1, num2=2)
+ assert result == 3
# THEN a single span is created and the log and file attributes are correctly set
spans = exporter.get_finished_spans()
assert len(spans) == 1
hl_file: dict[str, Any] = read_from_opentelemetry_span(span=spans[0], key=HUMANLOOP_FILE_KEY)
hl_log: dict[str, Any] = read_from_opentelemetry_span(span=spans[0], key=HUMANLOOP_LOG_KEY)
- assert hl_log["output"] == result == 3
+ assert hl_log["output"] == str(result) == "3"
assert hl_log["inputs"] == {
"operation": "add",
"num1": 1,
@@ -408,7 +409,7 @@ def foo_bar(foo: Foo):
return foo.a + foo.b # type: ignore
# THEN a ValueError is raised
- assert exc.value.args[0].startswith("foo_bar: Unsupported type hint")
+ assert exc.value.args[0].startswith("Error parsing signature of @tool annotated function foo_bar")
def test_tool_as_higher_order_function(
From 03261c4f0b6c1278a19c386020670c36605de5d2 Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Tue, 12 Nov 2024 14:59:34 +0000
Subject: [PATCH 65/70] Poetry dependency relaxation follow-up
---
poetry.lock | 517 +++++++++--------------------
pyproject.toml | 21 +-
src/humanloop/decorators/prompt.py | 2 +-
src/humanloop/eval_utils/run.py | 10 +-
4 files changed, 169 insertions(+), 381 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index c6a0f9f0..c8c2b217 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -518,26 +518,22 @@ all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2
[[package]]
name = "importlib-metadata"
-version = "8.5.0"
+version = "8.4.0"
description = "Read metadata from Python packages"
optional = false
python-versions = ">=3.8"
files = [
- {file = "importlib_metadata-8.5.0-py3-none-any.whl", hash = "sha256:45e54197d28b7a7f1559e60b95e7c567032b602131fbd588f1497f47880aa68b"},
- {file = "importlib_metadata-8.5.0.tar.gz", hash = "sha256:71522656f0abace1d072b9e5481a48f07c138e00f079c38c8f883823f9c26bd7"},
+ {file = "importlib_metadata-8.4.0-py3-none-any.whl", hash = "sha256:66f342cc6ac9818fc6ff340576acd24d65ba0b3efabb2b4ac08b598965a4a2f1"},
+ {file = "importlib_metadata-8.4.0.tar.gz", hash = "sha256:9a547d3bc3608b025f93d403fdd1aae741c24fbb8314df4b155675742ce303c5"},
]
[package.dependencies]
-zipp = ">=3.20"
+zipp = ">=0.5"
[package.extras]
-check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"]
-cover = ["pytest-cov"]
doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
-enabler = ["pytest-enabler (>=2.2)"]
perf = ["ipython"]
-test = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"]
-type = ["pytest-mypy"]
+test = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-perf (>=0.9.2)", "pytest-ruff (>=0.2.1)"]
[[package]]
name = "iniconfig"
@@ -552,84 +548,84 @@ files = [
[[package]]
name = "jiter"
-version = "0.7.0"
+version = "0.7.1"
description = "Fast iterable JSON parser."
optional = false
python-versions = ">=3.8"
files = [
- {file = "jiter-0.7.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:e14027f61101b3f5e173095d9ecf95c1cac03ffe45a849279bde1d97e559e314"},
- {file = "jiter-0.7.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:979ec4711c2e37ac949561858bd42028884c9799516a923e1ff0b501ef341a4a"},
- {file = "jiter-0.7.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:662d5d3cca58ad6af7a3c6226b641c8655de5beebcb686bfde0df0f21421aafa"},
- {file = "jiter-0.7.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1d89008fb47043a469f97ad90840b97ba54e7c3d62dc7cbb6cbf938bd0caf71d"},
- {file = "jiter-0.7.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a8b16c35c846a323ce9067170d5ab8c31ea3dbcab59c4f7608bbbf20c2c3b43f"},
- {file = "jiter-0.7.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c9e82daaa1b0a68704f9029b81e664a5a9de3e466c2cbaabcda5875f961702e7"},
- {file = "jiter-0.7.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:43a87a9f586636e1f0dd3651a91f79b491ea0d9fd7cbbf4f5c463eebdc48bda7"},
- {file = "jiter-0.7.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2ec05b1615f96cc3e4901678bc863958611584072967d9962f9e571d60711d52"},
- {file = "jiter-0.7.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:a5cb97e35370bde7aa0d232a7f910f5a0fbbc96bc0a7dbaa044fd5cd6bcd7ec3"},
- {file = "jiter-0.7.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:cb316dacaf48c8c187cea75d0d7f835f299137e6fdd13f691dff8f92914015c7"},
- {file = "jiter-0.7.0-cp310-none-win32.whl", hash = "sha256:243f38eb4072763c54de95b14ad283610e0cd3bf26393870db04e520f60eebb3"},
- {file = "jiter-0.7.0-cp310-none-win_amd64.whl", hash = "sha256:2221d5603c139f6764c54e37e7c6960c469cbcd76928fb10d15023ba5903f94b"},
- {file = "jiter-0.7.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:91cec0ad755bd786c9f769ce8d843af955df6a8e56b17658771b2d5cb34a3ff8"},
- {file = "jiter-0.7.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:feba70a28a27d962e353e978dbb6afd798e711c04cb0b4c5e77e9d3779033a1a"},
- {file = "jiter-0.7.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a9d866ec066c3616cacb8535dbda38bb1d470b17b25f0317c4540182bc886ce2"},
- {file = "jiter-0.7.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8e7a7a00b6f9f18289dd563596f97ecaba6c777501a8ba04bf98e03087bcbc60"},
- {file = "jiter-0.7.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9aaf564094c7db8687f2660605e099f3d3e6ea5e7135498486674fcb78e29165"},
- {file = "jiter-0.7.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a4d27e09825c1b3c7a667adb500ce8b840e8fc9f630da8454b44cdd4fb0081bb"},
- {file = "jiter-0.7.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ca7c287da9c1d56dda88da1d08855a787dbb09a7e2bd13c66a2e288700bd7c7"},
- {file = "jiter-0.7.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:db19a6d160f093cbc8cd5ea2abad420b686f6c0e5fb4f7b41941ebc6a4f83cda"},
- {file = "jiter-0.7.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6e46a63c7f877cf7441ffc821c28287cfb9f533ae6ed707bde15e7d4dfafa7ae"},
- {file = "jiter-0.7.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7ba426fa7ff21cb119fa544b75dd3fbee6a70e55a5829709c0338d07ccd30e6d"},
- {file = "jiter-0.7.0-cp311-none-win32.whl", hash = "sha256:c07f55a64912b0c7982377831210836d2ea92b7bd343fca67a32212dd72e38e0"},
- {file = "jiter-0.7.0-cp311-none-win_amd64.whl", hash = "sha256:ed27b2c43e1b5f6c7fedc5c11d4d8bfa627de42d1143d87e39e2e83ddefd861a"},
- {file = "jiter-0.7.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:ac7930bcaaeb1e229e35c91c04ed2e9f39025b86ee9fc3141706bbf6fff4aeeb"},
- {file = "jiter-0.7.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:571feae3e7c901a8eedde9fd2865b0dfc1432fb15cab8c675a8444f7d11b7c5d"},
- {file = "jiter-0.7.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a8af4df8a262fa2778b68c2a03b6e9d1cb4d43d02bea6976d46be77a3a331af1"},
- {file = "jiter-0.7.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bd028d4165097a611eb0c7494d8c1f2aebd46f73ca3200f02a175a9c9a6f22f5"},
- {file = "jiter-0.7.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c6b487247c7836810091e9455efe56a52ec51bfa3a222237e1587d04d3e04527"},
- {file = "jiter-0.7.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e6d28a92f28814e1a9f2824dc11f4e17e1df1f44dc4fdeb94c5450d34bcb2602"},
- {file = "jiter-0.7.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90443994bbafe134f0b34201dad3ebe1c769f0599004084e046fb249ad912425"},
- {file = "jiter-0.7.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f9abf464f9faac652542ce8360cea8e68fba2b78350e8a170248f9bcc228702a"},
- {file = "jiter-0.7.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:db7a8d99fc5f842f7d2852f06ccaed066532292c41723e5dff670c339b649f88"},
- {file = "jiter-0.7.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:15cf691ebd8693b70c94627d6b748f01e6d697d9a6e9f2bc310934fcfb7cf25e"},
- {file = "jiter-0.7.0-cp312-none-win32.whl", hash = "sha256:9dcd54fa422fb66ca398bec296fed5f58e756aa0589496011cfea2abb5be38a5"},
- {file = "jiter-0.7.0-cp312-none-win_amd64.whl", hash = "sha256:cc989951f73f9375b8eacd571baaa057f3d7d11b7ce6f67b9d54642e7475bfad"},
- {file = "jiter-0.7.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:24cecd18df540963cd27c08ca5ce1d0179f229ff78066d9eecbe5add29361340"},
- {file = "jiter-0.7.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d41b46236b90b043cca73785674c23d2a67d16f226394079d0953f94e765ed76"},
- {file = "jiter-0.7.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b160db0987171365c153e406a45dcab0ee613ae3508a77bfff42515cb4ce4d6e"},
- {file = "jiter-0.7.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d1c8d91e0f0bd78602eaa081332e8ee4f512c000716f5bc54e9a037306d693a7"},
- {file = "jiter-0.7.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:997706c683195eeff192d2e5285ce64d2a610414f37da3a3f2625dcf8517cf90"},
- {file = "jiter-0.7.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7ea52a8a0ff0229ab2920284079becd2bae0688d432fca94857ece83bb49c541"},
- {file = "jiter-0.7.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d77449d2738cf74752bb35d75ee431af457e741124d1db5e112890023572c7c"},
- {file = "jiter-0.7.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a8203519907a1d81d6cb00902c98e27c2d0bf25ce0323c50ca594d30f5f1fbcf"},
- {file = "jiter-0.7.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41d15ccc53931c822dd7f1aebf09faa3cda2d7b48a76ef304c7dbc19d1302e51"},
- {file = "jiter-0.7.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:febf3179b2fabf71fbd2fd52acb8594163bb173348b388649567a548f356dbf6"},
- {file = "jiter-0.7.0-cp313-none-win32.whl", hash = "sha256:4a8e2d866e7eda19f012444e01b55079d8e1c4c30346aaac4b97e80c54e2d6d3"},
- {file = "jiter-0.7.0-cp313-none-win_amd64.whl", hash = "sha256:7417c2b928062c496f381fb0cb50412eee5ad1d8b53dbc0e011ce45bb2de522c"},
- {file = "jiter-0.7.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:9c62c737b5368e51e74960a08fe1adc807bd270227291daede78db24d5fbf556"},
- {file = "jiter-0.7.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e4640722b1bef0f6e342fe4606aafaae0eb4f4be5c84355bb6867f34400f6688"},
- {file = "jiter-0.7.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f367488c3b9453eab285424c61098faa1cab37bb49425e69c8dca34f2dfe7d69"},
- {file = "jiter-0.7.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0cf5d42beb3514236459454e3287db53d9c4d56c4ebaa3e9d0efe81b19495129"},
- {file = "jiter-0.7.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cc5190ea1113ee6f7252fa8a5fe5a6515422e378356c950a03bbde5cafbdbaab"},
- {file = "jiter-0.7.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:63ee47a149d698796a87abe445fc8dee21ed880f09469700c76c8d84e0d11efd"},
- {file = "jiter-0.7.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48592c26ea72d3e71aa4bea0a93454df907d80638c3046bb0705507b6704c0d7"},
- {file = "jiter-0.7.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:79fef541199bd91cfe8a74529ecccb8eaf1aca38ad899ea582ebbd4854af1e51"},
- {file = "jiter-0.7.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:d1ef6bb66041f2514739240568136c81b9dcc64fd14a43691c17ea793b6535c0"},
- {file = "jiter-0.7.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aca4d950863b1c238e315bf159466e064c98743eef3bd0ff9617e48ff63a4715"},
- {file = "jiter-0.7.0-cp38-none-win32.whl", hash = "sha256:897745f230350dcedb8d1ebe53e33568d48ea122c25e6784402b6e4e88169be7"},
- {file = "jiter-0.7.0-cp38-none-win_amd64.whl", hash = "sha256:b928c76a422ef3d0c85c5e98c498ce3421b313c5246199541e125b52953e1bc0"},
- {file = "jiter-0.7.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c9b669ff6f8ba08270dee9ccf858d3b0203b42314a428a1676762f2d390fbb64"},
- {file = "jiter-0.7.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b5be919bacd73ca93801c3042bce6e95cb9c555a45ca83617b9b6c89df03b9c2"},
- {file = "jiter-0.7.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a282e1e8a396dabcea82d64f9d05acf7efcf81ecdd925b967020dcb0e671c103"},
- {file = "jiter-0.7.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:17ecb1a578a56e97a043c72b463776b5ea30343125308f667fb8fce4b3796735"},
- {file = "jiter-0.7.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7b6045fa0527129218cdcd8a8b839f678219686055f31ebab35f87d354d9c36e"},
- {file = "jiter-0.7.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:189cc4262a92e33c19d4fd24018f5890e4e6da5b2581f0059938877943f8298c"},
- {file = "jiter-0.7.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c138414839effbf30d185e30475c6dc8a16411a1e3681e5fd4605ab1233ac67a"},
- {file = "jiter-0.7.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2791604acef33da6b72d5ecf885a32384bcaf9aa1e4be32737f3b8b9588eef6a"},
- {file = "jiter-0.7.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ae60ec89037a78d60bbf3d8b127f1567769c8fa24886e0abed3f622791dea478"},
- {file = "jiter-0.7.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:836f03dea312967635233d826f783309b98cfd9ccc76ac776e224cfcef577862"},
- {file = "jiter-0.7.0-cp39-none-win32.whl", hash = "sha256:ebc30ae2ce4bc4986e1764c404b4ea1924f926abf02ce92516485098f8545374"},
- {file = "jiter-0.7.0-cp39-none-win_amd64.whl", hash = "sha256:abf596f951370c648f37aa9899deab296c42a3829736e598b0dd10b08f77a44d"},
- {file = "jiter-0.7.0.tar.gz", hash = "sha256:c061d9738535497b5509f8970584f20de1e900806b239a39a9994fc191dad630"},
+ {file = "jiter-0.7.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:262e96d06696b673fad6f257e6a0abb6e873dc22818ca0e0600f4a1189eb334f"},
+ {file = "jiter-0.7.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:be6de02939aac5be97eb437f45cfd279b1dc9de358b13ea6e040e63a3221c40d"},
+ {file = "jiter-0.7.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:935f10b802bc1ce2b2f61843e498c7720aa7f4e4bb7797aa8121eab017293c3d"},
+ {file = "jiter-0.7.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9cd3cccccabf5064e4bb3099c87bf67db94f805c1e62d1aefd2b7476e90e0ee2"},
+ {file = "jiter-0.7.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4aa919ebfc5f7b027cc368fe3964c0015e1963b92e1db382419dadb098a05192"},
+ {file = "jiter-0.7.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ae2d01e82c94491ce4d6f461a837f63b6c4e6dd5bb082553a70c509034ff3d4"},
+ {file = "jiter-0.7.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f9568cd66dbbdab67ae1b4c99f3f7da1228c5682d65913e3f5f95586b3cb9a9"},
+ {file = "jiter-0.7.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9ecbf4e20ec2c26512736284dc1a3f8ed79b6ca7188e3b99032757ad48db97dc"},
+ {file = "jiter-0.7.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b1a0508fddc70ce00b872e463b387d49308ef02b0787992ca471c8d4ba1c0fa1"},
+ {file = "jiter-0.7.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f84c9996664c460f24213ff1e5881530abd8fafd82058d39af3682d5fd2d6316"},
+ {file = "jiter-0.7.1-cp310-none-win32.whl", hash = "sha256:c915e1a1960976ba4dfe06551ea87063b2d5b4d30759012210099e712a414d9f"},
+ {file = "jiter-0.7.1-cp310-none-win_amd64.whl", hash = "sha256:75bf3b7fdc5c0faa6ffffcf8028a1f974d126bac86d96490d1b51b3210aa0f3f"},
+ {file = "jiter-0.7.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:ad04a23a91f3d10d69d6c87a5f4471b61c2c5cd6e112e85136594a02043f462c"},
+ {file = "jiter-0.7.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e47a554de88dff701226bb5722b7f1b6bccd0b98f1748459b7e56acac2707a5"},
+ {file = "jiter-0.7.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e44fff69c814a2e96a20b4ecee3e2365e9b15cf5fe4e00869d18396daa91dab"},
+ {file = "jiter-0.7.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:df0a1d05081541b45743c965436f8b5a1048d6fd726e4a030113a2699a6046ea"},
+ {file = "jiter-0.7.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f22cf8f236a645cb6d8ffe2a64edb5d2b66fb148bf7c75eea0cb36d17014a7bc"},
+ {file = "jiter-0.7.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:da8589f50b728ea4bf22e0632eefa125c8aa9c38ed202a5ee6ca371f05eeb3ff"},
+ {file = "jiter-0.7.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f20de711224f2ca2dbb166a8d512f6ff48c9c38cc06b51f796520eb4722cc2ce"},
+ {file = "jiter-0.7.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8a9803396032117b85ec8cbf008a54590644a062fedd0425cbdb95e4b2b60479"},
+ {file = "jiter-0.7.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:3d8bae77c82741032e9d89a4026479061aba6e646de3bf5f2fc1ae2bbd9d06e0"},
+ {file = "jiter-0.7.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3dc9939e576bbc68c813fc82f6620353ed68c194c7bcf3d58dc822591ec12490"},
+ {file = "jiter-0.7.1-cp311-none-win32.whl", hash = "sha256:f7605d24cd6fab156ec89e7924578e21604feee9c4f1e9da34d8b67f63e54892"},
+ {file = "jiter-0.7.1-cp311-none-win_amd64.whl", hash = "sha256:f3ea649e7751a1a29ea5ecc03c4ada0a833846c59c6da75d747899f9b48b7282"},
+ {file = "jiter-0.7.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:ad36a1155cbd92e7a084a568f7dc6023497df781adf2390c345dd77a120905ca"},
+ {file = "jiter-0.7.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7ba52e6aaed2dc5c81a3d9b5e4ab95b039c4592c66ac973879ba57c3506492bb"},
+ {file = "jiter-0.7.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2b7de0b6f6728b678540c7927587e23f715284596724be203af952418acb8a2d"},
+ {file = "jiter-0.7.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9463b62bd53c2fb85529c700c6a3beb2ee54fde8bef714b150601616dcb184a6"},
+ {file = "jiter-0.7.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:627164ec01d28af56e1f549da84caf0fe06da3880ebc7b7ee1ca15df106ae172"},
+ {file = "jiter-0.7.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:25d0e5bf64e368b0aa9e0a559c3ab2f9b67e35fe7269e8a0d81f48bbd10e8963"},
+ {file = "jiter-0.7.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c244261306f08f8008b3087059601997016549cb8bb23cf4317a4827f07b7d74"},
+ {file = "jiter-0.7.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7ded4e4b75b68b843b7cea5cd7c55f738c20e1394c68c2cb10adb655526c5f1b"},
+ {file = "jiter-0.7.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:80dae4f1889b9d09e5f4de6b58c490d9c8ce7730e35e0b8643ab62b1538f095c"},
+ {file = "jiter-0.7.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5970cf8ec943b51bce7f4b98d2e1ed3ada170c2a789e2db3cb484486591a176a"},
+ {file = "jiter-0.7.1-cp312-none-win32.whl", hash = "sha256:701d90220d6ecb3125d46853c8ca8a5bc158de8c49af60fd706475a49fee157e"},
+ {file = "jiter-0.7.1-cp312-none-win_amd64.whl", hash = "sha256:7824c3ecf9ecf3321c37f4e4d4411aad49c666ee5bc2a937071bdd80917e4533"},
+ {file = "jiter-0.7.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:097676a37778ba3c80cb53f34abd6943ceb0848263c21bf423ae98b090f6c6ba"},
+ {file = "jiter-0.7.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3298af506d4271257c0a8f48668b0f47048d69351675dd8500f22420d4eec378"},
+ {file = "jiter-0.7.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:12fd88cfe6067e2199964839c19bd2b422ca3fd792949b8f44bb8a4e7d21946a"},
+ {file = "jiter-0.7.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:dacca921efcd21939123c8ea8883a54b9fa7f6545c8019ffcf4f762985b6d0c8"},
+ {file = "jiter-0.7.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de3674a5fe1f6713a746d25ad9c32cd32fadc824e64b9d6159b3b34fd9134143"},
+ {file = "jiter-0.7.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65df9dbae6d67e0788a05b4bad5706ad40f6f911e0137eb416b9eead6ba6f044"},
+ {file = "jiter-0.7.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ba9a358d59a0a55cccaa4957e6ae10b1a25ffdabda863c0343c51817610501d"},
+ {file = "jiter-0.7.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:576eb0f0c6207e9ede2b11ec01d9c2182973986514f9c60bc3b3b5d5798c8f50"},
+ {file = "jiter-0.7.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:e550e29cdf3577d2c970a18f3959e6b8646fd60ef1b0507e5947dc73703b5627"},
+ {file = "jiter-0.7.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:81d968dbf3ce0db2e0e4dec6b0a0d5d94f846ee84caf779b07cab49f5325ae43"},
+ {file = "jiter-0.7.1-cp313-none-win32.whl", hash = "sha256:f892e547e6e79a1506eb571a676cf2f480a4533675f834e9ae98de84f9b941ac"},
+ {file = "jiter-0.7.1-cp313-none-win_amd64.whl", hash = "sha256:0302f0940b1455b2a7fb0409b8d5b31183db70d2b07fd177906d83bf941385d1"},
+ {file = "jiter-0.7.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:c65a3ce72b679958b79d556473f192a4dfc5895e8cc1030c9f4e434690906076"},
+ {file = "jiter-0.7.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e80052d3db39f9bb8eb86d207a1be3d9ecee5e05fdec31380817f9609ad38e60"},
+ {file = "jiter-0.7.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70a497859c4f3f7acd71c8bd89a6f9cf753ebacacf5e3e799138b8e1843084e3"},
+ {file = "jiter-0.7.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c1288bc22b9e36854a0536ba83666c3b1fb066b811019d7b682c9cf0269cdf9f"},
+ {file = "jiter-0.7.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b096ca72dd38ef35675e1d3b01785874315182243ef7aea9752cb62266ad516f"},
+ {file = "jiter-0.7.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8dbbd52c50b605af13dbee1a08373c520e6fcc6b5d32f17738875847fea4e2cd"},
+ {file = "jiter-0.7.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af29c5c6eb2517e71ffa15c7ae9509fa5e833ec2a99319ac88cc271eca865519"},
+ {file = "jiter-0.7.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f114a4df1e40c03c0efbf974b376ed57756a1141eb27d04baee0680c5af3d424"},
+ {file = "jiter-0.7.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:191fbaee7cf46a9dd9b817547bf556facde50f83199d07fc48ebeff4082f9df4"},
+ {file = "jiter-0.7.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:0e2b445e5ee627fb4ee6bbceeb486251e60a0c881a8e12398dfdff47c56f0723"},
+ {file = "jiter-0.7.1-cp38-none-win32.whl", hash = "sha256:47ac4c3cf8135c83e64755b7276339b26cd3c7ddadf9e67306ace4832b283edf"},
+ {file = "jiter-0.7.1-cp38-none-win_amd64.whl", hash = "sha256:60b49c245cd90cde4794f5c30f123ee06ccf42fb8730a019a2870cd005653ebd"},
+ {file = "jiter-0.7.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:8f212eeacc7203256f526f550d105d8efa24605828382cd7d296b703181ff11d"},
+ {file = "jiter-0.7.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d9e247079d88c00e75e297e6cb3a18a039ebcd79fefc43be9ba4eb7fb43eb726"},
+ {file = "jiter-0.7.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f0aacaa56360139c53dcf352992b0331f4057a0373bbffd43f64ba0c32d2d155"},
+ {file = "jiter-0.7.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bc1b55314ca97dbb6c48d9144323896e9c1a25d41c65bcb9550b3e0c270ca560"},
+ {file = "jiter-0.7.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f281aae41b47e90deb70e7386558e877a8e62e1693e0086f37d015fa1c102289"},
+ {file = "jiter-0.7.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:93c20d2730a84d43f7c0b6fb2579dc54335db742a59cf9776d0b80e99d587382"},
+ {file = "jiter-0.7.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e81ccccd8069110e150613496deafa10da2f6ff322a707cbec2b0d52a87b9671"},
+ {file = "jiter-0.7.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0a7d5e85766eff4c9be481d77e2226b4c259999cb6862ccac5ef6621d3c8dcce"},
+ {file = "jiter-0.7.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f52ce5799df5b6975439ecb16b1e879d7655e1685b6e3758c9b1b97696313bfb"},
+ {file = "jiter-0.7.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e0c91a0304373fdf97d56f88356a010bba442e6d995eb7773cbe32885b71cdd8"},
+ {file = "jiter-0.7.1-cp39-none-win32.whl", hash = "sha256:5c08adf93e41ce2755970e8aa95262298afe2bf58897fb9653c47cd93c3c6cdc"},
+ {file = "jiter-0.7.1-cp39-none-win_amd64.whl", hash = "sha256:6592f4067c74176e5f369228fb2995ed01400c9e8e1225fb73417183a5e635f0"},
+ {file = "jiter-0.7.1.tar.gz", hash = "sha256:448cf4f74f7363c34cdef26214da527e8eeffd88ba06d0b80b485ad0667baf5d"},
]
[[package]]
@@ -731,124 +727,6 @@ files = [
{file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"},
]
-[[package]]
-name = "numpy"
-version = "2.0.2"
-description = "Fundamental package for array computing in Python"
-optional = false
-python-versions = ">=3.9"
-files = [
- {file = "numpy-2.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:51129a29dbe56f9ca83438b706e2e69a39892b5eda6cedcb6b0c9fdc9b0d3ece"},
- {file = "numpy-2.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f15975dfec0cf2239224d80e32c3170b1d168335eaedee69da84fbe9f1f9cd04"},
- {file = "numpy-2.0.2-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:8c5713284ce4e282544c68d1c3b2c7161d38c256d2eefc93c1d683cf47683e66"},
- {file = "numpy-2.0.2-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:becfae3ddd30736fe1889a37f1f580e245ba79a5855bff5f2a29cb3ccc22dd7b"},
- {file = "numpy-2.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2da5960c3cf0df7eafefd806d4e612c5e19358de82cb3c343631188991566ccd"},
- {file = "numpy-2.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:496f71341824ed9f3d2fd36cf3ac57ae2e0165c143b55c3a035ee219413f3318"},
- {file = "numpy-2.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a61ec659f68ae254e4d237816e33171497e978140353c0c2038d46e63282d0c8"},
- {file = "numpy-2.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d731a1c6116ba289c1e9ee714b08a8ff882944d4ad631fd411106a30f083c326"},
- {file = "numpy-2.0.2-cp310-cp310-win32.whl", hash = "sha256:984d96121c9f9616cd33fbd0618b7f08e0cfc9600a7ee1d6fd9b239186d19d97"},
- {file = "numpy-2.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:c7b0be4ef08607dd04da4092faee0b86607f111d5ae68036f16cc787e250a131"},
- {file = "numpy-2.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:49ca4decb342d66018b01932139c0961a8f9ddc7589611158cb3c27cbcf76448"},
- {file = "numpy-2.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:11a76c372d1d37437857280aa142086476136a8c0f373b2e648ab2c8f18fb195"},
- {file = "numpy-2.0.2-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:807ec44583fd708a21d4a11d94aedf2f4f3c3719035c76a2bbe1fe8e217bdc57"},
- {file = "numpy-2.0.2-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:8cafab480740e22f8d833acefed5cc87ce276f4ece12fdaa2e8903db2f82897a"},
- {file = "numpy-2.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a15f476a45e6e5a3a79d8a14e62161d27ad897381fecfa4a09ed5322f2085669"},
- {file = "numpy-2.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13e689d772146140a252c3a28501da66dfecd77490b498b168b501835041f951"},
- {file = "numpy-2.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9ea91dfb7c3d1c56a0e55657c0afb38cf1eeae4544c208dc465c3c9f3a7c09f9"},
- {file = "numpy-2.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c1c9307701fec8f3f7a1e6711f9089c06e6284b3afbbcd259f7791282d660a15"},
- {file = "numpy-2.0.2-cp311-cp311-win32.whl", hash = "sha256:a392a68bd329eafac5817e5aefeb39038c48b671afd242710b451e76090e81f4"},
- {file = "numpy-2.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:286cd40ce2b7d652a6f22efdfc6d1edf879440e53e76a75955bc0c826c7e64dc"},
- {file = "numpy-2.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:df55d490dea7934f330006d0f81e8551ba6010a5bf035a249ef61a94f21c500b"},
- {file = "numpy-2.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8df823f570d9adf0978347d1f926b2a867d5608f434a7cff7f7908c6570dcf5e"},
- {file = "numpy-2.0.2-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:9a92ae5c14811e390f3767053ff54eaee3bf84576d99a2456391401323f4ec2c"},
- {file = "numpy-2.0.2-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:a842d573724391493a97a62ebbb8e731f8a5dcc5d285dfc99141ca15a3302d0c"},
- {file = "numpy-2.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c05e238064fc0610c840d1cf6a13bf63d7e391717d247f1bf0318172e759e692"},
- {file = "numpy-2.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0123ffdaa88fa4ab64835dcbde75dcdf89c453c922f18dced6e27c90d1d0ec5a"},
- {file = "numpy-2.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:96a55f64139912d61de9137f11bf39a55ec8faec288c75a54f93dfd39f7eb40c"},
- {file = "numpy-2.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ec9852fb39354b5a45a80bdab5ac02dd02b15f44b3804e9f00c556bf24b4bded"},
- {file = "numpy-2.0.2-cp312-cp312-win32.whl", hash = "sha256:671bec6496f83202ed2d3c8fdc486a8fc86942f2e69ff0e986140339a63bcbe5"},
- {file = "numpy-2.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:cfd41e13fdc257aa5778496b8caa5e856dc4896d4ccf01841daee1d96465467a"},
- {file = "numpy-2.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9059e10581ce4093f735ed23f3b9d283b9d517ff46009ddd485f1747eb22653c"},
- {file = "numpy-2.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:423e89b23490805d2a5a96fe40ec507407b8ee786d66f7328be214f9679df6dd"},
- {file = "numpy-2.0.2-cp39-cp39-macosx_14_0_arm64.whl", hash = "sha256:2b2955fa6f11907cf7a70dab0d0755159bca87755e831e47932367fc8f2f2d0b"},
- {file = "numpy-2.0.2-cp39-cp39-macosx_14_0_x86_64.whl", hash = "sha256:97032a27bd9d8988b9a97a8c4d2c9f2c15a81f61e2f21404d7e8ef00cb5be729"},
- {file = "numpy-2.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e795a8be3ddbac43274f18588329c72939870a16cae810c2b73461c40718ab1"},
- {file = "numpy-2.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f26b258c385842546006213344c50655ff1555a9338e2e5e02a0756dc3e803dd"},
- {file = "numpy-2.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5fec9451a7789926bcf7c2b8d187292c9f93ea30284802a0ab3f5be8ab36865d"},
- {file = "numpy-2.0.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:9189427407d88ff25ecf8f12469d4d39d35bee1db5d39fc5c168c6f088a6956d"},
- {file = "numpy-2.0.2-cp39-cp39-win32.whl", hash = "sha256:905d16e0c60200656500c95b6b8dca5d109e23cb24abc701d41c02d74c6b3afa"},
- {file = "numpy-2.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:a3f4ab0caa7f053f6797fcd4e1e25caee367db3112ef2b6ef82d749530768c73"},
- {file = "numpy-2.0.2-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:7f0a0c6f12e07fa94133c8a67404322845220c06a9e80e85999afe727f7438b8"},
- {file = "numpy-2.0.2-pp39-pypy39_pp73-macosx_14_0_x86_64.whl", hash = "sha256:312950fdd060354350ed123c0e25a71327d3711584beaef30cdaa93320c392d4"},
- {file = "numpy-2.0.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26df23238872200f63518dd2aa984cfca675d82469535dc7162dc2ee52d9dd5c"},
- {file = "numpy-2.0.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a46288ec55ebbd58947d31d72be2c63cbf839f0a63b49cb755022310792a3385"},
- {file = "numpy-2.0.2.tar.gz", hash = "sha256:883c987dee1880e2a864ab0dc9892292582510604156762362d9326444636e78"},
-]
-
-[[package]]
-name = "numpy"
-version = "2.1.3"
-description = "Fundamental package for array computing in Python"
-optional = false
-python-versions = ">=3.10"
-files = [
- {file = "numpy-2.1.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c894b4305373b9c5576d7a12b473702afdf48ce5369c074ba304cc5ad8730dff"},
- {file = "numpy-2.1.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b47fbb433d3260adcd51eb54f92a2ffbc90a4595f8970ee00e064c644ac788f5"},
- {file = "numpy-2.1.3-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:825656d0743699c529c5943554d223c021ff0494ff1442152ce887ef4f7561a1"},
- {file = "numpy-2.1.3-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:6a4825252fcc430a182ac4dee5a505053d262c807f8a924603d411f6718b88fd"},
- {file = "numpy-2.1.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e711e02f49e176a01d0349d82cb5f05ba4db7d5e7e0defd026328e5cfb3226d3"},
- {file = "numpy-2.1.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78574ac2d1a4a02421f25da9559850d59457bac82f2b8d7a44fe83a64f770098"},
- {file = "numpy-2.1.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c7662f0e3673fe4e832fe07b65c50342ea27d989f92c80355658c7f888fcc83c"},
- {file = "numpy-2.1.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:fa2d1337dc61c8dc417fbccf20f6d1e139896a30721b7f1e832b2bb6ef4eb6c4"},
- {file = "numpy-2.1.3-cp310-cp310-win32.whl", hash = "sha256:72dcc4a35a8515d83e76b58fdf8113a5c969ccd505c8a946759b24e3182d1f23"},
- {file = "numpy-2.1.3-cp310-cp310-win_amd64.whl", hash = "sha256:ecc76a9ba2911d8d37ac01de72834d8849e55473457558e12995f4cd53e778e0"},
- {file = "numpy-2.1.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4d1167c53b93f1f5d8a139a742b3c6f4d429b54e74e6b57d0eff40045187b15d"},
- {file = "numpy-2.1.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c80e4a09b3d95b4e1cac08643f1152fa71a0a821a2d4277334c88d54b2219a41"},
- {file = "numpy-2.1.3-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:576a1c1d25e9e02ed7fa5477f30a127fe56debd53b8d2c89d5578f9857d03ca9"},
- {file = "numpy-2.1.3-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:973faafebaae4c0aaa1a1ca1ce02434554d67e628b8d805e61f874b84e136b09"},
- {file = "numpy-2.1.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:762479be47a4863e261a840e8e01608d124ee1361e48b96916f38b119cfda04a"},
- {file = "numpy-2.1.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc6f24b3d1ecc1eebfbf5d6051faa49af40b03be1aaa781ebdadcbc090b4539b"},
- {file = "numpy-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:17ee83a1f4fef3c94d16dc1802b998668b5419362c8a4f4e8a491de1b41cc3ee"},
- {file = "numpy-2.1.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:15cb89f39fa6d0bdfb600ea24b250e5f1a3df23f901f51c8debaa6a5d122b2f0"},
- {file = "numpy-2.1.3-cp311-cp311-win32.whl", hash = "sha256:d9beb777a78c331580705326d2367488d5bc473b49a9bc3036c154832520aca9"},
- {file = "numpy-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:d89dd2b6da69c4fff5e39c28a382199ddedc3a5be5390115608345dec660b9e2"},
- {file = "numpy-2.1.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f55ba01150f52b1027829b50d70ef1dafd9821ea82905b63936668403c3b471e"},
- {file = "numpy-2.1.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:13138eadd4f4da03074851a698ffa7e405f41a0845a6b1ad135b81596e4e9958"},
- {file = "numpy-2.1.3-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:a6b46587b14b888e95e4a24d7b13ae91fa22386c199ee7b418f449032b2fa3b8"},
- {file = "numpy-2.1.3-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:0fa14563cc46422e99daef53d725d0c326e99e468a9320a240affffe87852564"},
- {file = "numpy-2.1.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8637dcd2caa676e475503d1f8fdb327bc495554e10838019651b76d17b98e512"},
- {file = "numpy-2.1.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2312b2aa89e1f43ecea6da6ea9a810d06aae08321609d8dc0d0eda6d946a541b"},
- {file = "numpy-2.1.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a38c19106902bb19351b83802531fea19dee18e5b37b36454f27f11ff956f7fc"},
- {file = "numpy-2.1.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:02135ade8b8a84011cbb67dc44e07c58f28575cf9ecf8ab304e51c05528c19f0"},
- {file = "numpy-2.1.3-cp312-cp312-win32.whl", hash = "sha256:e6988e90fcf617da2b5c78902fe8e668361b43b4fe26dbf2d7b0f8034d4cafb9"},
- {file = "numpy-2.1.3-cp312-cp312-win_amd64.whl", hash = "sha256:0d30c543f02e84e92c4b1f415b7c6b5326cbe45ee7882b6b77db7195fb971e3a"},
- {file = "numpy-2.1.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:96fe52fcdb9345b7cd82ecd34547fca4321f7656d500eca497eb7ea5a926692f"},
- {file = "numpy-2.1.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f653490b33e9c3a4c1c01d41bc2aef08f9475af51146e4a7710c450cf9761598"},
- {file = "numpy-2.1.3-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:dc258a761a16daa791081d026f0ed4399b582712e6fc887a95af09df10c5ca57"},
- {file = "numpy-2.1.3-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:016d0f6f5e77b0f0d45d77387ffa4bb89816b57c835580c3ce8e099ef830befe"},
- {file = "numpy-2.1.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c181ba05ce8299c7aa3125c27b9c2167bca4a4445b7ce73d5febc411ca692e43"},
- {file = "numpy-2.1.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5641516794ca9e5f8a4d17bb45446998c6554704d888f86df9b200e66bdcce56"},
- {file = "numpy-2.1.3-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ea4dedd6e394a9c180b33c2c872b92f7ce0f8e7ad93e9585312b0c5a04777a4a"},
- {file = "numpy-2.1.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b0df3635b9c8ef48bd3be5f862cf71b0a4716fa0e702155c45067c6b711ddcef"},
- {file = "numpy-2.1.3-cp313-cp313-win32.whl", hash = "sha256:50ca6aba6e163363f132b5c101ba078b8cbd3fa92c7865fd7d4d62d9779ac29f"},
- {file = "numpy-2.1.3-cp313-cp313-win_amd64.whl", hash = "sha256:747641635d3d44bcb380d950679462fae44f54b131be347d5ec2bce47d3df9ed"},
- {file = "numpy-2.1.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:996bb9399059c5b82f76b53ff8bb686069c05acc94656bb259b1d63d04a9506f"},
- {file = "numpy-2.1.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:45966d859916ad02b779706bb43b954281db43e185015df6eb3323120188f9e4"},
- {file = "numpy-2.1.3-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:baed7e8d7481bfe0874b566850cb0b85243e982388b7b23348c6db2ee2b2ae8e"},
- {file = "numpy-2.1.3-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:a9f7f672a3388133335589cfca93ed468509cb7b93ba3105fce780d04a6576a0"},
- {file = "numpy-2.1.3-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7aac50327da5d208db2eec22eb11e491e3fe13d22653dce51b0f4109101b408"},
- {file = "numpy-2.1.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4394bc0dbd074b7f9b52024832d16e019decebf86caf909d94f6b3f77a8ee3b6"},
- {file = "numpy-2.1.3-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:50d18c4358a0a8a53f12a8ba9d772ab2d460321e6a93d6064fc22443d189853f"},
- {file = "numpy-2.1.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:14e253bd43fc6b37af4921b10f6add6925878a42a0c5fe83daee390bca80bc17"},
- {file = "numpy-2.1.3-cp313-cp313t-win32.whl", hash = "sha256:08788d27a5fd867a663f6fc753fd7c3ad7e92747efc73c53bca2f19f8bc06f48"},
- {file = "numpy-2.1.3-cp313-cp313t-win_amd64.whl", hash = "sha256:2564fbdf2b99b3f815f2107c1bbc93e2de8ee655a69c261363a1172a79a257d4"},
- {file = "numpy-2.1.3-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:4f2015dfe437dfebbfce7c85c7b53d81ba49e71ba7eadbf1df40c915af75979f"},
- {file = "numpy-2.1.3-pp310-pypy310_pp73-macosx_14_0_x86_64.whl", hash = "sha256:3522b0dfe983a575e6a9ab3a4a4dfe156c3e428468ff08ce582b9bb6bd1d71d4"},
- {file = "numpy-2.1.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c006b607a865b07cd981ccb218a04fc86b600411d83d6fc261357f1c0966755d"},
- {file = "numpy-2.1.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:e14e26956e6f1696070788252dcdff11b4aca4c3e8bd166e0df1bb8f315a67cb"},
- {file = "numpy-2.1.3.tar.gz", hash = "sha256:aa08e04e08aaf974d4458def539dece0d28146d866a39da5639596f4921fd761"},
-]
-
[[package]]
name = "openai"
version = "1.54.3"
@@ -875,170 +753,169 @@ datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"]
[[package]]
name = "opentelemetry-api"
-version = "1.28.1"
+version = "1.27.0"
description = "OpenTelemetry Python API"
optional = false
python-versions = ">=3.8"
files = [
- {file = "opentelemetry_api-1.28.1-py3-none-any.whl", hash = "sha256:bfe86c95576cf19a914497f439fd79c9553a38de0adbdc26f7cfc46b0c00b16c"},
- {file = "opentelemetry_api-1.28.1.tar.gz", hash = "sha256:6fa7295a12c707f5aebef82da3d9ec5afe6992f3e42bfe7bec0339a44b3518e7"},
+ {file = "opentelemetry_api-1.27.0-py3-none-any.whl", hash = "sha256:953d5871815e7c30c81b56d910c707588000fff7a3ca1c73e6531911d53065e7"},
+ {file = "opentelemetry_api-1.27.0.tar.gz", hash = "sha256:ed673583eaa5f81b5ce5e86ef7cdaf622f88ef65f0b9aab40b843dcae5bef342"},
]
[package.dependencies]
deprecated = ">=1.2.6"
-importlib-metadata = ">=6.0,<=8.5.0"
+importlib-metadata = ">=6.0,<=8.4.0"
[[package]]
name = "opentelemetry-instrumentation"
-version = "0.49b1"
+version = "0.48b0"
description = "Instrumentation Tools & Auto Instrumentation for OpenTelemetry Python"
optional = false
python-versions = ">=3.8"
files = [
- {file = "opentelemetry_instrumentation-0.49b1-py3-none-any.whl", hash = "sha256:0a9d3821736104013693ef3b8a9d29b41f2f3a81ee2d8c9288b52d62bae5747c"},
- {file = "opentelemetry_instrumentation-0.49b1.tar.gz", hash = "sha256:2d0e41181b7957ba061bb436b969ad90545ac3eba65f290830009b4264d2824e"},
+ {file = "opentelemetry_instrumentation-0.48b0-py3-none-any.whl", hash = "sha256:a69750dc4ba6a5c3eb67986a337185a25b739966d80479befe37b546fc870b44"},
+ {file = "opentelemetry_instrumentation-0.48b0.tar.gz", hash = "sha256:94929685d906380743a71c3970f76b5f07476eea1834abd5dd9d17abfe23cc35"},
]
[package.dependencies]
opentelemetry-api = ">=1.4,<2.0"
-opentelemetry-semantic-conventions = "0.49b1"
-packaging = ">=18.0"
+setuptools = ">=16.0"
wrapt = ">=1.0.0,<2.0.0"
[[package]]
name = "opentelemetry-instrumentation-anthropic"
-version = "0.33.11"
+version = "0.33.9"
description = "OpenTelemetry Anthropic instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_anthropic-0.33.11-py3-none-any.whl", hash = "sha256:4e9622fcac4cb4e09bcefe8d6f5cf0776a5e20e073133ebb0b7c4e82f2b0b06a"},
- {file = "opentelemetry_instrumentation_anthropic-0.33.11.tar.gz", hash = "sha256:b4671cec5ef7e8b138c2a3d37a8d99e757fdda1a7c67e0cbc28cfca1928b23bb"},
+ {file = "opentelemetry_instrumentation_anthropic-0.33.9-py3-none-any.whl", hash = "sha256:443fc46d7de9d95a86efebb4de1119672ba86f6da113cc7e1bb8129ce9978439"},
+ {file = "opentelemetry_instrumentation_anthropic-0.33.9.tar.gz", hash = "sha256:1866e832a777cfd407f83b3782f0788e702a9ede02eaaf7b6680d32f0c03d1e2"},
]
[package.dependencies]
-opentelemetry-api = ">=1.28.0,<2.0.0"
-opentelemetry-instrumentation = ">=0.49b0,<0.50"
-opentelemetry-semantic-conventions = ">=0.49b0,<0.50"
+opentelemetry-api = ">=1.27.0,<2.0.0"
+opentelemetry-instrumentation = ">=0.48b0,<0.49"
+opentelemetry-semantic-conventions = ">=0.48b0,<0.49"
opentelemetry-semantic-conventions-ai = "0.4.2"
[[package]]
name = "opentelemetry-instrumentation-bedrock"
-version = "0.33.11"
+version = "0.33.9"
description = "OpenTelemetry Bedrock instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_bedrock-0.33.11-py3-none-any.whl", hash = "sha256:ccafdd60dfe10f0f9a0dfbf0a32ec998d120f4af46246b11d89863ad6ea9a9f1"},
- {file = "opentelemetry_instrumentation_bedrock-0.33.11.tar.gz", hash = "sha256:3457f439488e6674da2b9180f4f73ecb75868e5315b81f11217cd3dea97a0c1e"},
+ {file = "opentelemetry_instrumentation_bedrock-0.33.9-py3-none-any.whl", hash = "sha256:b6e1ac590b3c0c5bb1df0266feb9d6e349df396d4b3d1a0da5377cb8e6e16816"},
+ {file = "opentelemetry_instrumentation_bedrock-0.33.9.tar.gz", hash = "sha256:4441e5f2093edb1cbcd05298a39d180ea88d6efeb1bbe355886a97a57f6b542e"},
]
[package.dependencies]
anthropic = ">=0.17.0"
-opentelemetry-api = ">=1.28.0,<2.0.0"
-opentelemetry-instrumentation = ">=0.49b0,<0.50"
-opentelemetry-semantic-conventions = ">=0.49b0,<0.50"
+opentelemetry-api = ">=1.27.0,<2.0.0"
+opentelemetry-instrumentation = ">=0.48b0,<0.49"
+opentelemetry-semantic-conventions = ">=0.48b0,<0.49"
opentelemetry-semantic-conventions-ai = "0.4.2"
[[package]]
name = "opentelemetry-instrumentation-cohere"
-version = "0.33.11"
+version = "0.33.9"
description = "OpenTelemetry Cohere instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_cohere-0.33.11-py3-none-any.whl", hash = "sha256:13c9afab4a9c0a90d33ac1bb7530535b0acee31dc056a53ae78d0daf879fdf26"},
- {file = "opentelemetry_instrumentation_cohere-0.33.11.tar.gz", hash = "sha256:07ebd381cc7ce0d14bc61649e2191f2a65962c609439979447fc8b1d09580310"},
+ {file = "opentelemetry_instrumentation_cohere-0.33.9-py3-none-any.whl", hash = "sha256:a94ab72d0c438a154236f9907acee1a07f581408dbd8b06f0cb9301ef29b656b"},
+ {file = "opentelemetry_instrumentation_cohere-0.33.9.tar.gz", hash = "sha256:931f24768337026a933cb7dd4850530e0545772f08abaf37f4664f1e768b73db"},
]
[package.dependencies]
-opentelemetry-api = ">=1.28.0,<2.0.0"
-opentelemetry-instrumentation = ">=0.49b0,<0.50"
-opentelemetry-semantic-conventions = ">=0.49b0,<0.50"
+opentelemetry-api = ">=1.27.0,<2.0.0"
+opentelemetry-instrumentation = ">=0.48b0,<0.49"
+opentelemetry-semantic-conventions = ">=0.48b0,<0.49"
opentelemetry-semantic-conventions-ai = "0.4.2"
[[package]]
name = "opentelemetry-instrumentation-groq"
-version = "0.33.11"
+version = "0.33.9"
description = "OpenTelemetry Groq instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_groq-0.33.11-py3-none-any.whl", hash = "sha256:c3c204abfd9a0096eb8b7aacd719b62b87ad812022bbb0c9ee38e05508123cb7"},
- {file = "opentelemetry_instrumentation_groq-0.33.11.tar.gz", hash = "sha256:26792d1542c8f1d59aa65e83e61fa452f239c9dbf860c545e8268f944413af80"},
+ {file = "opentelemetry_instrumentation_groq-0.33.9-py3-none-any.whl", hash = "sha256:52256832c06f9d1ba8c11efce0854f012e7900c313e410a02c8feb85b0e35407"},
+ {file = "opentelemetry_instrumentation_groq-0.33.9.tar.gz", hash = "sha256:d83201c516a760fdc478413b855c6d9fb1aed48eb8d4166fa2dc7c762058f6b1"},
]
[package.dependencies]
-opentelemetry-api = ">=1.28.0,<2.0.0"
-opentelemetry-instrumentation = ">=0.49b0,<0.50"
-opentelemetry-semantic-conventions = ">=0.49b0,<0.50"
+opentelemetry-api = ">=1.27.0,<2.0.0"
+opentelemetry-instrumentation = ">=0.48b0,<0.49"
+opentelemetry-semantic-conventions = ">=0.48b0,<0.49"
opentelemetry-semantic-conventions-ai = "0.4.2"
[[package]]
name = "opentelemetry-instrumentation-openai"
-version = "0.33.11"
+version = "0.33.9"
description = "OpenTelemetry OpenAI instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_openai-0.33.11-py3-none-any.whl", hash = "sha256:9289f8c4e0989fae95549840d45f797a1269899b5d4264bdf0be356b8f7c3f7e"},
- {file = "opentelemetry_instrumentation_openai-0.33.11.tar.gz", hash = "sha256:bacb7fb8facb2c45076437c5b3e526fda486db3b76edbb01b18e4e133b70eed0"},
+ {file = "opentelemetry_instrumentation_openai-0.33.9-py3-none-any.whl", hash = "sha256:9a54ec31a66c212cd42b7f02701beecea4068effdf227b11c96fecfbc6544f40"},
+ {file = "opentelemetry_instrumentation_openai-0.33.9.tar.gz", hash = "sha256:5989a6049e63a09a6e9d699c077f7bbc932c0bda5a08f9ec0f4e88fd0c38d8b7"},
]
[package.dependencies]
-opentelemetry-api = ">=1.28.0,<2.0.0"
-opentelemetry-instrumentation = ">=0.49b0,<0.50"
-opentelemetry-semantic-conventions = ">=0.49b0,<0.50"
+opentelemetry-api = ">=1.27.0,<2.0.0"
+opentelemetry-instrumentation = ">=0.48b0,<0.49"
+opentelemetry-semantic-conventions = ">=0.48b0,<0.49"
opentelemetry-semantic-conventions-ai = "0.4.2"
tiktoken = ">=0.6.0,<1"
[[package]]
name = "opentelemetry-instrumentation-replicate"
-version = "0.33.11"
+version = "0.33.9"
description = "OpenTelemetry Replicate instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_replicate-0.33.11-py3-none-any.whl", hash = "sha256:58619b28979efd83032e941d53850110bac62b125ff3e7e8b57d3f42f4e9170a"},
- {file = "opentelemetry_instrumentation_replicate-0.33.11.tar.gz", hash = "sha256:52cdf7e5a6b5cd663aa1a1b3a1771c77b9c9dbbffe5ada1acf28d266650950ae"},
+ {file = "opentelemetry_instrumentation_replicate-0.33.9-py3-none-any.whl", hash = "sha256:cf2a0b83dfd150cb7a6827d405b088ed0a46beec7f652bfcc4acb5ffd3d2044a"},
+ {file = "opentelemetry_instrumentation_replicate-0.33.9.tar.gz", hash = "sha256:e18f2ce224ae1efc2158263aaec6c7b487d7498da9a08d1a594df484e86fce88"},
]
[package.dependencies]
-opentelemetry-api = ">=1.28.0,<2.0.0"
-opentelemetry-instrumentation = ">=0.49b0,<0.50"
-opentelemetry-semantic-conventions = ">=0.49b0,<0.50"
+opentelemetry-api = ">=1.27.0,<2.0.0"
+opentelemetry-instrumentation = ">=0.48b0,<0.49"
+opentelemetry-semantic-conventions = ">=0.48b0,<0.49"
opentelemetry-semantic-conventions-ai = "0.4.2"
[[package]]
name = "opentelemetry-sdk"
-version = "1.28.1"
+version = "1.27.0"
description = "OpenTelemetry Python SDK"
optional = false
python-versions = ">=3.8"
files = [
- {file = "opentelemetry_sdk-1.28.1-py3-none-any.whl", hash = "sha256:72aad7f5fcbe37113c4ab4899f6cdeb6ac77ed3e62f25a85e3627b12583dad0f"},
- {file = "opentelemetry_sdk-1.28.1.tar.gz", hash = "sha256:100fa371b2046ffba6a340c18f0b2a0463acad7461e5177e126693b613a6ca57"},
+ {file = "opentelemetry_sdk-1.27.0-py3-none-any.whl", hash = "sha256:365f5e32f920faf0fd9e14fdfd92c086e317eaa5f860edba9cdc17a380d9197d"},
+ {file = "opentelemetry_sdk-1.27.0.tar.gz", hash = "sha256:d525017dea0ccce9ba4e0245100ec46ecdc043f2d7b8315d56b19aff0904fa6f"},
]
[package.dependencies]
-opentelemetry-api = "1.28.1"
-opentelemetry-semantic-conventions = "0.49b1"
+opentelemetry-api = "1.27.0"
+opentelemetry-semantic-conventions = "0.48b0"
typing-extensions = ">=3.7.4"
[[package]]
name = "opentelemetry-semantic-conventions"
-version = "0.49b1"
+version = "0.48b0"
description = "OpenTelemetry Semantic Conventions"
optional = false
python-versions = ">=3.8"
files = [
- {file = "opentelemetry_semantic_conventions-0.49b1-py3-none-any.whl", hash = "sha256:dd6f3ac8169d2198c752e1a63f827e5f5e110ae9b0ce33f2aad9a3baf0739743"},
- {file = "opentelemetry_semantic_conventions-0.49b1.tar.gz", hash = "sha256:91817883b159ffb94c2ca9548509c4fe0aafce7c24f437aa6ac3fc613aa9a758"},
+ {file = "opentelemetry_semantic_conventions-0.48b0-py3-none-any.whl", hash = "sha256:a0de9f45c413a8669788a38569c7e0a11ce6ce97861a628cca785deecdc32a1f"},
+ {file = "opentelemetry_semantic_conventions-0.48b0.tar.gz", hash = "sha256:12d74983783b6878162208be57c9effcb89dc88691c64992d70bb89dc00daa1a"},
]
[package.dependencies]
deprecated = ">=1.2.6"
-opentelemetry-api = "1.28.1"
+opentelemetry-api = "1.27.0"
[[package]]
name = "opentelemetry-semantic-conventions-ai"
@@ -1062,92 +939,6 @@ files = [
{file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"},
]
-[[package]]
-name = "pandas"
-version = "2.2.3"
-description = "Powerful data structures for data analysis, time series, and statistics"
-optional = false
-python-versions = ">=3.9"
-files = [
- {file = "pandas-2.2.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1948ddde24197a0f7add2bdc4ca83bf2b1ef84a1bc8ccffd95eda17fd836ecb5"},
- {file = "pandas-2.2.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:381175499d3802cde0eabbaf6324cce0c4f5d52ca6f8c377c29ad442f50f6348"},
- {file = "pandas-2.2.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d9c45366def9a3dd85a6454c0e7908f2b3b8e9c138f5dc38fed7ce720d8453ed"},
- {file = "pandas-2.2.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86976a1c5b25ae3f8ccae3a5306e443569ee3c3faf444dfd0f41cda24667ad57"},
- {file = "pandas-2.2.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b8661b0238a69d7aafe156b7fa86c44b881387509653fdf857bebc5e4008ad42"},
- {file = "pandas-2.2.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:37e0aced3e8f539eccf2e099f65cdb9c8aa85109b0be6e93e2baff94264bdc6f"},
- {file = "pandas-2.2.3-cp310-cp310-win_amd64.whl", hash = "sha256:56534ce0746a58afaf7942ba4863e0ef81c9c50d3f0ae93e9497d6a41a057645"},
- {file = "pandas-2.2.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:66108071e1b935240e74525006034333f98bcdb87ea116de573a6a0dccb6c039"},
- {file = "pandas-2.2.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7c2875855b0ff77b2a64a0365e24455d9990730d6431b9e0ee18ad8acee13dbd"},
- {file = "pandas-2.2.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cd8d0c3be0515c12fed0bdbae072551c8b54b7192c7b1fda0ba56059a0179698"},
- {file = "pandas-2.2.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c124333816c3a9b03fbeef3a9f230ba9a737e9e5bb4060aa2107a86cc0a497fc"},
- {file = "pandas-2.2.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:63cc132e40a2e084cf01adf0775b15ac515ba905d7dcca47e9a251819c575ef3"},
- {file = "pandas-2.2.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:29401dbfa9ad77319367d36940cd8a0b3a11aba16063e39632d98b0e931ddf32"},
- {file = "pandas-2.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:3fc6873a41186404dad67245896a6e440baacc92f5b716ccd1bc9ed2995ab2c5"},
- {file = "pandas-2.2.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b1d432e8d08679a40e2a6d8b2f9770a5c21793a6f9f47fdd52c5ce1948a5a8a9"},
- {file = "pandas-2.2.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a5a1595fe639f5988ba6a8e5bc9649af3baf26df3998a0abe56c02609392e0a4"},
- {file = "pandas-2.2.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5de54125a92bb4d1c051c0659e6fcb75256bf799a732a87184e5ea503965bce3"},
- {file = "pandas-2.2.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fffb8ae78d8af97f849404f21411c95062db1496aeb3e56f146f0355c9989319"},
- {file = "pandas-2.2.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6dfcb5ee8d4d50c06a51c2fffa6cff6272098ad6540aed1a76d15fb9318194d8"},
- {file = "pandas-2.2.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:062309c1b9ea12a50e8ce661145c6aab431b1e99530d3cd60640e255778bd43a"},
- {file = "pandas-2.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:59ef3764d0fe818125a5097d2ae867ca3fa64df032331b7e0917cf5d7bf66b13"},
- {file = "pandas-2.2.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f00d1345d84d8c86a63e476bb4955e46458b304b9575dcf71102b5c705320015"},
- {file = "pandas-2.2.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3508d914817e153ad359d7e069d752cdd736a247c322d932eb89e6bc84217f28"},
- {file = "pandas-2.2.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:22a9d949bfc9a502d320aa04e5d02feab689d61da4e7764b62c30b991c42c5f0"},
- {file = "pandas-2.2.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3a255b2c19987fbbe62a9dfd6cff7ff2aa9ccab3fc75218fd4b7530f01efa24"},
- {file = "pandas-2.2.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:800250ecdadb6d9c78eae4990da62743b857b470883fa27f652db8bdde7f6659"},
- {file = "pandas-2.2.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6374c452ff3ec675a8f46fd9ab25c4ad0ba590b71cf0656f8b6daa5202bca3fb"},
- {file = "pandas-2.2.3-cp313-cp313-win_amd64.whl", hash = "sha256:61c5ad4043f791b61dd4752191d9f07f0ae412515d59ba8f005832a532f8736d"},
- {file = "pandas-2.2.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:3b71f27954685ee685317063bf13c7709a7ba74fc996b84fc6821c59b0f06468"},
- {file = "pandas-2.2.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:38cf8125c40dae9d5acc10fa66af8ea6fdf760b2714ee482ca691fc66e6fcb18"},
- {file = "pandas-2.2.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ba96630bc17c875161df3818780af30e43be9b166ce51c9a18c1feae342906c2"},
- {file = "pandas-2.2.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1db71525a1538b30142094edb9adc10be3f3e176748cd7acc2240c2f2e5aa3a4"},
- {file = "pandas-2.2.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:15c0e1e02e93116177d29ff83e8b1619c93ddc9c49083f237d4312337a61165d"},
- {file = "pandas-2.2.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ad5b65698ab28ed8d7f18790a0dc58005c7629f227be9ecc1072aa74c0c1d43a"},
- {file = "pandas-2.2.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc6b93f9b966093cb0fd62ff1a7e4c09e6d546ad7c1de191767baffc57628f39"},
- {file = "pandas-2.2.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5dbca4c1acd72e8eeef4753eeca07de9b1db4f398669d5994086f788a5d7cc30"},
- {file = "pandas-2.2.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8cd6d7cc958a3910f934ea8dbdf17b2364827bb4dafc38ce6eef6bb3d65ff09c"},
- {file = "pandas-2.2.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99df71520d25fade9db7c1076ac94eb994f4d2673ef2aa2e86ee039b6746d20c"},
- {file = "pandas-2.2.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:31d0ced62d4ea3e231a9f228366919a5ea0b07440d9d4dac345376fd8e1477ea"},
- {file = "pandas-2.2.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7eee9e7cea6adf3e3d24e304ac6b8300646e2a5d1cd3a3c2abed9101b0846761"},
- {file = "pandas-2.2.3-cp39-cp39-win_amd64.whl", hash = "sha256:4850ba03528b6dd51d6c5d273c46f183f39a9baf3f0143e566b89450965b105e"},
- {file = "pandas-2.2.3.tar.gz", hash = "sha256:4f18ba62b61d7e192368b84517265a99b4d7ee8912f8708660fb4a366cc82667"},
-]
-
-[package.dependencies]
-numpy = [
- {version = ">=1.26.0", markers = "python_version >= \"3.12\""},
- {version = ">=1.22.4", markers = "python_version < \"3.11\""},
- {version = ">=1.23.2", markers = "python_version == \"3.11\""},
-]
-python-dateutil = ">=2.8.2"
-pytz = ">=2020.1"
-tzdata = ">=2022.7"
-
-[package.extras]
-all = ["PyQt5 (>=5.15.9)", "SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)", "beautifulsoup4 (>=4.11.2)", "bottleneck (>=1.3.6)", "dataframe-api-compat (>=0.1.7)", "fastparquet (>=2022.12.0)", "fsspec (>=2022.11.0)", "gcsfs (>=2022.11.0)", "html5lib (>=1.1)", "hypothesis (>=6.46.1)", "jinja2 (>=3.1.2)", "lxml (>=4.9.2)", "matplotlib (>=3.6.3)", "numba (>=0.56.4)", "numexpr (>=2.8.4)", "odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "pandas-gbq (>=0.19.0)", "psycopg2 (>=2.9.6)", "pyarrow (>=10.0.1)", "pymysql (>=1.0.2)", "pyreadstat (>=1.2.0)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "qtpy (>=2.3.0)", "s3fs (>=2022.11.0)", "scipy (>=1.10.0)", "tables (>=3.8.0)", "tabulate (>=0.9.0)", "xarray (>=2022.12.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)", "zstandard (>=0.19.0)"]
-aws = ["s3fs (>=2022.11.0)"]
-clipboard = ["PyQt5 (>=5.15.9)", "qtpy (>=2.3.0)"]
-compression = ["zstandard (>=0.19.0)"]
-computation = ["scipy (>=1.10.0)", "xarray (>=2022.12.0)"]
-consortium-standard = ["dataframe-api-compat (>=0.1.7)"]
-excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)"]
-feather = ["pyarrow (>=10.0.1)"]
-fss = ["fsspec (>=2022.11.0)"]
-gcp = ["gcsfs (>=2022.11.0)", "pandas-gbq (>=0.19.0)"]
-hdf5 = ["tables (>=3.8.0)"]
-html = ["beautifulsoup4 (>=4.11.2)", "html5lib (>=1.1)", "lxml (>=4.9.2)"]
-mysql = ["SQLAlchemy (>=2.0.0)", "pymysql (>=1.0.2)"]
-output-formatting = ["jinja2 (>=3.1.2)", "tabulate (>=0.9.0)"]
-parquet = ["pyarrow (>=10.0.1)"]
-performance = ["bottleneck (>=1.3.6)", "numba (>=0.56.4)", "numexpr (>=2.8.4)"]
-plot = ["matplotlib (>=3.6.3)"]
-postgresql = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "psycopg2 (>=2.9.6)"]
-pyarrow = ["pyarrow (>=10.0.1)"]
-spss = ["pyreadstat (>=1.2.0)"]
-sql-other = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)"]
-test = ["hypothesis (>=6.46.1)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)"]
-xml = ["lxml (>=4.9.2)"]
-
[[package]]
name = "parameterized"
version = "0.9.0"
@@ -1400,17 +1191,6 @@ files = [
[package.extras]
cli = ["click (>=5.0)"]
-[[package]]
-name = "pytz"
-version = "2024.2"
-description = "World timezone definitions, modern and historical"
-optional = false
-python-versions = "*"
-files = [
- {file = "pytz-2024.2-py2.py3-none-any.whl", hash = "sha256:31c7c1817eb7fae7ca4b8c7ee50c72f93aa2dd863de768e1ef4245d426aa0725"},
- {file = "pytz-2024.2.tar.gz", hash = "sha256:2aa355083c50a0f93fa581709deac0c9ad65cca8a9e9beac660adcbd493c798a"},
-]
-
[[package]]
name = "pyyaml"
version = "6.0.2"
@@ -1755,6 +1535,26 @@ files = [
{file = "ruff-0.5.7.tar.gz", hash = "sha256:8dfc0a458797f5d9fb622dd0efc52d796f23f0a1493a9527f4e49a550ae9a7e5"},
]
+[[package]]
+name = "setuptools"
+version = "75.4.0"
+description = "Easily download, build, install, upgrade, and uninstall Python packages"
+optional = false
+python-versions = ">=3.9"
+files = [
+ {file = "setuptools-75.4.0-py3-none-any.whl", hash = "sha256:b3c5d862f98500b06ffdf7cc4499b48c46c317d8d56cb30b5c8bce4d88f5c216"},
+ {file = "setuptools-75.4.0.tar.gz", hash = "sha256:1dc484f5cf56fd3fe7216d7b8df820802e7246cfb534a1db2aa64f14fcb9cdcb"},
+]
+
+[package.extras]
+check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)", "ruff (>=0.7.0)"]
+core = ["importlib-metadata (>=6)", "jaraco.collections", "jaraco.functools (>=4)", "jaraco.text (>=3.7)", "more-itertools", "more-itertools (>=8.8)", "packaging", "packaging (>=24.2)", "platformdirs (>=4.2.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"]
+cover = ["pytest-cov"]
+doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"]
+enabler = ["pytest-enabler (>=2.2)"]
+test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test (>=5.5)", "packaging (>=24.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"]
+type = ["importlib-metadata (>=7.0.2)", "jaraco.develop (>=7.21)", "mypy (>=1.12,<1.14)", "pytest-mypy"]
+
[[package]]
name = "six"
version = "1.16.0"
@@ -2035,17 +1835,6 @@ files = [
{file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"},
]
-[[package]]
-name = "tzdata"
-version = "2024.2"
-description = "Provider of IANA time zone data"
-optional = false
-python-versions = ">=2"
-files = [
- {file = "tzdata-2024.2-py2.py3-none-any.whl", hash = "sha256:a48093786cdcde33cad18c2555e8532f34422074448fbc874186f0abd79565cd"},
- {file = "tzdata-2024.2.tar.gz", hash = "sha256:7d85cc416e9382e69095b7bdf4afd9e3880418a2413feec7069d533d6b4e31cc"},
-]
-
[[package]]
name = "urllib3"
version = "2.2.3"
@@ -2164,4 +1953,4 @@ type = ["pytest-mypy"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.9,<4"
-content-hash = "8c31e20928f328e20d6f769647f8bb3a064ca9ec1a7b53ed7111f1d9dcff5eb0"
+content-hash = "26f6c5843461d01e9766383cef4b4f4febb0b95ea43db90bbedbe905793a0cfd"
diff --git a/pyproject.toml b/pyproject.toml
index ff1effa0..8012d868 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -36,16 +36,16 @@ httpx = ">=0.21.2"
httpx-sse = "0.4.0"
pydantic = ">= 1.9.2"
pydantic-core = "^2.18.2"
-typing_extensions = ">= 4.0.0"
-parse = "^1.20.2"
-opentelemetry-sdk = ">=1.20.0"
-opentelemetry-api = ">=1.20.0"
-opentelemetry-instrumentation-openai = ">=0.30"
-opentelemetry-instrumentation-cohere = ">=0.30"
-opentelemetry-instrumentation-anthropic = ">=0.30"
-opentelemetry-instrumentation-groq = ">=0.33.11"
-opentelemetry-instrumentation-replicate = ">=0.30"
-opentelemetry-instrumentation-bedrock = ">=0.0.1"
+typing_extensions = ">=4.0.0"
+parse = ">=1"
+opentelemetry-sdk = "<=1.27.0"
+opentelemetry-api = "<=1.27.0"
+opentelemetry-instrumentation-openai = ">=0.20"
+opentelemetry-instrumentation-cohere = ">=0.20"
+opentelemetry-instrumentation-anthropic = ">=0.20"
+opentelemetry-instrumentation-replicate = ">=0.20"
+opentelemetry-instrumentation-groq = ">=0.29"
+opentelemetry-instrumentation-bedrock = ">=0.15"
[tool.poetry.group.dev.dependencies]
parse-type = ">=0.6.4"
@@ -63,7 +63,6 @@ types-python-dateutil = "^2.9.0.20240316"
ruff = "^0.5.6"
python-dotenv = "^1.0.1"
openai = "^1.52.2"
-pandas = ">=1.3.2"
[tool.pytest.ini_options]
testpaths = [ "tests" ]
diff --git a/src/humanloop/decorators/prompt.py b/src/humanloop/decorators/prompt.py
index caffb512..0459ad8a 100644
--- a/src/humanloop/decorators/prompt.py
+++ b/src/humanloop/decorators/prompt.py
@@ -76,7 +76,7 @@ def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any:
write_to_opentelemetry_span(
span=span,
key=HUMANLOOP_LOG_KEY,
- value=prompt_log,
+ value=prompt_log, # type: ignore
)
# Return the output of the decorated function
diff --git a/src/humanloop/eval_utils/run.py b/src/humanloop/eval_utils/run.py
index 6d2eda4e..fad5b280 100644
--- a/src/humanloop/eval_utils/run.py
+++ b/src/humanloop/eval_utils/run.py
@@ -57,7 +57,7 @@
from humanloop.types.datapoint_response_target_value import DatapointResponseTargetValue
from humanloop.types.evaluation_run_response import EvaluationRunResponse
from humanloop.types.run_stats_response import RunStatsResponse
-from humanloop.types.validation_error import ValidationError
+from pydantic import ValidationError
if typing.TYPE_CHECKING:
from humanloop.client import BaseHumanloop
@@ -616,13 +616,13 @@ def _get_log_func(
"run_id": run_id,
}
if file_type == "flow":
- return partial(client.flows.log, **log_request, trace_status="complete")
+ return partial(client.flows.log, **log_request, trace_status="complete") # type: ignore
elif file_type == "prompt":
- return partial(client.prompts.log, **log_request)
+ return partial(client.prompts.log, **log_request) # type: ignore
elif file_type == "evaluator":
- return partial(client.evaluators.log, **log_request)
+ return partial(client.evaluators.log, **log_request) # type: ignore
elif file_type == "tool":
- return partial(client.tools.log, **log_request)
+ return partial(client.tools.log, **log_request) # type: ignore
else:
raise NotImplementedError(f"Unsupported File version: {file_type}")
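
A minimal, self-contained sketch of the `functools.partial` dispatch pattern that `_get_log_func` uses above: the shared request fields are pre-bound so each file type yields a ready-to-call logger. The `sink` helper here is a hypothetical stand-in for the real `client.<resource>.log` methods, which need a live client.

```python
from functools import partial

def sink(resource: str, **kwargs) -> dict:
    # Stand-in for the real client.<resource>.log methods
    return {"resource": resource, **kwargs}

def get_log_func(file_type: str, **log_request):
    if file_type == "flow":
        # Flow logs additionally mark the enclosing trace as complete
        return partial(sink, "flows", **log_request, trace_status="complete")
    if file_type == "prompt":
        return partial(sink, "prompts", **log_request)
    raise NotImplementedError(f"Unsupported File type: {file_type}")

log = get_log_func("flow", run_id="run_123")
assert log() == {"resource": "flows", "run_id": "run_123", "trace_status": "complete"}
```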
From 52c6c7b0c67140304f622167442b961de3f2b5f0 Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Tue, 12 Nov 2024 15:38:34 +0000
Subject: [PATCH 66/70] Parse '|' annotations in tool decorator
---
.github/workflows/ci.yml | 28 +++++++++-
src/humanloop/decorators/flow.py | 2 +-
src/humanloop/decorators/prompt.py | 2 +-
src/humanloop/decorators/tool.py | 12 ++++-
tests/decorators/test_tool_decorator.py | 72 +++++++++++++++++++++++++
5 files changed, 111 insertions(+), 5 deletions(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index e043d505..3d587f63 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -10,7 +10,7 @@ jobs:
- name: Set up python
uses: actions/setup-python@v4
with:
- python-version: 3.9
+ python-version: 3.12
- name: Bootstrap poetry
run: |
curl -sSL https://install.python-poetry.org | python - -y --version 1.5.1
@@ -41,6 +41,32 @@ jobs:
REPLICATE_API_KEY: ${{ secrets.REPLICATE_API_KEY }}
GROQ_API_KEY: ${{ secrets.GROQ_API_KEY }}
COHERE_API_KEY: ${{ secrets.COHERE_API_KEY }}
+ test_3_12:
+    # Run the test suite with Python 3.12 as well.
+    # Some tool decorator tests assert the ability to parse the signatures
+    # of functions that use typing features introduced in Python 3.10, e.g. the '|' union syntax.
+ runs-on: ubuntu-20.04
+ steps:
+ - name: Checkout repo
+ uses: actions/checkout@v3
+ - name: Set up python
+ uses: actions/setup-python@v4
+ with:
+ python-version: 3.12
+ - name: Bootstrap poetry
+ run: |
+ curl -sSL https://install.python-poetry.org | python - -y --version 1.5.1
+ - name: Install dependencies
+ run: poetry install
+
+ - name: Test
+ run: poetry run pytest -rP .
+ env:
+ OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
+ ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
+ REPLICATE_API_KEY: ${{ secrets.REPLICATE_API_KEY }}
+ GROQ_API_KEY: ${{ secrets.GROQ_API_KEY }}
+ COHERE_API_KEY: ${{ secrets.COHERE_API_KEY }}
publish:
needs: [compile, test]
diff --git a/src/humanloop/decorators/flow.py b/src/humanloop/decorators/flow.py
index d5637fcf..aa78c82d 100644
--- a/src/humanloop/decorators/flow.py
+++ b/src/humanloop/decorators/flow.py
@@ -28,7 +28,7 @@ def decorator(func: Callable):
@wraps(func)
def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any:
span: Span
- with opentelemetry_tracer.start_as_current_span(generate_span_id()) as span:
+ with opentelemetry_tracer.start_as_current_span(generate_span_id()) as span: # type: ignore
span_id = span.get_span_context().span_id
if span.parent:
span_parent_id = span.parent.span_id
diff --git a/src/humanloop/decorators/prompt.py b/src/humanloop/decorators/prompt.py
index 0459ad8a..f1ab8fa2 100644
--- a/src/humanloop/decorators/prompt.py
+++ b/src/humanloop/decorators/prompt.py
@@ -26,7 +26,7 @@ def decorator(func: Callable):
@wraps(func)
def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any:
span: Span
- with opentelemetry_tracer.start_as_current_span(generate_span_id()) as span:
+ with opentelemetry_tracer.start_as_current_span(generate_span_id()) as span: # type: ignore
span_id = span.get_span_context().span_id
if span.parent:
span_parent_id = span.parent.span_id
diff --git a/src/humanloop/decorators/tool.py b/src/humanloop/decorators/tool.py
index c7e38293..2d3ac763 100644
--- a/src/humanloop/decorators/tool.py
+++ b/src/humanloop/decorators/tool.py
@@ -1,6 +1,7 @@
import builtins
import inspect
import logging
+import sys
import textwrap
import typing
from dataclasses import dataclass
@@ -24,6 +25,9 @@
from humanloop.requests.tool_function import ToolFunctionParams
from humanloop.requests.tool_kernel_request import ToolKernelRequestParams
+if sys.version_info >= (3, 10):
+ import types
+
logger = logging.getLogger("humanloop.sdk")
@@ -335,7 +339,7 @@ def _parse_annotation(annotation: typing.Type) -> _ParsedAnnotation:
annotation=[_parse_annotation(arg) for arg in typing.get_args(annotation)],
)
- if origin is typing.Union:
+ if origin is typing.Union or (sys.version_info >= (3, 10) and origin is types.UnionType):
sub_types = typing.get_args(annotation)
if sub_types[-1] is type(None):
# type(None) in sub_types indicates Optional type
@@ -495,4 +499,8 @@ def _parameter_is_optional(
origin = typing.get_origin(parameter.annotation)
# sub_types refers to T inside the annotation
sub_types = typing.get_args(parameter.annotation)
- return origin is typing.Union and len(sub_types) > 0 and sub_types[-1] is type(None)
+ return (
+ (origin is typing.Union or (sys.version_info >= (3, 10) and origin is types.UnionType))
+ and len(sub_types) > 0
+ and sub_types[-1] is type(None)
+ )
diff --git a/tests/decorators/test_tool_decorator.py b/tests/decorators/test_tool_decorator.py
index 340fbf75..f00ab68c 100644
--- a/tests/decorators/test_tool_decorator.py
+++ b/tests/decorators/test_tool_decorator.py
@@ -1,3 +1,4 @@
+import sys
from typing import Any, Optional, TypedDict, Union
import pytest
@@ -460,3 +461,74 @@ def calculator(operation: str, num1: float, num2: float) -> float:
key=HUMANLOOP_FILE_KEY,
)
assert hl_file_higher_order_fn["tool"]["source_code"] == hl_file_decorated_fn["tool"]["source_code"] # type: ignore
+
+
+def test_python310_syntax(
+ opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter],
+):
+ if sys.version_info < (3, 10):
+ pytest.skip("Requires Python 3.10")
+ # GIVEN an OTel configuration
+ tracer, _ = opentelemetry_test_configuration
+
+ # GIVEN a function annotated with @tool where a parameter uses `|` for Optional
+ @tool(opentelemetry_tracer=tracer)
+ def calculator(a: float, b: float | None = None) -> float:
+ # NOTE: dummy function, only testing its signature not correctness
+ if a is None:
+ a = 0
+ return a + b # type: ignore
+
+ # WHEN building the Tool kernel
+ # THEN the JSON schema is correct
+ assert calculator.json_schema == {
+ "description": "",
+ "name": "calculator",
+ "parameters": {
+ "properties": {
+ "a": {"type": "number"},
+ "b": {"type": ["number", "null"]},
+ },
+ "required": ("a",),
+ "type": "object",
+ "additionalProperties": False,
+ },
+ "strict": True,
+ }
+
+ Validator.check_schema(calculator.json_schema)
+
+
+def test_python310_union_syntax(
+ opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter],
+):
+ if sys.version_info < (3, 10):
+ pytest.skip("Requires Python 3.10")
+
+ # GIVEN an OTel configuration
+ tracer, _ = opentelemetry_test_configuration
+
+ # GIVEN a function annotated with @tool where a parameter uses `|` for Union
+ @tool(opentelemetry_tracer=tracer)
+ def calculator(a: float, b: float | int | str) -> float:
+        # NOTE: dummy function; we only test its signature, not its correctness
+ return a + b # type: ignore
+
+ # WHEN building the Tool kernel
+ # THEN the JSON schema is correct
+ assert calculator.json_schema == {
+ "description": "",
+ "name": "calculator",
+ "parameters": {
+ "properties": {
+ "a": {"type": "number"},
+ "b": {"anyOf": [{"type": "number"}, {"type": "integer"}, {"type": "string"}]},
+ },
+ "required": ("a", "b"),
+ "type": "object",
+ "additionalProperties": False,
+ },
+ "strict": True,
+ }
+
+ Validator.check_schema(calculator.json_schema)
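
A standalone illustration (not part of the patch) of why `_parse_annotation` must now accept two union origins: PEP 604 unions written with `|` report `types.UnionType` as their origin, while `typing.Union`/`typing.Optional` report `typing.Union`, even though their arguments unpack identically.

```python
import sys
import types
import typing

if sys.version_info >= (3, 10):
    # PEP 604 unions have a distinct runtime origin...
    assert typing.get_origin(float | None) is types.UnionType
    assert typing.get_args(float | None) == (float, type(None))

# ...while typing.Optional/typing.Union report typing.Union,
# so both origins must be checked when looking for Optional parameters.
assert typing.get_origin(typing.Optional[float]) is typing.Union
assert typing.get_args(typing.Optional[float]) == (float, type(None))
```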
From 7b2e250effff19e794ba3a32b9188bf6264fb634 Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Tue, 12 Nov 2024 15:52:25 +0000
Subject: [PATCH 67/70] Support for ellipsis parsing
---
src/humanloop/decorators/tool.py | 7 ++++--
tests/decorators/test_tool_decorator.py | 33 +++++++++++++++++++++++++
2 files changed, 38 insertions(+), 2 deletions(-)
diff --git a/src/humanloop/decorators/tool.py b/src/humanloop/decorators/tool.py
index 2d3ac763..aad6f817 100644
--- a/src/humanloop/decorators/tool.py
+++ b/src/humanloop/decorators/tool.py
@@ -219,6 +219,7 @@ def _build_function_parameters_property(func) -> _JSONSchemaFunctionParameters:
float,
bool,
Parameter.empty, # type: ignore
+ Ellipsis,
]
@@ -240,7 +241,7 @@ class _ParsedPrimitiveAnnotation(_ParsedAnnotation):
annotation: _PRIMITIVE_TYPES
def no_type_hint(self) -> bool:
- return self.annotation is Parameter.empty
+ return self.annotation is Parameter.empty or self.annotation is Ellipsis
@dataclass
@@ -288,12 +289,14 @@ def _parse_annotation(annotation: typing.Type) -> _ParsedAnnotation:
if origin is None:
# Either not a nested type or no type hint
# Parameter.empty is used for parameters without type hints
+ # Ellipsis is interpreted as Any
if annotation not in (
str,
int,
float,
bool,
Parameter.empty,
+ Ellipsis,
dict,
list,
tuple,
@@ -459,7 +462,7 @@ def _annotation_parse_to_json_schema(
arg_type = {"type": "number"}
if arg.annotation is builtins.bool:
arg_type = {"type": "boolean"}
- if arg.annotation is Parameter.empty:
+ if arg.annotation is Parameter.empty or arg.annotation is Ellipsis:
# JSON Schema dropped support for the 'any' type; we allow any type as a workaround
arg_type = {"type": _JSON_SCHEMA_ANY}
diff --git a/tests/decorators/test_tool_decorator.py b/tests/decorators/test_tool_decorator.py
index f00ab68c..a0556b77 100644
--- a/tests/decorators/test_tool_decorator.py
+++ b/tests/decorators/test_tool_decorator.py
@@ -532,3 +532,36 @@ def calculator(a: float, b: float | int | str) -> float:
}
Validator.check_schema(calculator.json_schema)
+
+
+def test_python_list_ellipsis(
+ opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter],
+):
+ # GIVEN an OTel configuration
+ tracer, _ = opentelemetry_test_configuration
+
+ # GIVEN a function annotated with @tool where a parameter uses `...`
+ @tool(opentelemetry_tracer=tracer)
+ def calculator(a: float, b: ...) -> float:
+        # NOTE: dummy function; we only test its signature, not its correctness
+ if isinstance(b, list):
+ return a + sum(b)
+ return a + b
+
+ # WHEN building the Tool kernel
+ # THEN the JSON schema is correct
+ assert calculator.json_schema == {
+ "description": "",
+ "name": "calculator",
+ "parameters": {
+ "properties": {
+ "a": {"type": "number"},
+ # THEN b is of any type
+ "b": {"type": ["string", "integer", "number", "boolean", "object", "array", "null"]},
+ },
+ "required": ("a", "b"),
+ "type": "object",
+ "additionalProperties": False,
+ },
+ "strict": True,
+ }
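
A quick standalone check of the behaviour this patch relies on: an `...` annotation is evaluated at definition time to the `Ellipsis` singleton, whereas an omitted annotation surfaces as `Parameter.empty` — the parser maps both to the "any type" JSON Schema shown in the test above.

```python
import inspect
from inspect import Parameter

def f(a, b: ...):
    return a, b

params = inspect.signature(f).parameters
# No annotation at all is reported as Parameter.empty...
assert params["a"].annotation is Parameter.empty
# ...while an explicit `...` annotation is the Ellipsis singleton
assert params["b"].annotation is Ellipsis
```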
From b9668d286d0569ac27c23ff7bc81bff12960045e Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Tue, 12 Nov 2024 15:57:12 +0000
Subject: [PATCH 68/70] Add support for ellipsis in type parsing
---
src/humanloop/decorators/tool.py | 26 +++++++++++++++++--------
tests/decorators/test_tool_decorator.py | 11 ++++++-----
2 files changed, 24 insertions(+), 13 deletions(-)
diff --git a/src/humanloop/decorators/tool.py b/src/humanloop/decorators/tool.py
index aad6f817..0662041b 100644
--- a/src/humanloop/decorators/tool.py
+++ b/src/humanloop/decorators/tool.py
@@ -213,14 +213,24 @@ def _build_function_parameters_property(func) -> _JSONSchemaFunctionParameters:
)
-_PRIMITIVE_TYPES = Union[
- str,
- int,
- float,
- bool,
- Parameter.empty, # type: ignore
- Ellipsis,
-]
+if sys.version_info >= (3, 10):
+ _PRIMITIVE_TYPES = Union[
+ str,
+ int,
+ float,
+ bool,
+ Parameter.empty, # type: ignore
+ Ellipsis, # type: ignore
+ ]
+else:
+    # Ellipsis is not supported in the typing module before Python 3.10
+ _PRIMITIVE_TYPES = Union[
+ str,
+ int,
+ float,
+ bool,
+ Parameter.empty, # type: ignore
+ ]
@dataclass
diff --git a/tests/decorators/test_tool_decorator.py b/tests/decorators/test_tool_decorator.py
index a0556b77..34b330ef 100644
--- a/tests/decorators/test_tool_decorator.py
+++ b/tests/decorators/test_tool_decorator.py
@@ -537,16 +537,18 @@ def calculator(a: float, b: float | int | str) -> float:
def test_python_list_ellipsis(
opentelemetry_test_configuration: tuple[Tracer, InMemorySpanExporter],
):
+ if sys.version_info < (3, 10):
+ pytest.skip("Requires Python 3.10")
# GIVEN an OTel configuration
tracer, _ = opentelemetry_test_configuration
# GIVEN a function annotated with @tool where a parameter uses `...`
@tool(opentelemetry_tracer=tracer)
- def calculator(a: float, b: ...) -> float:
+ def calculator(b: ...) -> float | None: # type: ignore
# NOTE: dummy function; we only test its signature, not its correctness
if isinstance(b, list):
- return a + sum(b)
- return a + b
+ return sum(b)
+ return None
# WHEN building the Tool kernel
# THEN the JSON schema is correct
@@ -555,11 +557,10 @@ def calculator(a: float, b: ...) -> float:
"name": "calculator",
"parameters": {
"properties": {
- "a": {"type": "number"},
# THEN b is of any type
"b": {"type": ["string", "integer", "number", "boolean", "object", "array", "null"]},
},
- "required": ("a", "b"),
+ "required": ("b",),
"type": "object",
"additionalProperties": False,
},
From 86c20ba496eb34203f9f44d0826a76642bce2a79 Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Tue, 12 Nov 2024 16:51:15 +0000
Subject: [PATCH 69/70] Switch to contextvars.copy_context()
---
src/humanloop/otel/exporter.py | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/src/humanloop/otel/exporter.py b/src/humanloop/otel/exporter.py
index 9cc4e680..8208d06c 100644
--- a/src/humanloop/otel/exporter.py
+++ b/src/humanloop/otel/exporter.py
@@ -14,7 +14,7 @@
from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult
from humanloop.core import ApiError as HumanloopApiError
-from humanloop.eval_utils.context import EvaluationContext
+from humanloop.eval_utils.context import EVALUATION_CONTEXT_VARIABLE_NAME, EvaluationContext
from humanloop.otel import TRACE_FLOW_CONTEXT, FlowContext
from humanloop.otel.constants import (
HUMANLOOP_FILE_KEY,
@@ -94,16 +94,20 @@ def is_evaluated_file(
if is_humanloop_span(span):
# We pass the EvaluationContext from the eval_run utility thread to
# the export thread so the .log action works as expected
+ evaluation_context_copy = None
+ for context_var, context_var_value in contextvars.copy_context().items():
+ if context_var.name == EVALUATION_CONTEXT_VARIABLE_NAME:
+ evaluation_context_copy = context_var_value
self._upload_queue.put(
(
span,
- copy.deepcopy(evaluation_context),
+ evaluation_context_copy,
),
)
logger.debug(
"Span %s with EvaluationContext %s added to upload queue",
span.attributes,
- copy.deepcopy(evaluation_context),
+ evaluation_context_copy,
)
# Reset the EvaluationContext so the eval run does not
# create a duplicate Log
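
For context, the loop in this patch works because `contextvars.copy_context()` returns a snapshot that is iterable as a mapping of `ContextVar` objects to their current values, which lets a value set on the eval thread be handed to the export thread. A minimal, runnable sketch — the variable, its value, and the string assigned to `EVALUATION_CONTEXT_VARIABLE_NAME` are illustrative placeholders, not the SDK's actual constant:

```python
import contextvars

# Placeholder for the constant imported in the patch; the real string may differ.
EVALUATION_CONTEXT_VARIABLE_NAME = "evaluation_context"
evaluation_context_var = contextvars.ContextVar(EVALUATION_CONTEXT_VARIABLE_NAME)
evaluation_context_var.set({"run_id": "run_123"})

# Snapshot the current context and find the variable by name, as the exporter does.
snapshot = None
for var, value in contextvars.copy_context().items():
    if var.name == EVALUATION_CONTEXT_VARIABLE_NAME:
        snapshot = value

assert snapshot == {"run_id": "run_123"}
```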
From 9274910ee1d220f7fb73eff394436c8d1bf8fb56 Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Tue, 12 Nov 2024 17:12:42 +0000
Subject: [PATCH 70/70] Remove redundant code from the prompt decorator
---
src/humanloop/decorators/prompt.py | 8 +-------
1 file changed, 1 insertion(+), 7 deletions(-)
diff --git a/src/humanloop/decorators/prompt.py b/src/humanloop/decorators/prompt.py
index f1ab8fa2..c1f68a77 100644
--- a/src/humanloop/decorators/prompt.py
+++ b/src/humanloop/decorators/prompt.py
@@ -82,16 +82,10 @@ def wrapper(*args: Sequence[Any], **kwargs: Mapping[str, Any]) -> Any:
# Return the output of the decorated function
return output
- prompt_kernel_file = {**prompt_kernel}
- if prompt_kernel_file.get("provider") is None:
- prompt_kernel_file["provider"] = "openai" # type: ignore
- if prompt_kernel_file.get("endpoint") is None:
- prompt_kernel_file["endpoint"] = "chat" # type: ignore
-
wrapper.file = File( # type: ignore
path=path if path else func.__name__,
type="prompt",
- version={**prompt_kernel_file}, # type: ignore
+ version={**prompt_kernel}, # type: ignore
callable=wrapper,
)