From ba9e223388b4b28a3c930605e1a90f67418c3633 Mon Sep 17 00:00:00 2001 From: fern-api <115122769+fern-api[bot]@users.noreply.github.com> Date: Thu, 24 Apr 2025 09:48:51 +0000 Subject: [PATCH 01/10] Release 0.8.36 --- poetry.lock | 6 +- pyproject.toml | 2 +- reference.md | 4061 +++++++++++++++-- src/humanloop/__init__.py | 213 +- src/humanloop/agents/__init__.py | 37 + src/humanloop/agents/client.py | 3188 +++++++++++++ src/humanloop/agents/raw_client.py | 3869 ++++++++++++++++ src/humanloop/agents/requests/__init__.py | 19 + .../requests/agent_log_request_tool_choice.py | 8 + .../agent_request_reasoning_effort.py | 6 + .../agents/requests/agent_request_stop.py | 5 + .../agents/requests/agent_request_template.py | 6 + .../requests/agent_request_tools_item.py | 7 + .../agents_call_request_tool_choice.py | 8 + .../agents_call_stream_request_tool_choice.py | 8 + src/humanloop/agents/types/__init__.py | 19 + .../types/agent_log_request_tool_choice.py | 8 + .../types/agent_request_reasoning_effort.py | 6 + .../agents/types/agent_request_stop.py | 5 + .../agents/types/agent_request_template.py | 6 + .../agents/types/agent_request_tools_item.py | 7 + .../types/agents_call_request_tool_choice.py | 8 + .../agents_call_stream_request_tool_choice.py | 8 + src/humanloop/base_client.py | 4 + src/humanloop/core/client_wrapper.py | 4 +- src/humanloop/files/client.py | 12 +- src/humanloop/files/raw_client.py | 24 +- ...th_files_retrieve_by_path_post_response.py | 8 +- ...th_files_retrieve_by_path_post_response.py | 3 +- src/humanloop/flows/client.py | 8 +- src/humanloop/logs/client.py | 4 +- src/humanloop/prompts/__init__.py | 4 + src/humanloop/prompts/client.py | 207 +- src/humanloop/prompts/raw_client.py | 263 +- src/humanloop/prompts/requests/__init__.py | 2 + .../prompt_request_reasoning_effort.py | 6 + src/humanloop/prompts/types/__init__.py | 2 + .../types/prompt_request_reasoning_effort.py | 6 + src/humanloop/requests/__init__.py | 88 +- src/humanloop/requests/agent_call_response.py | 202 + .../agent_call_response_tool_choice.py | 8 + .../requests/agent_call_stream_response.py | 17 + .../agent_call_stream_response_payload.py | 8 + .../requests/agent_continue_response.py | 202 + .../agent_continue_response_tool_choice.py | 8 + .../agent_continue_stream_response.py | 17 + .../agent_continue_stream_response_payload.py | 8 + src/humanloop/requests/agent_inline_tool.py | 13 + .../requests/agent_kernel_request.py | 112 + .../agent_kernel_request_reasoning_effort.py | 6 + .../requests/agent_kernel_request_stop.py | 5 + .../requests/agent_kernel_request_template.py | 6 + .../agent_kernel_request_tools_item.py | 7 + .../requests/agent_linked_file_request.py | 13 + .../requests/agent_linked_file_response.py | 19 + .../agent_linked_file_response_file.py | 21 + src/humanloop/requests/agent_log_response.py | 201 + .../agent_log_response_tool_choice.py | 8 + .../requests/agent_log_stream_response.py | 87 + src/humanloop/requests/agent_response.py | 237 + .../agent_response_reasoning_effort.py | 6 + src/humanloop/requests/agent_response_stop.py | 5 + .../requests/agent_response_template.py | 6 + .../requests/agent_response_tools_item.py | 10 + .../anthropic_redacted_thinking_content.py | 12 + .../requests/anthropic_thinking_content.py | 17 + src/humanloop/requests/chat_message.py | 6 + .../requests/chat_message_thinking_item.py | 7 + .../requests/create_agent_log_response.py | 31 + src/humanloop/requests/dataset_response.py | 5 + ...arents_and_children_response_files_item.py | 8 +- 
src/humanloop/requests/evaluator_response.py | 5 + .../file_environment_response_file.py | 8 +- .../file_environment_variable_request.py | 15 + src/humanloop/requests/flow_response.py | 5 + src/humanloop/requests/linked_file_request.py | 10 + src/humanloop/requests/list_agents.py | 12 + src/humanloop/requests/log_response.py | 7 +- src/humanloop/requests/log_stream_response.py | 7 + .../requests/paginated_data_agent_response.py | 12 + ..._response_flow_response_agent_response.py} | 8 +- ...w_response_agent_response_records_item.py} | 14 +- .../requests/populate_template_response.py | 11 +- ...late_template_response_reasoning_effort.py | 6 + .../requests/prompt_kernel_request.py | 12 +- .../prompt_kernel_request_reasoning_effort.py | 6 + src/humanloop/requests/prompt_response.py | 11 +- .../prompt_response_reasoning_effort.py | 6 + .../requests/run_version_response.py | 3 +- src/humanloop/requests/tool_call_response.py | 146 + src/humanloop/requests/tool_log_response.py | 6 + .../version_deployment_response_file.py | 8 +- .../requests/version_id_response_version.py | 8 +- src/humanloop/tools/client.py | 523 ++- src/humanloop/tools/raw_client.py | 765 +++- src/humanloop/types/__init__.py | 96 +- src/humanloop/types/agent_call_response.py | 224 + .../types/agent_call_response_tool_choice.py | 8 + .../types/agent_call_stream_response.py | 42 + .../agent_call_stream_response_payload.py | 8 + .../types/agent_continue_response.py | 224 + .../agent_continue_response_tool_choice.py | 8 + .../types/agent_continue_stream_response.py | 42 + .../agent_continue_stream_response_payload.py | 8 + src/humanloop/types/agent_inline_tool.py | 23 + src/humanloop/types/agent_kernel_request.py | 122 + .../agent_kernel_request_reasoning_effort.py | 6 + .../types/agent_kernel_request_stop.py | 5 + .../types/agent_kernel_request_template.py | 6 + .../types/agent_kernel_request_tools_item.py | 7 + .../types/agent_linked_file_request.py | 23 + .../types/agent_linked_file_response.py | 39 + .../types/agent_linked_file_response_file.py | 16 + src/humanloop/types/agent_log_response.py | 224 + .../types/agent_log_response_tool_choice.py | 8 + .../types/agent_log_stream_response.py | 98 + src/humanloop/types/agent_response.py | 260 ++ .../types/agent_response_reasoning_effort.py | 6 + src/humanloop/types/agent_response_stop.py | 5 + .../types/agent_response_template.py | 6 + .../types/agent_response_tools_item.py | 10 + .../anthropic_redacted_thinking_content.py | 23 + .../types/anthropic_thinking_content.py | 28 + src/humanloop/types/chat_message.py | 6 + .../types/chat_message_thinking_item.py | 7 + .../types/create_agent_log_response.py | 42 + src/humanloop/types/dataset_response.py | 9 + ...tory_with_parents_and_children_response.py | 2 + ...arents_and_children_response_files_item.py | 3 +- src/humanloop/types/evaluatee_response.py | 2 + .../types/evaluation_evaluator_response.py | 2 + .../types/evaluation_log_response.py | 3 + src/humanloop/types/evaluation_response.py | 2 + .../types/evaluation_run_response.py | 2 + .../types/evaluation_runs_response.py | 2 + src/humanloop/types/evaluator_log_response.py | 3 + src/humanloop/types/evaluator_response.py | 11 + src/humanloop/types/event_type.py | 21 + .../types/file_environment_response.py | 2 + .../types/file_environment_response_file.py | 3 +- .../file_environment_variable_request.py | 27 + src/humanloop/types/file_type.py | 2 +- src/humanloop/types/files_tool_type.py | 2 +- src/humanloop/types/flow_log_response.py | 3 + src/humanloop/types/flow_response.py | 11 + 
src/humanloop/types/linked_file_request.py | 21 + src/humanloop/types/list_agents.py | 31 + src/humanloop/types/list_evaluators.py | 2 + src/humanloop/types/list_flows.py | 2 + src/humanloop/types/list_prompts.py | 2 + src/humanloop/types/list_tools.py | 2 + src/humanloop/types/log_response.py | 5 +- src/humanloop/types/log_stream_response.py | 7 + src/humanloop/types/model_providers.py | 2 +- .../types/monitoring_evaluator_response.py | 2 + src/humanloop/types/on_agent_call_enum.py | 5 + .../types/open_ai_reasoning_effort.py | 5 + .../types/paginated_data_agent_response.py | 31 + .../paginated_data_evaluation_log_response.py | 3 + .../paginated_data_evaluator_response.py | 2 + .../types/paginated_data_flow_response.py | 2 + .../types/paginated_data_log_response.py | 3 + .../types/paginated_data_prompt_response.py | 2 + .../types/paginated_data_tool_response.py | 2 + ..._response_flow_response_agent_response.py} | 12 +- ...w_response_agent_response_records_item.py} | 7 +- .../types/paginated_evaluation_response.py | 2 + .../types/populate_template_response.py | 17 +- ...late_template_response_reasoning_effort.py | 6 + src/humanloop/types/prompt_call_response.py | 2 + src/humanloop/types/prompt_kernel_request.py | 12 +- .../prompt_kernel_request_reasoning_effort.py | 6 + src/humanloop/types/prompt_log_response.py | 3 + src/humanloop/types/prompt_response.py | 17 +- .../types/prompt_response_reasoning_effort.py | 6 + src/humanloop/types/reasoning_effort.py | 5 - src/humanloop/types/run_version_response.py | 3 +- src/humanloop/types/tool_call_response.py | 168 + src/humanloop/types/tool_log_response.py | 9 + src/humanloop/types/tool_response.py | 2 + .../types/version_deployment_response.py | 2 + .../types/version_deployment_response_file.py | 3 +- src/humanloop/types/version_id_response.py | 2 + .../types/version_id_response_version.py | 3 +- 184 files changed, 16582 insertions(+), 665 deletions(-) create mode 100644 src/humanloop/agents/__init__.py create mode 100644 src/humanloop/agents/client.py create mode 100644 src/humanloop/agents/raw_client.py create mode 100644 src/humanloop/agents/requests/__init__.py create mode 100644 src/humanloop/agents/requests/agent_log_request_tool_choice.py create mode 100644 src/humanloop/agents/requests/agent_request_reasoning_effort.py create mode 100644 src/humanloop/agents/requests/agent_request_stop.py create mode 100644 src/humanloop/agents/requests/agent_request_template.py create mode 100644 src/humanloop/agents/requests/agent_request_tools_item.py create mode 100644 src/humanloop/agents/requests/agents_call_request_tool_choice.py create mode 100644 src/humanloop/agents/requests/agents_call_stream_request_tool_choice.py create mode 100644 src/humanloop/agents/types/__init__.py create mode 100644 src/humanloop/agents/types/agent_log_request_tool_choice.py create mode 100644 src/humanloop/agents/types/agent_request_reasoning_effort.py create mode 100644 src/humanloop/agents/types/agent_request_stop.py create mode 100644 src/humanloop/agents/types/agent_request_template.py create mode 100644 src/humanloop/agents/types/agent_request_tools_item.py create mode 100644 src/humanloop/agents/types/agents_call_request_tool_choice.py create mode 100644 src/humanloop/agents/types/agents_call_stream_request_tool_choice.py create mode 100644 src/humanloop/prompts/requests/prompt_request_reasoning_effort.py create mode 100644 src/humanloop/prompts/types/prompt_request_reasoning_effort.py create mode 100644 src/humanloop/requests/agent_call_response.py create mode 
100644 src/humanloop/requests/agent_call_response_tool_choice.py create mode 100644 src/humanloop/requests/agent_call_stream_response.py create mode 100644 src/humanloop/requests/agent_call_stream_response_payload.py create mode 100644 src/humanloop/requests/agent_continue_response.py create mode 100644 src/humanloop/requests/agent_continue_response_tool_choice.py create mode 100644 src/humanloop/requests/agent_continue_stream_response.py create mode 100644 src/humanloop/requests/agent_continue_stream_response_payload.py create mode 100644 src/humanloop/requests/agent_inline_tool.py create mode 100644 src/humanloop/requests/agent_kernel_request.py create mode 100644 src/humanloop/requests/agent_kernel_request_reasoning_effort.py create mode 100644 src/humanloop/requests/agent_kernel_request_stop.py create mode 100644 src/humanloop/requests/agent_kernel_request_template.py create mode 100644 src/humanloop/requests/agent_kernel_request_tools_item.py create mode 100644 src/humanloop/requests/agent_linked_file_request.py create mode 100644 src/humanloop/requests/agent_linked_file_response.py create mode 100644 src/humanloop/requests/agent_linked_file_response_file.py create mode 100644 src/humanloop/requests/agent_log_response.py create mode 100644 src/humanloop/requests/agent_log_response_tool_choice.py create mode 100644 src/humanloop/requests/agent_log_stream_response.py create mode 100644 src/humanloop/requests/agent_response.py create mode 100644 src/humanloop/requests/agent_response_reasoning_effort.py create mode 100644 src/humanloop/requests/agent_response_stop.py create mode 100644 src/humanloop/requests/agent_response_template.py create mode 100644 src/humanloop/requests/agent_response_tools_item.py create mode 100644 src/humanloop/requests/anthropic_redacted_thinking_content.py create mode 100644 src/humanloop/requests/anthropic_thinking_content.py create mode 100644 src/humanloop/requests/chat_message_thinking_item.py create mode 100644 src/humanloop/requests/create_agent_log_response.py create mode 100644 src/humanloop/requests/file_environment_variable_request.py create mode 100644 src/humanloop/requests/linked_file_request.py create mode 100644 src/humanloop/requests/list_agents.py create mode 100644 src/humanloop/requests/log_stream_response.py create mode 100644 src/humanloop/requests/paginated_data_agent_response.py rename src/humanloop/requests/{paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py => paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py} (65%) rename src/humanloop/requests/{paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py => paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py} (58%) create mode 100644 src/humanloop/requests/populate_template_response_reasoning_effort.py create mode 100644 src/humanloop/requests/prompt_kernel_request_reasoning_effort.py create mode 100644 src/humanloop/requests/prompt_response_reasoning_effort.py create mode 100644 src/humanloop/requests/tool_call_response.py create mode 100644 src/humanloop/types/agent_call_response.py create mode 100644 src/humanloop/types/agent_call_response_tool_choice.py create mode 100644 src/humanloop/types/agent_call_stream_response.py create mode 100644 src/humanloop/types/agent_call_stream_response_payload.py create mode 100644 
src/humanloop/types/agent_continue_response.py create mode 100644 src/humanloop/types/agent_continue_response_tool_choice.py create mode 100644 src/humanloop/types/agent_continue_stream_response.py create mode 100644 src/humanloop/types/agent_continue_stream_response_payload.py create mode 100644 src/humanloop/types/agent_inline_tool.py create mode 100644 src/humanloop/types/agent_kernel_request.py create mode 100644 src/humanloop/types/agent_kernel_request_reasoning_effort.py create mode 100644 src/humanloop/types/agent_kernel_request_stop.py create mode 100644 src/humanloop/types/agent_kernel_request_template.py create mode 100644 src/humanloop/types/agent_kernel_request_tools_item.py create mode 100644 src/humanloop/types/agent_linked_file_request.py create mode 100644 src/humanloop/types/agent_linked_file_response.py create mode 100644 src/humanloop/types/agent_linked_file_response_file.py create mode 100644 src/humanloop/types/agent_log_response.py create mode 100644 src/humanloop/types/agent_log_response_tool_choice.py create mode 100644 src/humanloop/types/agent_log_stream_response.py create mode 100644 src/humanloop/types/agent_response.py create mode 100644 src/humanloop/types/agent_response_reasoning_effort.py create mode 100644 src/humanloop/types/agent_response_stop.py create mode 100644 src/humanloop/types/agent_response_template.py create mode 100644 src/humanloop/types/agent_response_tools_item.py create mode 100644 src/humanloop/types/anthropic_redacted_thinking_content.py create mode 100644 src/humanloop/types/anthropic_thinking_content.py create mode 100644 src/humanloop/types/chat_message_thinking_item.py create mode 100644 src/humanloop/types/create_agent_log_response.py create mode 100644 src/humanloop/types/event_type.py create mode 100644 src/humanloop/types/file_environment_variable_request.py create mode 100644 src/humanloop/types/linked_file_request.py create mode 100644 src/humanloop/types/list_agents.py create mode 100644 src/humanloop/types/log_stream_response.py create mode 100644 src/humanloop/types/on_agent_call_enum.py create mode 100644 src/humanloop/types/open_ai_reasoning_effort.py create mode 100644 src/humanloop/types/paginated_data_agent_response.py rename src/humanloop/types/{paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py => paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py} (76%) rename src/humanloop/types/{paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py => paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py} (63%) create mode 100644 src/humanloop/types/populate_template_response_reasoning_effort.py create mode 100644 src/humanloop/types/prompt_kernel_request_reasoning_effort.py create mode 100644 src/humanloop/types/prompt_response_reasoning_effort.py delete mode 100644 src/humanloop/types/reasoning_effort.py create mode 100644 src/humanloop/types/tool_call_response.py diff --git a/poetry.lock b/poetry.lock index 4ce5d536..056dd550 100644 --- a/poetry.lock +++ b/poetry.lock @@ -873,13 +873,13 @@ files = [ [[package]] name = "openai" -version = "1.75.0" +version = "1.76.0" description = "The official Python library for the openai API" optional = false python-versions = ">=3.8" files = [ - {file = "openai-1.75.0-py3-none-any.whl", hash = 
"sha256:fe6f932d2ded3b429ff67cc9ad118c71327db32eb9d32dd723de3acfca337125"}, - {file = "openai-1.75.0.tar.gz", hash = "sha256:fb3ea907efbdb1bcfd0c44507ad9c961afd7dce3147292b54505ecfd17be8fd1"}, + {file = "openai-1.76.0-py3-none-any.whl", hash = "sha256:a712b50e78cf78e6d7b2a8f69c4978243517c2c36999756673e07a14ce37dc0a"}, + {file = "openai-1.76.0.tar.gz", hash = "sha256:fd2bfaf4608f48102d6b74f9e11c5ecaa058b60dad9c36e409c12477dfd91fb2"}, ] [package.dependencies] diff --git a/pyproject.toml b/pyproject.toml index ad96beec..73f2c3d4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -3,7 +3,7 @@ name = "humanloop" [tool.poetry] name = "humanloop" -version = "0.8.35" +version = "0.8.36" description = "" readme = "README.md" authors = [] diff --git a/reference.md b/reference.md index 27a32c92..0cc41043 100644 --- a/reference.md +++ b/reference.md @@ -56,7 +56,7 @@ client.prompts.log( messages=[{"role": "user", "content": "What really happened at Roswell?"}], inputs={"person": "Trump"}, created_at=datetime.datetime.fromisoformat( - "2024-07-18 21:29:35.178000+00:00", + "2024-07-18 23:29:35.178000+00:00", ), provider_latency=6.5931549072265625, output_message={ @@ -1501,7 +1501,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
-**reasoning_effort:** `typing.Optional[ReasoningEffort]` — Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+**reasoning_effort:** `typing.Optional[PromptRequestReasoningEffortParams]` — Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an `OpenAIReasoningEffort` enum. Anthropic reasoning models expect an integer, which sets the maximum reasoning token budget.
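+
+A minimal sketch of the two accepted shapes, using `prompts.upsert` (the path and model names here are illustrative, and the integer budget for Anthropic follows the description above rather than a confirmed example):
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+    api_key="YOUR_API_KEY",
+)
+
+# OpenAI reasoning models take an OpenAIReasoningEffort enum value.
+client.prompts.upsert(
+    path="example/reasoning-prompt",  # illustrative path
+    model="o3-mini",
+    reasoning_effort="medium",
+)
+
+# Anthropic reasoning models take an integer, read as the maximum
+# reasoning token budget (shape assumed from the description above).
+client.prompts.upsert(
+    path="example/reasoning-prompt",
+    model="claude-3-7-sonnet-latest",  # illustrative model name
+    reasoning_effort=1024,
+)
+```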
@@ -2518,8 +2518,7 @@ client.prompts.update_monitoring( -## Tools -
client.tools.log(...) +
client.prompts.serialize(...)
@@ -2531,15 +2530,13 @@ client.prompts.update_monitoring(
-Log to a Tool. +Serialize a Prompt to the .prompt file format. -You can use query parameters `version_id`, or `environment`, to target -an existing version of the Tool. Otherwise the default deployed version will be chosen. +Useful for storing the Prompt with your code in a version control system, +or for editing with an AI tool. -Instead of targeting an existing version explicitly, you can instead pass in -Tool details in the request body. In this case, we will check if the details correspond -to an existing version of the Tool, if not we will create a new version. This is helpful -in the case where you are storing or deriving your Tool details in code. +By default, the deployed version of the Prompt is returned. Use the query parameters +`version_id` or `environment` to target a specific version of the Prompt.
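+
+For example, to serialize whichever version is deployed to a given Environment (the Prompt ID and environment name are illustrative, and this sketch assumes the endpoint returns the serialized `.prompt` contents as a string):
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+    api_key="YOUR_API_KEY",
+)
+# Serialize the version deployed to "production" and keep the
+# .prompt file alongside your code.
+serialized = client.prompts.serialize(
+    id="pr_30gco7dx6JDq4200GVOHa",  # illustrative Prompt ID
+    environment="production",
+)
+with open("my_prompt.prompt", "w") as f:
+    f.write(serialized)
+```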
@@ -2559,24 +2556,8 @@ from humanloop import Humanloop client = Humanloop( api_key="YOUR_API_KEY", ) -client.tools.log( - path="math-tool", - tool={ - "function": { - "name": "multiply", - "description": "Multiply two numbers", - "parameters": { - "type": "object", - "properties": { - "a": {"type": "number"}, - "b": {"type": "number"}, - }, - "required": ["a", "b"], - }, - } - }, - inputs={"a": 5, "b": 7}, - output="35", +client.prompts.serialize( + id="id", ) ``` @@ -2593,7 +2574,7 @@ client.tools.log(
-**version_id:** `typing.Optional[str]` — A specific Version ID of the Tool to log to. +**id:** `str` — Unique identifier for Prompt.
@@ -2601,7 +2582,7 @@ client.tools.log(
-**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to. +**version_id:** `typing.Optional[str]` — A specific Version ID of the Prompt to retrieve.
@@ -2609,7 +2590,7 @@ client.tools.log(
-**path:** `typing.Optional[str]` — Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`. +**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
@@ -2617,31 +2598,72 @@ client.tools.log(
-**id:** `typing.Optional[str]` — ID for an existing Tool. +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+ +
+ + + +
+ +
client.prompts.deserialize(...)
-**start_time:** `typing.Optional[dt.datetime]` — When the logged event started. - +#### 📝 Description + +
+
+ +
+
+
+Deserialize a Prompt from the .prompt file format.
+
+This returns a subset of the attributes required by a Prompt.
+This subset defines the Prompt version (e.g. `model`, `temperature`, etc.).
</div>
+
+
+ +#### 🔌 Usage
-**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended. - +
+
+ +```python +from humanloop import Humanloop + +client = Humanloop( + api_key="YOUR_API_KEY", +) +client.prompts.deserialize( + prompt="prompt", +) + +``` +
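+
+Combined with `serialize`, this allows a simple round trip. A sketch, assuming `serialize` returns the raw `.prompt` text as a string and that the deserialized object exposes the version-defining attributes mentioned above (the Prompt ID is illustrative):
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+    api_key="YOUR_API_KEY",
+)
+# Fetch the .prompt representation of an existing Prompt...
+raw = client.prompts.serialize(
+    id="pr_30gco7dx6JDq4200GVOHa",  # illustrative Prompt ID
+)
+# ...then turn it back into the attributes that define the version
+# (model, temperature, template, etc.).
+version = client.prompts.deserialize(prompt=raw)
+print(version.model, version.temperature)
+```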
+
+#### ⚙️ Parameters +
-**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. +
+
+ +**prompt:** `str`
@@ -2649,15 +2671,78 @@ client.tools.log(
-**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created. +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+ + +
+
+
+## Tools +
client.tools.call(...)
-**error:** `typing.Optional[str]` — Error message if the log is an error. +#### 📝 Description + +
+
+ +
+
+
+Call a Tool.
+
+Calling a Tool with inputs runs the tool's source code and logs the result and metadata to Humanloop.
+
+You can use query parameters `version_id` or `environment` to target
+an existing version of the Tool. Otherwise, the default deployed version will be chosen.
+
+Instead of targeting an existing version explicitly, you can pass in
+Tool details in the request body. In this case, we will check if the details correspond
+to an existing version of the Tool. If they do not, we will create a new version. This is helpful
+in the case where you are storing or deriving your Tool details in code.
+</dd>
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from humanloop import Humanloop + +client = Humanloop( + api_key="YOUR_API_KEY", +) +client.tools.call() + +``` +
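+
+As described above, you can also supply the Tool details inline rather than targeting a deployed version; Humanloop will reuse a matching version or create a new one. A sketch mirroring the `tools.log` example below (the tool definition is illustrative, and `inputs` is assumed to be accepted here as it is for `tools.log`):
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+    api_key="YOUR_API_KEY",
+)
+# Passing Tool details in the body: if they match an existing version
+# it is reused, otherwise a new version is created before the call.
+client.tools.call(
+    path="math-tool",  # illustrative path
+    tool={
+        "function": {
+            "name": "multiply",
+            "description": "Multiply two numbers",
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "a": {"type": "number"},
+                    "b": {"type": "number"},
+                },
+                "required": ["a", "b"],
+            },
+        }
+    },
+    inputs={"a": 5, "b": 7},
+)
+```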
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**version_id:** `typing.Optional[str]` — A specific Version ID of the Tool to call.
@@ -2665,7 +2750,7 @@ client.tools.log(
-**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds. +**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to call.
@@ -2673,7 +2758,7 @@ client.tools.log(
-**stdout:** `typing.Optional[str]` — Captured log and debug statements.
+**path:** `typing.Optional[str]` — Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
@@ -2681,7 +2766,7 @@ client.tools.log(
-**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider. +**id:** `typing.Optional[str]` — ID for an existing Tool.
@@ -2689,7 +2774,7 @@ client.tools.log(
-**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received the provider. +**tool:** `typing.Optional[ToolKernelRequestParams]` — Details of your Tool. A new Tool version will be created if the provided details are new.
@@ -2721,7 +2806,7 @@ client.tools.log(
-**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. +**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
@@ -2729,7 +2814,7 @@ client.tools.log(
-**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. +**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
@@ -2737,7 +2822,7 @@ client.tools.log(
-**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace. +**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
@@ -2745,7 +2830,7 @@ client.tools.log(
-**user:** `typing.Optional[str]` — End-user ID related to the Log. +**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -2753,7 +2838,7 @@ client.tools.log(
-**tool_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to. +**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
@@ -2761,7 +2846,7 @@ client.tools.log(
-**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop. +**user:** `typing.Optional[str]` — End-user ID related to the Log.
@@ -2769,7 +2854,7 @@ client.tools.log(
-**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. +**tool_call_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to.
@@ -2777,7 +2862,15 @@ client.tools.log(
-**tool:** `typing.Optional[ToolKernelRequestParams]` — Details of your Tool. A new Tool version will be created if the provided details are new. +**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop. + +
+
+ +
+
+ +**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
@@ -2797,7 +2890,7 @@ client.tools.log(
-
client.tools.update(...) +
client.tools.log(...)
@@ -2809,9 +2902,15 @@ client.tools.log(
-Update a Log.
+Log to a Tool.
-Update the details of a Log with the given ID.
+You can use query parameters `version_id` or `environment` to target
+an existing version of the Tool. Otherwise, the default deployed version will be chosen.
+
+Instead of targeting an existing version explicitly, you can pass in
+Tool details in the request body. In this case, we will check if the details correspond
+to an existing version of the Tool; if not, we will create a new version. This is helpful
+in the case where you are storing or deriving your Tool details in code.
@@ -2831,9 +2930,24 @@ from humanloop import Humanloop client = Humanloop( api_key="YOUR_API_KEY", ) -client.tools.update( - id="id", - log_id="log_id", +client.tools.log( + path="math-tool", + tool={ + "function": { + "name": "multiply", + "description": "Multiply two numbers", + "parameters": { + "type": "object", + "properties": { + "a": {"type": "number"}, + "b": {"type": "number"}, + }, + "required": ["a", "b"], + }, + } + }, + inputs={"a": 5, "b": 7}, + output="35", ) ``` @@ -2850,7 +2964,7 @@ client.tools.update(
-**id:** `str` — Unique identifier for Prompt. +**version_id:** `typing.Optional[str]` — A specific Version ID of the Tool to log to.
@@ -2858,7 +2972,7 @@ client.tools.update(
-**log_id:** `str` — Unique identifier for the Log. +**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
@@ -2866,7 +2980,7 @@ client.tools.update(
-**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+**path:** `typing.Optional[str]` — Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
@@ -2874,7 +2988,7 @@ client.tools.update(
-**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created. +**id:** `typing.Optional[str]` — ID for an existing Tool.
@@ -2882,7 +2996,7 @@ client.tools.update(
-**error:** `typing.Optional[str]` — Error message if the log is an error. +**tool:** `typing.Optional[ToolKernelRequestParams]` — Details of your Tool. A new Tool version will be created if the provided details are new.
@@ -2890,7 +3004,7 @@ client.tools.update(
-**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds. +**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
@@ -2898,7 +3012,7 @@ client.tools.update(
-**stdout:** `typing.Optional[str]` — Captured log and debug statements. +**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
@@ -2906,7 +3020,7 @@ client.tools.update(
-**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider. +**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
@@ -2914,7 +3028,7 @@ client.tools.update(
-**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received the provider. +**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
@@ -2922,7 +3036,7 @@ client.tools.update(
-**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template. +**error:** `typing.Optional[str]` — Error message if the log is an error.
@@ -2930,7 +3044,7 @@ client.tools.update(
-**source:** `typing.Optional[str]` — Identifies where the model was called from. +**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
@@ -2938,7 +3052,7 @@ client.tools.update(
-**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record. +**stdout:** `typing.Optional[str]` — Captured log and debug statements.
@@ -2946,7 +3060,7 @@ client.tools.update(
-**start_time:** `typing.Optional[dt.datetime]` — When the logged event started. +**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider.
@@ -2954,7 +3068,7 @@ client.tools.update(
-**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
+**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received from the provider.
@@ -2962,7 +3076,7 @@ client.tools.update(
-**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. +**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
@@ -2970,31 +3084,288 @@ client.tools.update(
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**source:** `typing.Optional[str]` — Identifies where the model was called from.
- -
+
+
+**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record. +
-
-
client.tools.list(...)
-#### 📝 Description +**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + +
+
+**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + +
+
+
-Get a list of all Tools. +**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace. + +
+
+ +
+
+ +**user:** `typing.Optional[str]` — End-user ID related to the Log. + +
+
+ +
+
+ +**tool_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to. + +
+
+ +
+
+ +**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop. + +
+
+ +
+
+ +**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+ + + + + + +
+ +
client.tools.update(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Update a Log. + +Update the details of a Log with the given ID. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from humanloop import Humanloop + +client = Humanloop( + api_key="YOUR_API_KEY", +) +client.tools.update( + id="id", + log_id="log_id", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` — Unique identifier for Prompt. + +
+
+ +
+
+ +**log_id:** `str` — Unique identifier for the Log. + +
+
+ +
+
+ +**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + +
+
+ +
+
+ +**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created. + +
+
+ +
+
+ +**error:** `typing.Optional[str]` — Error message if the log is an error. + +
+
+ +
+
+ +**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds. + +
+
+ +
+
+ +**stdout:** `typing.Optional[str]` — Captured log and debug statements. + +
+
+ +
+
+ +**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider. + +
+
+ +
+
+
+**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received from the provider.
+
</dd>
+
+ +
+
+ +**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template. + +
+
+ +
+
+ +**source:** `typing.Optional[str]` — Identifies where the model was called from. + +
+
+ +
+
+ +**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record. + +
+
+ +
+
+ +**start_time:** `typing.Optional[dt.datetime]` — When the logged event started. + +
+
+ +
+
+ +**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended. + +
+
+ +
+
+ +**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.tools.list(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get a list of all Tools.
@@ -4083,12 +4454,11 @@ client.tools.update_monitoring(
-## Datasets -
client.datasets.list(...) +
client.tools.get_environment_variables(...)
-#### 📝 Description +#### 🔌 Usage
@@ -4096,8 +4466,209 @@ client.tools.update_monitoring(
-List all Datasets. -
+```python +from humanloop import Humanloop + +client = Humanloop( + api_key="YOUR_API_KEY", +) +client.tools.get_environment_variables( + id="id", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` — Unique identifier for File. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + + + +
+ +
client.tools.add_environment_variable(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Add an environment variable to a Tool. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from humanloop import Humanloop + +client = Humanloop( + api_key="YOUR_API_KEY", +) +client.tools.add_environment_variable( + id="id", + request=[{"name": "name", "value": "value"}], +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` — Unique identifier for Tool. + +
+
+ +
+
+ +**request:** `typing.Sequence[FileEnvironmentVariableRequestParams]` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.tools.delete_environment_variable(...) +
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from humanloop import Humanloop + +client = Humanloop( + api_key="YOUR_API_KEY", +) +client.tools.delete_environment_variable( + id="id", + name="name", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` — Unique identifier for File. + +
+
+ +
+
+ +**name:** `str` — Name of the Environment Variable to delete. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +## Datasets +
client.datasets.list(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +List all Datasets. +
@@ -6688,7 +7259,2721 @@ client.evaluators.update_monitoring(
-**id:** `str` +**id:** `str` + +
+
+ +
+
+ +**activate:** `typing.Optional[ + typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams] +]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs. + +
+
+ +
+
+ +**deactivate:** `typing.Optional[ + typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams] +]` — Evaluators to deactivate. These will not be run on new Logs. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + + + +
+ +## Flows +
client.flows.log(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+
+Log to a Flow.
+
+You can use query parameters `version_id` or `environment` to target
+an existing version of the Flow. Otherwise, the default deployed version will be chosen.
+
+If you create the Flow Log with a `log_status` of `incomplete`, you should later update it to `complete`
+in order to trigger Evaluators.
+</dd>
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +import datetime + +from humanloop import Humanloop + +client = Humanloop( + api_key="YOUR_API_KEY", +) +client.flows.log( + id="fl_6o701g4jmcanPVHxdqD0O", + flow={ + "attributes": { + "prompt": { + "template": "You are a helpful assistant helping with medical anamnesis", + "model": "gpt-4o", + "temperature": 0.8, + }, + "tool": { + "name": "retrieval_tool_v3", + "description": "Retrieval tool for MedQA.", + "source_code": "def retrieval_tool(question: str) -> str:\n pass\n", + }, + } + }, + inputs={ + "question": "Patient with a history of diabetes and hypertension presents with chest pain and shortness of breath." + }, + output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.", + log_status="incomplete", + start_time=datetime.datetime.fromisoformat( + "2024-07-08 21:40:35+00:00", + ), + end_time=datetime.datetime.fromisoformat( + "2024-07-08 21:40:39+00:00", + ), +) + +``` +
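+
+Because the Log above is opened with `log_status="incomplete"`, child Logs can then be nested under it. A sketch (paths are illustrative; this assumes the returned Log object exposes an `id`, which is passed as the child's `trace_parent_id`):
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+    api_key="YOUR_API_KEY",
+)
+# Open the trace with an incomplete Flow Log...
+flow_log = client.flows.log(
+    path="Personal Projects/MedQA Flow",  # illustrative path
+    log_status="incomplete",
+)
+# ...then nest a child Log under it via trace_parent_id.
+client.prompts.log(
+    path="Personal Projects/MedQA Prompt",  # illustrative path
+    prompt={"model": "gpt-4o"},
+    output="The patient is likely experiencing a myocardial infarction.",
+    trace_parent_id=flow_log.id,
+)
+```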
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**version_id:** `typing.Optional[str]` — A specific Version ID of the Flow to log to. + +
+
+ +
+
+ +**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to. + +
+
+ +
+
+ +**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — List of chat messages that were used as an input to the Flow. + +
+
+ +
+
+ +**output_message:** `typing.Optional[ChatMessageParams]` — The output message returned by this Flow. + +
+
+ +
+
+ +**run_id:** `typing.Optional[str]` — Unique identifier for the Run to associate the Log to. + +
+
+ +
+
+
+**path:** `typing.Optional[str]` — Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
</dd>
+
+ +
+
+ +**id:** `typing.Optional[str]` — ID for an existing Flow. + +
+
+ +
+
+ +**start_time:** `typing.Optional[dt.datetime]` — The start time of the Trace. Will be updated if a child Log with an earlier start time is added. + +
+
+ +
+
+ +**end_time:** `typing.Optional[dt.datetime]` — The end time of the Trace. Will be updated if a child Log with a later end time is added. + +
+
+ +
+
+ +**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + +
+
+ +
+
+ +**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created. + +
+
+ +
+
+ +**error:** `typing.Optional[str]` — Error message if the log is an error. + +
+
+ +
+
+ +**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds. + +
+
+ +
+
+ +**stdout:** `typing.Optional[str]` — Captured log and debug statements. + +
+
+ +
+
+ +**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider. + +
+
+ +
+
+
+**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received from the provider.
+
</dd>
+
+ +
+
+ +**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template. + +
+
+ +
+
+ +**source:** `typing.Optional[str]` — Identifies where the model was called from. + +
+
+ +
+
+ +**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record. + +
+
+ +
+
+ +**log_status:** `typing.Optional[LogStatus]` — Status of the Flow Log. When a Log is updated from `incomplete` to `complete`, it becomes available to Monitoring Evaluators. Flow Logs cannot have an unspecified status: they must start as `incomplete` to allow children to be added. Provide `complete` if you don't intend to add children to the trace. + +
+
+ +
+
+ +**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + +
+
+ +
+
+ +**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace. + +
+
+ +
+
+ +**user:** `typing.Optional[str]` — End-user ID related to the Log. + +
+
+ +
+
+ +**flow_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to. + +
+
+ +
+
+ +**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop. + +
+
+ +
+
+ +**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + +
+
+ +
+
+ +**flow:** `typing.Optional[FlowKernelRequestParams]` — Flow used to generate the Trace. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.flows.update_log(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+
+Update the status, inputs, and output of a Flow Log.
+
+Marking a Flow Log as complete will trigger any monitoring Evaluators to run.
+Inputs and output (or error) must be provided in order to mark it as complete.
+
+The `end_time` log attribute will be set to match the time the log is marked as complete.
+</dd>
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from humanloop import Humanloop + +client = Humanloop( + api_key="YOUR_API_KEY", +) +client.flows.update_log( + log_id="medqa_experiment_0001", + inputs={ + "question": "Patient with a history of diabetes and normal tension presents with chest pain and shortness of breath." + }, + output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.", + log_status="complete", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**log_id:** `str` — Unique identifier of the Flow Log. + +
+
+ +
+
+ +**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — List of chat messages that were used as an input to the Flow. + +
+
+ +
+
+ +**output_message:** `typing.Optional[ChatMessageParams]` — The output message returned by this Flow. + +
+
+ +
+
+ +**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the Flow Log. + +
+
+ +
+
+ +**output:** `typing.Optional[str]` — The output of the Flow Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`. + +
+
+ +
+
+ +**error:** `typing.Optional[str]` — The error message of the Flow Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`. + +
+
+ +
+
+ +**log_status:** `typing.Optional[LogStatus]` — Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.flows.get(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Retrieve the Flow with the given ID. + +By default, the deployed version of the Flow is returned. Use the query parameters +`version_id` or `environment` to target a specific version of the Flow. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from humanloop import Humanloop + +client = Humanloop( + api_key="YOUR_API_KEY", +) +client.flows.get( + id="fl_6o701g4jmcanPVHxdqD0O", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` — Unique identifier for Flow. + +
+
+ +
+
+ +**version_id:** `typing.Optional[str]` — A specific Version ID of the Flow to retrieve. + +
+
+ +
+
+ +**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.flows.delete(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Delete the Flow with the given ID. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from humanloop import Humanloop + +client = Humanloop( + api_key="YOUR_API_KEY", +) +client.flows.delete( + id="fl_6o701g4jmcanPVHxdqD0O", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` — Unique identifier for Flow. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.flows.move(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Move the Flow to a different path or change the name. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from humanloop import Humanloop + +client = Humanloop( + api_key="YOUR_API_KEY", +) +client.flows.move( + id="fl_6o701g4jmcanPVHxdqD0O", + path="new directory/new name", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` — Unique identifier for Flow. + +
+
+ +
+
+ +**path:** `typing.Optional[str]` — Path of the Flow including the Flow name, which is used as a unique identifier. + +
+
+ +
+
+ +**name:** `typing.Optional[str]` — Name of the Flow. + +
+
+ +
+
+ +**directory_id:** `typing.Optional[str]` — Unique identifier for the Directory to move Flow to. Starts with `dir_`. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.flows.list(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get a list of Flows. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+    api_key="YOUR_API_KEY",
+)
+response = client.flows.list(
+    size=1,
+)
+for item in response:
+    print(item)
+# alternatively, you can paginate page-by-page
+for page in response.iter_pages():
+    print(page)
+
+```
</dd>
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**page:** `typing.Optional[int]` — Page number for pagination. + +
+
+ +
+
+ +**size:** `typing.Optional[int]` — Page size for pagination. Number of Flows to fetch. + +
+
+ +
+
+ +**name:** `typing.Optional[str]` — Case-insensitive filter for Flow name. + +
+
+ +
+
+ +**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Flow. This filter matches against both email address and name of users. + +
+
+ +
+
+
+**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Flows by.
+
</dd>
+
+ +
+
+ +**order:** `typing.Optional[SortOrder]` — Direction to sort by. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.flows.upsert(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+
+Create or update a Flow.
+
+Flows can also be identified by their `ID` or `path`.
+
+You can provide `version_name` and `version_description` to identify and describe your versions.
+Version names must be unique within a Flow; attempting to create a version with a name
+that already exists will result in a 409 Conflict error.
+</dd>
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+    api_key="YOUR_API_KEY",
+)
+client.flows.upsert(
+    path="Personal Projects/MedQA Flow",
+    attributes={
+        "prompt": {
+            "template": "You are a helpful medical assistant helping with medical anamnesis. Answer {{question}}",
+            "model": "gpt-4o",
+            "temperature": 0.8,
+        },
+        "tool": {
+            "name": "retrieval_tool_v3",
+            "description": "Retrieval tool for MedQA.",
+            "source_code": "def retrieval_tool(question: str) -> str:\n    pass\n",
+        },
+    },
+    version_name="medqa-flow-v1",
+    version_description="Initial version",
+)
+
+```
</dd>
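+
+Since a duplicate `version_name` results in a 409 Conflict, callers may want to handle that error explicitly. A sketch, assuming the SDK raises the Fern-generated `ApiError` from `humanloop.core.api_error` with a `status_code` attribute:
+
+```python
+from humanloop import Humanloop
+from humanloop.core.api_error import ApiError
+
+client = Humanloop(
+    api_key="YOUR_API_KEY",
+)
+try:
+    client.flows.upsert(
+        path="Personal Projects/MedQA Flow",
+        attributes={"prompt": {"model": "gpt-4o"}},
+        version_name="medqa-flow-v1",  # must be unique within the Flow
+    )
+except ApiError as e:
+    # 409 means a version with this name already exists for this Flow.
+    if e.status_code == 409:
+        print("Version name already taken; choose a new version_name.")
+    else:
+        raise
+```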
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**attributes:** `typing.Dict[str, typing.Optional[typing.Any]]` — A key-value object identifying the Flow Version. + +
+
+ +
+
+
+**path:** `typing.Optional[str]` — Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
</dd>
+
+ +
+
+ +**id:** `typing.Optional[str]` — ID for an existing Flow. + +
+
+ +
+
+ +**version_name:** `typing.Optional[str]` — Unique name for the Flow version. Version names must be unique for a given Flow. + +
+
+ +
+
+ +**version_description:** `typing.Optional[str]` — Description of the version, e.g., the changes made in this version. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.flows.list_versions(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get a list of all the versions of a Flow. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from humanloop import Humanloop + +client = Humanloop( + api_key="YOUR_API_KEY", +) +client.flows.list_versions( + id="fl_6o701g4jmcanPVHxdqD0O", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` — Unique identifier for Flow. + +
+
+ +
+
+
+**evaluator_aggregates:** `typing.Optional[bool]` — Whether to include Evaluator aggregate results for the versions in the response.
+
</dd>
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.flows.delete_flow_version(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Delete a version of the Flow. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from humanloop import Humanloop + +client = Humanloop( + api_key="YOUR_API_KEY", +) +client.flows.delete_flow_version( + id="id", + version_id="version_id", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` — Unique identifier for Flow. + +
+
+ +
+
+ +**version_id:** `str` — Unique identifier for the specific version of the Flow. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.flows.update_flow_version(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Update the name or description of the Flow version. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from humanloop import Humanloop + +client = Humanloop( + api_key="YOUR_API_KEY", +) +client.flows.update_flow_version( + id="id", + version_id="version_id", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` — Unique identifier for Flow. + +
+
+ +
+
+ +**version_id:** `str` — Unique identifier for the specific version of the Flow. + +
+
+ +
+
+ +**name:** `typing.Optional[str]` — Name of the version. + +
+
+ +
+
+ +**description:** `typing.Optional[str]` — Description of the version. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.flows.set_deployment(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Deploy Flow to an Environment. + +Set the deployed version for the specified Environment. This Flow +will be used for calls made to the Flow in this Environment. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from humanloop import Humanloop + +client = Humanloop( + api_key="YOUR_API_KEY", +) +client.flows.set_deployment( + id="fl_6o701g4jmcanPVHxdqD0O", + environment_id="staging", + version_id="flv_6o701g4jmcanPVHxdqD0O", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` — Unique identifier for Flow. + +
+
+ +
+
+ +**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to. + +
+
+ +
+
+ +**version_id:** `str` — Unique identifier for the specific version of the Flow. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.flows.remove_deployment(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Remove deployed Flow from the Environment. + +Remove the deployed version for the specified Environment. This Flow +will no longer be used for calls made to the Flow in this Environment. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from humanloop import Humanloop + +client = Humanloop( + api_key="YOUR_API_KEY", +) +client.flows.remove_deployment( + id="fl_6o701g4jmcanPVHxdqD0O", + environment_id="staging", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` — Unique identifier for Flow. + +
+
+ +
+
+ +**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.flows.list_environments(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +List all Environments and their deployed versions for the Flow. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from humanloop import Humanloop + +client = Humanloop( + api_key="YOUR_API_KEY", +) +client.flows.list_environments( + id="fl_6o701g4jmcanPVHxdqD0O", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` — Unique identifier for Flow. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.flows.update_monitoring(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Activate and deactivate Evaluators for monitoring the Flow. + +An activated Evaluator will automatically be run on all new "completed" Logs +within the Flow for monitoring purposes. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from humanloop import Humanloop + +client = Humanloop( + api_key="YOUR_API_KEY", +) +client.flows.update_monitoring( + id="fl_6o701g4jmcanPVHxdqD0O", + activate=[{"evaluator_version_id": "evv_1abc4308abd"}], +) + +``` +
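+
+A sketch of the inverse operation, assuming the deactivate items take the same `evaluator_version_id` shape as the activate items shown above:
+
+```python
+# Stop a previously activated Evaluator version from running on new Logs.
+client.flows.update_monitoring(
+    id="fl_6o701g4jmcanPVHxdqD0O",
+    deactivate=[{"evaluator_version_id": "evv_1abc4308abd"}],
+)
+```
+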
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` + +
+
+ +
+
+ +**activate:** `typing.Optional[ + typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams] +]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs. + +
+
+ +
+
+ +**deactivate:** `typing.Optional[ + typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams] +]` — Evaluators to deactivate. These will not be run on new Logs. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +## Agents +
client.agents.log(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+
+Create an Agent Log.
+
+You can use the query parameters `version_id` or `environment` to target
+an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete`
+in order to trigger Evaluators.
+
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from humanloop import Humanloop + +client = Humanloop( + api_key="YOUR_API_KEY", +) +client.agents.log() + +``` +
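+
+A fuller sketch with hypothetical values (the path, messages, output and timestamp below are illustrative only; all parameters are documented under ⚙️ Parameters):
+
+```python
+import datetime
+
+from humanloop import Humanloop
+
+client = Humanloop(
+    api_key="YOUR_API_KEY",
+)
+# Hypothetical Agent path and Log contents, for illustration.
+client.agents.log(
+    path="Projects/Support Agent",
+    messages=[{"role": "user", "content": "Where is my order?"}],
+    output="Your order shipped yesterday.",
+    log_status="incomplete",  # update to "complete" later to trigger Evaluators
+    start_time=datetime.datetime.fromisoformat("2024-07-08 19:40:35+00:00"),
+)
+```
+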
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**version_id:** `typing.Optional[str]` — A specific Version ID of the Agent to log to. + +
+
+ +
+
+ +**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to. + +
+
+ +
+
+
+**run_id:** `typing.Optional[str]` — Unique identifier for the Run to associate the Log with.
+
+
+ +
+
+
+**path:** `typing.Optional[str]` — Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+
+ +
+
+ +**id:** `typing.Optional[str]` — ID for an existing Agent. + +
+
+ +
+
+ +**output_message:** `typing.Optional[ChatMessageParams]` — The message returned by the provider. + +
+
+ +
+
+ +**prompt_tokens:** `typing.Optional[int]` — Number of tokens in the prompt used to generate the output. + +
+
+ +
+
+ +**reasoning_tokens:** `typing.Optional[int]` — Number of reasoning tokens used to generate the output. + +
+
+ +
+
+ +**output_tokens:** `typing.Optional[int]` — Number of tokens in the output generated by the model. + +
+
+ +
+
+
+**prompt_cost:** `typing.Optional[float]` — Cost in dollars associated with the tokens in the prompt.
+
+
+ +
+
+
+**output_cost:** `typing.Optional[float]` — Cost in dollars associated with the tokens in the output.
+
+
+ +
+
+ +**finish_reason:** `typing.Optional[str]` — Reason the generation finished. + +
+
+ +
+
+
+**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — The messages passed to the provider chat endpoint.
+
+
+ +
+
+
+**tool_choice:** `typing.Optional[AgentLogRequestToolChoiceParams]`
+
+Controls how the model uses tools. The following options are supported:
+- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+- `'required'` means the model must call one or more of the provided tools.
+- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function; see the sketch below.
+
+
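+A minimal sketch of the dict form, assuming a hypothetical tool named `get_weather` is attached to the Agent:
+
+```python
+# Force the model to call the (hypothetical) "get_weather" tool.
+tool_choice = {
+    "type": "function",
+    "function": {"name": "get_weather"},
+}
+```
+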
+ +
+
+ +**agent:** `typing.Optional[AgentKernelRequestParams]` — Details of your Agent. A new Agent version will be created if the provided details are new. + +
+
+ +
+
+ +**start_time:** `typing.Optional[dt.datetime]` — When the logged event started. + +
+
+ +
+
+ +**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended. + +
+
+ +
+
+ +**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + +
+
+ +
+
+ +**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created. + +
+
+ +
+
+ +**error:** `typing.Optional[str]` — Error message if the log is an error. + +
+
+ +
+
+ +**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds. + +
+
+ +
+
+ +**stdout:** `typing.Optional[str]` — Captured log and debug statements. + +
+
+ +
+
+ +**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider. + +
+
+ +
+
+
+**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received from the provider.
+
+
+ +
+
+ +**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template. + +
+
+ +
+
+ +**source:** `typing.Optional[str]` — Identifies where the model was called from. + +
+
+ +
+
+ +**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record. + +
+
+ +
+
+ +**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + +
+
+ +
+
+ +**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + +
+
+ +
+
+ +**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace. + +
+
+ +
+
+ +**user:** `typing.Optional[str]` — End-user ID related to the Log. + +
+
+ +
+
+
+**agent_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated with.
+
+
+ +
+
+ +**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop. + +
+
+ +
+
+ +**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.agents.update_log(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Update a Log. + +Update the details of a Log with the given ID. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from humanloop import Humanloop + +client = Humanloop( + api_key="YOUR_API_KEY", +) +client.agents.update_log( + id="id", + log_id="log_id", +) + +``` +
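+
+A sketch of completing a Log (the IDs and output are hypothetical); marking it `complete` triggers any monitoring Evaluators:
+
+```python
+client.agents.update_log(
+    id="ag_1234567890",       # hypothetical Agent ID
+    log_id="log_1234567890",  # hypothetical Log ID
+    output="Final answer returned to the user.",
+    log_status="complete",
+)
+```
+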
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` — Unique identifier for Agent. + +
+
+ +
+
+ +**log_id:** `str` — Unique identifier for the Log. + +
+
+ +
+
+
+**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — List of chat messages that were used as an input to the Agent.
+
+
+ +
+
+
+**output_message:** `typing.Optional[ChatMessageParams]` — The output message returned by this Agent.
+
+
+ +
+
+
+**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the Agent Log.
+
+
+ +
+
+
+**output:** `typing.Optional[str]` — The output of the Agent Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`.
+
+
+ +
+
+
+**error:** `typing.Optional[str]` — The error message of the Agent Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`.
+
+
+ +
+
+
+**log_status:** `typing.Optional[LogStatus]` — Status of the Agent Log. When an Agent Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs.
+
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.agents.call_stream(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+
+Call an Agent.
+
+Calling an Agent calls the model provider before logging
+the request, responses and metadata to Humanloop.
+
+You can use the query parameters `version_id` or `environment` to target
+an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+Instead of targeting an existing version explicitly, you can pass
+Agent details in the request body. In this case, we will check if the details correspond
+to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+when you are storing or deriving your Agent details in code.
+
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+    api_key="YOUR_API_KEY",
+)
+response = client.agents.call_stream()
+for chunk in response.data:
+    print(chunk)
+
+```
+
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**version_id:** `typing.Optional[str]` — A specific Version ID of the Agent to log to. + +
+
+ +
+
+ +**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to. + +
+
+ +
+
+
+**path:** `typing.Optional[str]` — Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+
+ +
+
+ +**id:** `typing.Optional[str]` — ID for an existing Agent. + +
+
+ +
+
+
+**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — The messages passed to the provider chat endpoint.
+
+
+ +
+
+
+**tool_choice:** `typing.Optional[AgentsCallStreamRequestToolChoiceParams]`
+
+Controls how the model uses tools. The following options are supported:
+- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+- `'required'` means the model must call one or more of the provided tools.
+- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+
+ +
+
+ +**agent:** `typing.Optional[AgentKernelRequestParams]` — Details of your Agent. A new Agent version will be created if the provided details are new. + +
+
+ +
+
+ +**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template. + +
+
+ +
+
+ +**source:** `typing.Optional[str]` — Identifies where the model was called from. + +
+
+ +
+
+ +**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record. + +
+
+ +
+
+ +**start_time:** `typing.Optional[dt.datetime]` — When the logged event started. + +
+
+ +
+
+ +**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended. + +
+
+ +
+
+ +**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + +
+
+ +
+
+ +**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + +
+
+ +
+
+ +**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace. + +
+
+ +
+
+ +**user:** `typing.Optional[str]` — End-user ID related to the Log. + +
+
+ +
+
+
+**agents_call_stream_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated with.
+
+
+ +
+
+ +**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop. + +
+
+ +
+
+ +**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + +
+
+ +
+
+ +**provider_api_keys:** `typing.Optional[ProviderApiKeysParams]` — API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. + +
+
+ +
+
+ +**return_inputs:** `typing.Optional[bool]` — Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true. + +
+
+ +
+
+ +**include_trace_children:** `typing.Optional[bool]` — If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.agents.call(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+
+Call an Agent.
+
+Calling an Agent calls the model provider before logging
+the request, responses and metadata to Humanloop.
+
+You can use the query parameters `version_id` or `environment` to target
+an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+Instead of targeting an existing version explicitly, you can pass
+Agent details in the request body. In this case, we will check if the details correspond
+to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+when you are storing or deriving your Agent details in code.
+
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from humanloop import Humanloop + +client = Humanloop( + api_key="YOUR_API_KEY", +) +client.agents.call() + +``` +
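+
+A sketch targeting an Agent by path with an initial user message (both values are hypothetical):
+
+```python
+response = client.agents.call(
+    path="Projects/Support Agent",
+    messages=[{"role": "user", "content": "Where is my order?"}],
+)
+```
+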
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**version_id:** `typing.Optional[str]` — A specific Version ID of the Agent to log to. + +
+
+ +
+
+ +**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to. + +
+
+ +
+
+
+**path:** `typing.Optional[str]` — Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+
+ +
+
+ +**id:** `typing.Optional[str]` — ID for an existing Agent. + +
+
+ +
+
+
+**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — The messages passed to the provider chat endpoint.
+
+
+ +
+
+
+**tool_choice:** `typing.Optional[AgentsCallRequestToolChoiceParams]`
+
+Controls how the model uses tools. The following options are supported:
+- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+- `'required'` means the model must call one or more of the provided tools.
+- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+
+ +
+
+ +**agent:** `typing.Optional[AgentKernelRequestParams]` — Details of your Agent. A new Agent version will be created if the provided details are new. + +
+
+ +
+
+ +**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template. + +
+
+ +
+
+ +**source:** `typing.Optional[str]` — Identifies where the model was called from. + +
+
+ +
+
+ +**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record. + +
+
+ +
+
+ +**start_time:** `typing.Optional[dt.datetime]` — When the logged event started. + +
+
+ +
+
+ +**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended. + +
+
+ +
+
+ +**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + +
+
+ +
+
+ +**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + +
+
+ +
+
+ +**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace. + +
+
+ +
+
+ +**user:** `typing.Optional[str]` — End-user ID related to the Log. + +
+
+ +
+
+
+**agents_call_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated with.
+
+
+ +
+
+ +**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop. + +
+
+ +
+
+ +**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + +
+
+ +
+
+ +**provider_api_keys:** `typing.Optional[ProviderApiKeysParams]` — API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. + +
+
+ +
+
+ +**return_inputs:** `typing.Optional[bool]` — Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true. + +
+
+ +
+
+ +**include_trace_children:** `typing.Optional[bool]` — If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.agents.continue_stream(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Continue an incomplete Agent call. + +This endpoint allows continuing an existing incomplete Agent call, using the context +from the previous interaction. The Agent will resume processing from where it left off. + +The original log must be in an incomplete state to be continued. + +The messages in the request will be appended +to the original messages in the log. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+    api_key="YOUR_API_KEY",
+)
+response = client.agents.continue_stream(
+    log_id="log_id",
+    messages=[{"role": "user"}],
+)
+for chunk in response.data:
+    print(chunk)
+
+```
+
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**log_id:** `str` — This identifies the Agent Log to continue. + +
+
+ +
+
+ +**messages:** `typing.Sequence[ChatMessageParams]` — The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls. + +
+
+ +
+
+ +**provider_api_keys:** `typing.Optional[ProviderApiKeysParams]` — API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. + +
+
+ +
+
+ +**include_trace_children:** `typing.Optional[bool]` — If true, populate `trace_children` for the returned Agent Log. Defaults to false. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.agents.continue_(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Continue an incomplete Agent call. + +This endpoint allows continuing an existing incomplete Agent call, using the context +from the previous interaction. The Agent will resume processing from where it left off. + +The original log must be in an incomplete state to be continued. + +The messages in the request will be appended +to the original messages in the log. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from humanloop import Humanloop + +client = Humanloop( + api_key="YOUR_API_KEY", +) +client.agents.continue_( + log_id="log_id", + messages=[{"role": "user"}], +) + +``` +
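+
+A sketch of continuing with tool results, assuming the previous assistant turn requested a tool call and that `ChatMessage` accepts OpenAI-style `tool` messages with a `tool_call_id` (all values hypothetical):
+
+```python
+client.agents.continue_(
+    log_id="log_1234567890",  # hypothetical incomplete Agent Log
+    messages=[
+        {
+            "role": "tool",
+            "tool_call_id": "call_abc123",  # must match the pending tool call
+            "content": '{"status": "shipped"}',
+        }
+    ],
+)
+```
+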
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**log_id:** `str` — This identifies the Agent Log to continue. + +
+
+ +
+
+ +**messages:** `typing.Sequence[ChatMessageParams]` — The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls.
@@ -6696,9 +9981,7 @@ client.evaluators.update_monitoring(
-**activate:** `typing.Optional[ - typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams] -]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs. +**provider_api_keys:** `typing.Optional[ProviderApiKeysParams]` — API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
@@ -6706,9 +9989,7 @@ client.evaluators.update_monitoring(
-**deactivate:** `typing.Optional[ - typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams] -]` — Evaluators to deactivate. These will not be run on new Logs. +**include_trace_children:** `typing.Optional[bool]` — If true, populate `trace_children` for the returned Agent Log. Defaults to false.
@@ -6728,8 +10009,7 @@ client.evaluators.update_monitoring(
-## Flows -
client.flows.log(...) +
client.agents.list(...)
@@ -6741,13 +10021,7 @@ client.evaluators.update_monitoring(
-Log to a Flow. - -You can use query parameters `version_id`, or `environment`, to target -an existing version of the Flow. Otherwise, the default deployed version will be chosen. - -If you create the Flow Log with a `log_status` of `incomplete`, you should later update it to `complete` -in order to trigger Evaluators. +Get a list of all Agents.
@@ -6762,41 +10036,12 @@ in order to trigger Evaluators.
```python -import datetime - from humanloop import Humanloop client = Humanloop( api_key="YOUR_API_KEY", ) -client.flows.log( - id="fl_6o701g4jmcanPVHxdqD0O", - flow={ - "attributes": { - "prompt": { - "template": "You are a helpful assistant helping with medical anamnesis", - "model": "gpt-4o", - "temperature": 0.8, - }, - "tool": { - "name": "retrieval_tool_v3", - "description": "Retrieval tool for MedQA.", - "source_code": "def retrieval_tool(question: str) -> str:\n pass\n", - }, - } - }, - inputs={ - "question": "Patient with a history of diabetes and hypertension presents with chest pain and shortness of breath." - }, - output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.", - log_status="incomplete", - start_time=datetime.datetime.fromisoformat( - "2024-07-08 19:40:35+00:00", - ), - end_time=datetime.datetime.fromisoformat( - "2024-07-08 19:40:39+00:00", - ), -) +client.agents.list() ```
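+
+A sketch with hypothetical filter values (all parameters are documented below):
+
+```python
+agents = client.agents.list(
+    size=10,         # fetch ten Agents per page
+    name="support",  # case-insensitive name filter
+)
+```
+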
@@ -6812,7 +10057,7 @@ client.flows.log(
-**version_id:** `typing.Optional[str]` — A specific Version ID of the Flow to log to. +**page:** `typing.Optional[int]` — Page number for pagination.
@@ -6820,7 +10065,7 @@ client.flows.log(
-**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to. +**size:** `typing.Optional[int]` — Page size for pagination. Number of Agents to fetch.
@@ -6828,7 +10073,7 @@ client.flows.log(
-**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — List of chat messages that were used as an input to the Flow. +**name:** `typing.Optional[str]` — Case-insensitive filter for Agent name.
@@ -6836,7 +10081,7 @@ client.flows.log(
-**output_message:** `typing.Optional[ChatMessageParams]` — The output message returned by this Flow. +**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users.
@@ -6844,7 +10089,7 @@ client.flows.log(
-**run_id:** `typing.Optional[str]` — Unique identifier for the Run to associate the Log to.
+**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Agents by.
@@ -6852,7 +10097,7 @@ client.flows.log(
-**path:** `typing.Optional[str]` — Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`. +**order:** `typing.Optional[SortOrder]` — Direction to sort by.
@@ -6860,71 +10105,76 @@ client.flows.log(
-**id:** `typing.Optional[str]` — ID for an existing Flow. +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+ +
-
-
-**start_time:** `typing.Optional[dt.datetime]` — The start time of the Trace. Will be updated if a child Log with an earlier start time is added. -
+
+
client.agents.upsert(...)
-**end_time:** `typing.Optional[dt.datetime]` — The end time of the Trace. Will be updated if a child Log with a later end time is added. - -
-
+#### 📝 Description
-**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. - -
-
-
-**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
-
+Create an Agent or update it with a new version if it already exists.
+
+Agents are identified by the `ID` or their `path`. The parameters (e.g. the template, temperature, model, etc.) and
+tools determine the versions of the Agent.
+
+You can provide `version_name` and `version_description` to identify and describe your versions.
+Version names must be unique within an Agent: attempting to create a version with a name
+that already exists will result in a 409 Conflict error.
+
+
+#### 🔌 Usage +
-**error:** `typing.Optional[str]` — Error message if the log is an error. - -
-
-
-**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds. - +```python +from humanloop import Humanloop + +client = Humanloop( + api_key="YOUR_API_KEY", +) +client.agents.upsert( + model="model", +) + +``` +
+
+#### ⚙️ Parameters +
-**stdout:** `typing.Optional[str]` — Captured log and debug statements. - -
-
-
-**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider. +**model:** `str` — The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
@@ -6932,7 +10182,7 @@ client.flows.log(
-**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received the provider.
+**path:** `typing.Optional[str]` — Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
@@ -6940,7 +10190,7 @@ client.flows.log(
-**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template. +**id:** `typing.Optional[str]` — ID for an existing Agent.
@@ -6948,7 +10198,7 @@ client.flows.log(
-**source:** `typing.Optional[str]` — Identifies where the model was called from. +**endpoint:** `typing.Optional[ModelEndpoints]` — The provider model endpoint used.
@@ -6956,7 +10206,14 @@ client.flows.log(
-**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record. +**template:** `typing.Optional[AgentRequestTemplateParams]` + +The template contains the main structure and instructions for the model, including input variables for dynamic values. + +For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable. +For completion models, provide a prompt template as a string. + +Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
@@ -6964,7 +10221,7 @@ client.flows.log(
-**log_status:** `typing.Optional[LogStatus]` — Status of the Flow Log. When a Log is updated from `incomplete` to `complete`, it becomes available to Monitoring Evaluators. Flow Logs cannot have an unspecified status: they must start as `incomplete` to allow children to be added. Provide `complete` if you don't intend to add children to the trace. +**template_language:** `typing.Optional[TemplateLanguage]` — The template language to use for rendering the template.
@@ -6972,7 +10229,7 @@ client.flows.log(
-**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. +**provider:** `typing.Optional[ModelProviders]` — The company providing the underlying model service.
@@ -6980,7 +10237,7 @@ client.flows.log(
-**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
+**max_tokens:** `typing.Optional[int]` — The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt.
@@ -6988,7 +10245,7 @@ client.flows.log(
-**user:** `typing.Optional[str]` — End-user ID related to the Log.
+**temperature:** `typing.Optional[float]` — What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
@@ -6996,7 +10253,7 @@ client.flows.log(
-**flow_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to. +**top_p:** `typing.Optional[float]` — An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
@@ -7004,7 +10261,7 @@ client.flows.log(
-**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop. +**stop:** `typing.Optional[AgentRequestStopParams]` — The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
@@ -7012,7 +10269,7 @@ client.flows.log(
-**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. +**presence_penalty:** `typing.Optional[float]` — Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
@@ -7020,7 +10277,7 @@ client.flows.log(
-**flow:** `typing.Optional[FlowKernelRequestParams]` — Flow used to generate the Trace. +**frequency_penalty:** `typing.Optional[float]` — Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
@@ -7028,79 +10285,47 @@ client.flows.log(
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**other:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Other parameter values to be passed to the provider call.
- - +
+
+**seed:** `typing.Optional[int]` — If specified, the model will make a best effort to sample deterministically, but it is not guaranteed.
+
-
-
client.flows.update_log(...)
-#### 📝 Description - -
-
+**response_format:** `typing.Optional[ResponseFormatParams]` — The format of the response. Only `{"type": "json_object"}` is currently supported for chat. + +
+
-Update the status, inputs, output of a Flow Log. - -Marking a Flow Log as complete will trigger any monitoring Evaluators to run. -Inputs and output (or error) must be provided in order to mark it as complete. - -The end_time log attribute will be set to match the time the log is marked as complete. -
-
+**reasoning_effort:** `typing.Optional[AgentRequestReasoningEffortParams]` — Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
+
-#### 🔌 Usage - -
-
-
-```python -from humanloop import Humanloop - -client = Humanloop( - api_key="YOUR_API_KEY", -) -client.flows.update_log( - log_id="medqa_experiment_0001", - inputs={ - "question": "Patient with a history of diabetes and normal tension presents with chest pain and shortness of breath." - }, - output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.", - log_status="complete", -) - -``` -
-
+**tools:** `typing.Optional[typing.Sequence[AgentRequestToolsItemParams]]` +
-#### ⚙️ Parameters - -
-
-
-**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+**attributes:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used.
@@ -7108,7 +10333,7 @@ client.flows.update_log(
-**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — List of chat messages that were used as an input to the Flow. +**max_iterations:** `typing.Optional[int]` — The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
@@ -7116,7 +10341,7 @@ client.flows.update_log(
-**output_message:** `typing.Optional[ChatMessageParams]` — The output message returned by this Flow. +**version_name:** `typing.Optional[str]` — Unique name for the Prompt version. Each Prompt can only have one version with a given name.
@@ -7124,7 +10349,7 @@ client.flows.update_log(
-**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the Flow Log. +**version_description:** `typing.Optional[str]` — Description of the Version.
@@ -7132,7 +10357,7 @@ client.flows.update_log(
-**output:** `typing.Optional[str]` — The output of the Flow Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`. +**description:** `typing.Optional[str]` — Description of the Prompt.
@@ -7140,7 +10365,7 @@ client.flows.update_log(
-**error:** `typing.Optional[str]` — The error message of the Flow Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`.
+**tags:** `typing.Optional[typing.Sequence[str]]` — List of tags associated with this Agent.
@@ -7148,7 +10373,7 @@ client.flows.update_log(
-**log_status:** `typing.Optional[LogStatus]` — Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs.
+**readme:** `typing.Optional[str]` — Long description of the Agent.
@@ -7168,7 +10393,7 @@ client.flows.update_log(
-
client.flows.get(...) +
client.agents.delete_agent_version(...)
@@ -7180,10 +10405,7 @@ client.flows.update_log(
-Retrieve the Flow with the given ID. - -By default, the deployed version of the Flow is returned. Use the query parameters -`version_id` or `environment` to target a specific version of the Flow. +Delete a version of the Agent.
@@ -7203,8 +10425,9 @@ from humanloop import Humanloop client = Humanloop( api_key="YOUR_API_KEY", ) -client.flows.get( - id="fl_6o701g4jmcanPVHxdqD0O", +client.agents.delete_agent_version( + id="id", + version_id="version_id", ) ``` @@ -7221,15 +10444,7 @@ client.flows.get(
-**id:** `str` — Unique identifier for Flow. - -
-
- -
-
- -**version_id:** `typing.Optional[str]` — A specific Version ID of the Flow to retrieve. +**id:** `str` — Unique identifier for Agent.
@@ -7237,7 +10452,7 @@ client.flows.get(
-**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from. +**version_id:** `str` — Unique identifier for the specific version of the Agent.
@@ -7257,7 +10472,7 @@ client.flows.get(
-
client.flows.delete(...) +
client.agents.patch_agent_version(...)
@@ -7269,7 +10484,7 @@ client.flows.get(
-Delete the Flow with the given ID. +Update the name or description of the Agent version.
@@ -7289,8 +10504,9 @@ from humanloop import Humanloop client = Humanloop( api_key="YOUR_API_KEY", ) -client.flows.delete( - id="fl_6o701g4jmcanPVHxdqD0O", +client.agents.patch_agent_version( + id="id", + version_id="version_id", ) ``` @@ -7307,7 +10523,31 @@ client.flows.delete(
-**id:** `str` — Unique identifier for Flow. +**id:** `str` — Unique identifier for Agent. + +
+
+ +
+
+ +**version_id:** `str` — Unique identifier for the specific version of the Agent. + +
+
+ +
+
+ +**name:** `typing.Optional[str]` — Name of the version. + +
+
+ +
+
+ +**description:** `typing.Optional[str]` — Description of the version.
@@ -7327,7 +10567,7 @@ client.flows.delete(
-
client.flows.move(...) +
client.agents.get(...)
@@ -7339,7 +10579,10 @@ client.flows.delete(
-Move the Flow to a different path or change the name. +Retrieve the Agent with the given ID. + +By default, the deployed version of the Agent is returned. Use the query parameters +`version_id` or `environment` to target a specific version of the Agent.
@@ -7359,9 +10602,8 @@ from humanloop import Humanloop client = Humanloop( api_key="YOUR_API_KEY", ) -client.flows.move( - id="fl_6o701g4jmcanPVHxdqD0O", - path="new directory/new name", +client.agents.get( + id="id", ) ``` @@ -7378,15 +10620,7 @@ client.flows.move(
-**id:** `str` — Unique identifier for Flow. - -
-
- -
-
- -**path:** `typing.Optional[str]` — Path of the Flow including the Flow name, which is used as a unique identifier. +**id:** `str` — Unique identifier for Agent.
@@ -7394,7 +10628,7 @@ client.flows.move(
-**name:** `typing.Optional[str]` — Name of the Flow. +**version_id:** `typing.Optional[str]` — A specific Version ID of the Agent to retrieve.
@@ -7402,7 +10636,7 @@ client.flows.move(
-**directory_id:** `typing.Optional[str]` — Unique identifier for the Directory to move Flow to. Starts with `dir_`. +**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
@@ -7422,7 +10656,7 @@ client.flows.move(
-
client.flows.list(...) +
client.agents.delete(...)
@@ -7434,7 +10668,7 @@ client.flows.move(
-Get a list of Flows. +Delete the Agent with the given ID.
@@ -7454,14 +10688,9 @@ from humanloop import Humanloop client = Humanloop( api_key="YOUR_API_KEY", ) -response = client.flows.list( - size=1, +client.agents.delete( + id="id", ) -for item in response: - yield item -# alternatively, you can paginate page-by-page -for page in response.iter_pages(): - yield page ``` @@ -7477,47 +10706,7 @@ for page in response.iter_pages():
-**page:** `typing.Optional[int]` — Page number for pagination. - -
-
- -
-
- -**size:** `typing.Optional[int]` — Page size for pagination. Number of Flows to fetch. - -
-
- -
-
- -**name:** `typing.Optional[str]` — Case-insensitive filter for Flow name. - -
-
- -
-
- -**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Flow. This filter matches against both email address and name of users. - -
-
- -
-
- -**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Flows by - -
-
- -
-
- -**order:** `typing.Optional[SortOrder]` — Direction to sort by. +**id:** `str` — Unique identifier for Agent.
@@ -7537,7 +10726,7 @@ for page in response.iter_pages():
-
client.flows.upsert(...) +
client.agents.move(...)
@@ -7549,13 +10738,7 @@ for page in response.iter_pages():
-Create or update a Flow. - -Flows can also be identified by the `ID` or their `path`. - -You can provide `version_name` and `version_description` to identify and describe your versions. -Version names must be unique within a Flow - attempting to create a version with a name -that already exists will result in a 409 Conflict error. +Move the Agent to a different path or change the name.
@@ -7575,22 +10758,8 @@ from humanloop import Humanloop client = Humanloop( api_key="YOUR_API_KEY", ) -client.flows.upsert( - path="Personal Projects/MedQA Flow", - attributes={ - "prompt": { - "template": "You are a helpful medical assistant helping with medical anamnesis. Answer {{question}}", - "model": "gpt-4o", - "temperature": 0.8, - }, - "tool": { - "name": "retrieval_tool_v3", - "description": "Retrieval tool for MedQA.", - "source_code": "def retrieval_tool(question: str) -> str:\n pass\n", - }, - "version_name": "medqa-flow-v1", - "version_description": "Initial version", - }, +client.agents.move( + id="id", ) ``` @@ -7607,15 +10776,7 @@ client.flows.upsert(
-**attributes:** `typing.Dict[str, typing.Optional[typing.Any]]` — A key-value object identifying the Flow Version. - -
-
- -
-
- -**path:** `typing.Optional[str]` — Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`. +**id:** `str` — Unique identifier for Agent.
@@ -7623,7 +10784,7 @@ client.flows.upsert(
-**id:** `typing.Optional[str]` — ID for an existing Flow.
+**path:** `typing.Optional[str]` — Path of the Agent including the Agent name, which is used as a unique identifier.
@@ -7631,7 +10792,7 @@ client.flows.upsert(
-**version_name:** `typing.Optional[str]` — Unique name for the Flow version. Version names must be unique for a given Flow.
+**name:** `typing.Optional[str]` — Name of the Agent.
@@ -7639,7 +10800,7 @@ client.flows.upsert(
-**version_description:** `typing.Optional[str]` — Description of the version, e.g., the changes made in this version.
+**directory_id:** `typing.Optional[str]` — Unique identifier for the Directory to move the Agent to. Starts with `dir_`.
@@ -7659,7 +10820,7 @@ client.flows.upsert(
-
client.flows.list_versions(...) +
client.agents.list_versions(...)
@@ -7671,7 +10832,7 @@ client.flows.upsert(
-Get a list of all the versions of a Flow.
+Get a list of all the versions of an Agent.
@@ -7691,8 +10852,8 @@ from humanloop import Humanloop client = Humanloop( api_key="YOUR_API_KEY", ) -client.flows.list_versions( - id="fl_6o701g4jmcanPVHxdqD0O", +client.agents.list_versions( + id="id", ) ``` @@ -7709,7 +10870,7 @@ client.flows.list_versions(
-**id:** `str` — Unique identifier for Flow. +**id:** `str` — Unique identifier for Agent.
@@ -7737,7 +10898,7 @@ client.flows.list_versions(
-
client.flows.delete_flow_version(...) +
client.agents.set_deployment(...)
@@ -7749,7 +10910,10 @@ client.flows.list_versions(
-Delete a version of the Flow. +Deploy Agent to an Environment. + +Set the deployed version for the specified Environment. This Agent +will be used for calls made to the Agent in this Environment.
@@ -7769,8 +10933,9 @@ from humanloop import Humanloop client = Humanloop( api_key="YOUR_API_KEY", ) -client.flows.delete_flow_version( +client.agents.set_deployment( id="id", + environment_id="environment_id", version_id="version_id", ) @@ -7788,7 +10953,7 @@ client.flows.delete_flow_version(
-**id:** `str` — Unique identifier for Flow. +**id:** `str` — Unique identifier for Agent.
@@ -7796,7 +10961,15 @@ client.flows.delete_flow_version(
-**version_id:** `str` — Unique identifier for the specific version of the Flow. +**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to. + +
+
+ +
+
+ +**version_id:** `str` — Unique identifier for the specific version of the Agent.
@@ -7816,7 +10989,7 @@ client.flows.delete_flow_version(
-
client.flows.update_flow_version(...) +
client.agents.remove_deployment(...)
@@ -7828,7 +11001,10 @@ client.flows.delete_flow_version(
-Update the name or description of the Flow version. +Remove deployed Agent from the Environment. + +Remove the deployed version for the specified Environment. This Agent +will no longer be used for calls made to the Agent in this Environment.
@@ -7848,9 +11024,9 @@ from humanloop import Humanloop client = Humanloop( api_key="YOUR_API_KEY", ) -client.flows.update_flow_version( +client.agents.remove_deployment( id="id", - version_id="version_id", + environment_id="environment_id", ) ``` @@ -7867,23 +11043,7 @@ client.flows.update_flow_version(
-**id:** `str` — Unique identifier for Flow. - -
-
- -
-
- -**version_id:** `str` — Unique identifier for the specific version of the Flow. - -
-
- -
-
- -**name:** `typing.Optional[str]` — Name of the version. +**id:** `str` — Unique identifier for Agent.
@@ -7891,7 +11051,7 @@ client.flows.update_flow_version(
-**description:** `typing.Optional[str]` — Description of the version. +**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
@@ -7911,7 +11071,7 @@ client.flows.update_flow_version(
-
client.flows.set_deployment(...) +
client.agents.list_environments(...)
@@ -7923,10 +11083,7 @@ client.flows.update_flow_version(
-Deploy Flow to an Environment. - -Set the deployed version for the specified Environment. This Flow -will be used for calls made to the Flow in this Environment. +List all Environments and their deployed versions for the Agent.
@@ -7946,10 +11103,8 @@ from humanloop import Humanloop client = Humanloop( api_key="YOUR_API_KEY", ) -client.flows.set_deployment( - id="fl_6o701g4jmcanPVHxdqD0O", - environment_id="staging", - version_id="flv_6o701g4jmcanPVHxdqD0O", +client.agents.list_environments( + id="id", ) ``` @@ -7966,23 +11121,7 @@ client.flows.set_deployment(
-**id:** `str` — Unique identifier for Flow. - -
-
- -
-
- -**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to. - -
-
- -
-
- -**version_id:** `str` — Unique identifier for the specific version of the Flow. +**id:** `str` — Unique identifier for Agent.
@@ -8002,7 +11141,7 @@ client.flows.set_deployment(
-
client.flows.remove_deployment(...) +
client.agents.update_monitoring(...)
@@ -8014,10 +11153,10 @@ client.flows.set_deployment(
-Remove deployed Flow from the Environment. +Activate and deactivate Evaluators for monitoring the Agent. -Remove the deployed version for the specified Environment. This Flow -will no longer be used for calls made to the Flow in this Environment. +An activated Evaluator will automatically be run on all new Logs +within the Agent for monitoring purposes.
@@ -8037,9 +11176,8 @@ from humanloop import Humanloop client = Humanloop( api_key="YOUR_API_KEY", ) -client.flows.remove_deployment( - id="fl_6o701g4jmcanPVHxdqD0O", - environment_id="staging", +client.agents.update_monitoring( + id="id", ) ``` @@ -8056,7 +11194,7 @@ client.flows.remove_deployment(
-**id:** `str` — Unique identifier for Flow. +**id:** `str`
@@ -8064,7 +11202,19 @@ client.flows.remove_deployment(
-**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from. +**activate:** `typing.Optional[ + typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams] +]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs. + +
+
+ +
+
+ +**deactivate:** `typing.Optional[ + typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams] +]` — Evaluators to deactivate. These will not be run on new Logs.
@@ -8084,7 +11234,7 @@ client.flows.remove_deployment(
-
client.flows.list_environments(...) +
client.agents.serialize(...)
@@ -8096,7 +11246,13 @@ client.flows.remove_deployment(
-List all Environments and their deployed versions for the Flow. +Serialize an Agent to the .agent file format. + +Useful for storing the Agent with your code in a version control system, +or for editing with an AI tool. + +By default, the deployed version of the Agent is returned. Use the query parameters +`version_id` or `environment` to target a specific version of the Agent.
@@ -8116,8 +11272,8 @@ from humanloop import Humanloop client = Humanloop( api_key="YOUR_API_KEY", ) -client.flows.list_environments( - id="fl_6o701g4jmcanPVHxdqD0O", +client.agents.serialize( + id="id", ) ``` @@ -8134,7 +11290,23 @@ client.flows.list_environments(
-**id:** `str` — Unique identifier for Flow. +**id:** `str` — Unique identifier for Agent. + +
+
+ +
+
+ +**version_id:** `typing.Optional[str]` — A specific Version ID of the Agent to retrieve. + +
+
+ +
+
+ +**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
@@ -8154,7 +11326,7 @@ client.flows.list_environments(
-
client.flows.update_monitoring(...) +
client.agents.deserialize(...)
@@ -8166,10 +11338,10 @@ client.flows.list_environments(
-Activate and deactivate Evaluators for monitoring the Flow.
+Deserialize an Agent from the .agent file format.
-An activated Evaluator will automatically be run on all new "completed" Logs
-within the Flow for monitoring purposes.
+This returns a subset of the attributes required by an Agent.
+This subset defines the Agent version (e.g. `model`, `temperature`, etc.).
@@ -8189,9 +11361,8 @@ from humanloop import Humanloop client = Humanloop( api_key="YOUR_API_KEY", ) -client.flows.update_monitoring( - id="fl_6o701g4jmcanPVHxdqD0O", - activate=[{"evaluator_version_id": "evv_1abc4308abd"}], +client.agents.deserialize( + agent="agent", ) ``` @@ -8208,27 +11379,7 @@ client.flows.update_monitoring(
-**id:** `str` - -
-
- -
-
- -**activate:** `typing.Optional[ - typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams] -]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs. - -
-
- -
-
- -**deactivate:** `typing.Optional[ - typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams] -]` — Evaluators to deactivate. These will not be run on new Logs. +**agent:** `str`
@@ -10190,7 +13341,7 @@ for page in response.iter_pages():
-**include_trace_children:** `typing.Optional[bool]` — If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow Logs. +**include_trace_children:** `typing.Optional[bool]` — If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow or Agent Logs.
diff --git a/src/humanloop/__init__.py b/src/humanloop/__init__.py index 0c431892..f3396fd7 100644 --- a/src/humanloop/__init__.py +++ b/src/humanloop/__init__.py @@ -1,16 +1,45 @@ # This file was auto-generated by Fern from our API Definition. from .types import ( + AgentCallResponse, + AgentCallResponseToolChoice, + AgentCallStreamResponse, + AgentCallStreamResponsePayload, AgentConfigResponse, + AgentContinueResponse, + AgentContinueResponseToolChoice, + AgentContinueStreamResponse, + AgentContinueStreamResponsePayload, + AgentInlineTool, + AgentKernelRequest, + AgentKernelRequestReasoningEffort, + AgentKernelRequestStop, + AgentKernelRequestTemplate, + AgentKernelRequestToolsItem, + AgentLinkedFileRequest, + AgentLinkedFileResponse, + AgentLinkedFileResponseFile, + AgentLogResponse, + AgentLogResponseToolChoice, + AgentLogStreamResponse, + AgentResponse, + AgentResponseReasoningEffort, + AgentResponseStop, + AgentResponseTemplate, + AgentResponseToolsItem, + AnthropicRedactedThinkingContent, + AnthropicThinkingContent, BaseModelsUserResponse, BooleanEvaluatorStatsResponse, ChatMessage, ChatMessageContent, ChatMessageContentItem, + ChatMessageThinkingItem, ChatRole, ChatToolType, CodeEvaluatorRequest, ConfigToolResponse, + CreateAgentLogResponse, CreateDatapointRequest, CreateDatapointRequestTargetValue, CreateEvaluatorLogResponse, @@ -55,10 +84,12 @@ EvaluatorReturnTypeEnum, EvaluatorVersionId, EvaluatorsRequest, + EventType, ExternalEvaluatorRequest, FeedbackType, FileEnvironmentResponse, FileEnvironmentResponseFile, + FileEnvironmentVariableRequest, FileId, FilePath, FileRequest, @@ -76,7 +107,9 @@ ImageUrl, ImageUrlDetail, InputResponse, + LinkedFileRequest, LinkedToolResponse, + ListAgents, ListDatasets, ListEvaluators, ListFlows, @@ -85,6 +118,7 @@ LlmEvaluatorRequest, LogResponse, LogStatus, + LogStreamResponse, ModelEndpoints, ModelProviders, MonitoringEvaluatorEnvironmentRequest, @@ -93,15 +127,18 @@ MonitoringEvaluatorVersionRequest, NumericEvaluatorStatsResponse, ObservabilityStatus, + OnAgentCallEnum, + OpenAiReasoningEffort, OverallStats, + PaginatedDataAgentResponse, PaginatedDataEvaluationLogResponse, PaginatedDataEvaluatorResponse, PaginatedDataFlowResponse, PaginatedDataLogResponse, PaginatedDataPromptResponse, PaginatedDataToolResponse, - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse, - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem, + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse, + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem, PaginatedDatapointResponse, PaginatedDatasetResponse, PaginatedEvaluationResponse, @@ -110,6 +147,7 @@ PlatformAccessEnum, PopulateTemplateResponse, PopulateTemplateResponsePopulatedTemplate, + PopulateTemplateResponseReasoningEffort, PopulateTemplateResponseStop, PopulateTemplateResponseTemplate, ProjectSortBy, @@ -118,15 +156,16 @@ PromptCallResponseToolChoice, PromptCallStreamResponse, PromptKernelRequest, + PromptKernelRequestReasoningEffort, PromptKernelRequestStop, PromptKernelRequestTemplate, PromptLogResponse, PromptLogResponseToolChoice, PromptResponse, + PromptResponseReasoningEffort, PromptResponseStop, PromptResponseTemplate, ProviderApiKeys, - ReasoningEffort, ResponseFormat, ResponseFormatType, RunStatsResponse, @@ -139,6 +178,7 @@ TextEvaluatorStatsResponse, TimeUnit, ToolCall, + ToolCallResponse, ToolChoice, 
ToolFunction, ToolKernelRequest, @@ -162,7 +202,23 @@ VersionStatus, ) from .errors import UnprocessableEntityError -from . import datasets, directories, evaluations, evaluators, files, flows, logs, prompts, tools +from . import agents, datasets, directories, evaluations, evaluators, files, flows, logs, prompts, tools +from .agents import ( + AgentLogRequestToolChoice, + AgentLogRequestToolChoiceParams, + AgentRequestReasoningEffort, + AgentRequestReasoningEffortParams, + AgentRequestStop, + AgentRequestStopParams, + AgentRequestTemplate, + AgentRequestTemplateParams, + AgentRequestToolsItem, + AgentRequestToolsItemParams, + AgentsCallRequestToolChoice, + AgentsCallRequestToolChoiceParams, + AgentsCallStreamRequestToolChoice, + AgentsCallStreamRequestToolChoiceParams, +) from .client import AsyncHumanloop, Humanloop from .datasets import ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints from .environment import HumanloopEnvironment @@ -190,6 +246,8 @@ PromptLogRequestToolChoiceParams, PromptLogUpdateRequestToolChoice, PromptLogUpdateRequestToolChoiceParams, + PromptRequestReasoningEffort, + PromptRequestReasoningEffortParams, PromptRequestStop, PromptRequestStopParams, PromptRequestTemplate, @@ -200,12 +258,41 @@ PromptsCallStreamRequestToolChoiceParams, ) from .requests import ( + AgentCallResponseParams, + AgentCallResponseToolChoiceParams, + AgentCallStreamResponseParams, + AgentCallStreamResponsePayloadParams, AgentConfigResponseParams, + AgentContinueResponseParams, + AgentContinueResponseToolChoiceParams, + AgentContinueStreamResponseParams, + AgentContinueStreamResponsePayloadParams, + AgentInlineToolParams, + AgentKernelRequestParams, + AgentKernelRequestReasoningEffortParams, + AgentKernelRequestStopParams, + AgentKernelRequestTemplateParams, + AgentKernelRequestToolsItemParams, + AgentLinkedFileRequestParams, + AgentLinkedFileResponseFileParams, + AgentLinkedFileResponseParams, + AgentLogResponseParams, + AgentLogResponseToolChoiceParams, + AgentLogStreamResponseParams, + AgentResponseParams, + AgentResponseReasoningEffortParams, + AgentResponseStopParams, + AgentResponseTemplateParams, + AgentResponseToolsItemParams, + AnthropicRedactedThinkingContentParams, + AnthropicThinkingContentParams, BooleanEvaluatorStatsResponseParams, ChatMessageContentItemParams, ChatMessageContentParams, ChatMessageParams, + ChatMessageThinkingItemParams, CodeEvaluatorRequestParams, + CreateAgentLogResponseParams, CreateDatapointRequestParams, CreateDatapointRequestTargetValueParams, CreateEvaluatorLogResponseParams, @@ -245,6 +332,7 @@ ExternalEvaluatorRequestParams, FileEnvironmentResponseFileParams, FileEnvironmentResponseParams, + FileEnvironmentVariableRequestParams, FileIdParams, FilePathParams, FileRequestParams, @@ -258,7 +346,9 @@ ImageChatContentParams, ImageUrlParams, InputResponseParams, + LinkedFileRequestParams, LinkedToolResponseParams, + ListAgentsParams, ListDatasetsParams, ListEvaluatorsParams, ListFlowsParams, @@ -266,24 +356,27 @@ ListToolsParams, LlmEvaluatorRequestParams, LogResponseParams, + LogStreamResponseParams, MonitoringEvaluatorEnvironmentRequestParams, MonitoringEvaluatorResponseParams, MonitoringEvaluatorVersionRequestParams, NumericEvaluatorStatsResponseParams, OverallStatsParams, + PaginatedDataAgentResponseParams, PaginatedDataEvaluationLogResponseParams, PaginatedDataEvaluatorResponseParams, PaginatedDataFlowResponseParams, PaginatedDataLogResponseParams, PaginatedDataPromptResponseParams, PaginatedDataToolResponseParams, - 
PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseParams, - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams, + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseParams, + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams, PaginatedDatapointResponseParams, PaginatedDatasetResponseParams, PaginatedEvaluationResponseParams, PopulateTemplateResponseParams, PopulateTemplateResponsePopulatedTemplateParams, + PopulateTemplateResponseReasoningEffortParams, PopulateTemplateResponseStopParams, PopulateTemplateResponseTemplateParams, PromptCallLogResponseParams, @@ -291,11 +384,13 @@ PromptCallResponseToolChoiceParams, PromptCallStreamResponseParams, PromptKernelRequestParams, + PromptKernelRequestReasoningEffortParams, PromptKernelRequestStopParams, PromptKernelRequestTemplateParams, PromptLogResponseParams, PromptLogResponseToolChoiceParams, PromptResponseParams, + PromptResponseReasoningEffortParams, PromptResponseStopParams, PromptResponseTemplateParams, ProviderApiKeysParams, @@ -307,6 +402,7 @@ TextChatContentParams, TextEvaluatorStatsResponseParams, ToolCallParams, + ToolCallResponseParams, ToolChoiceParams, ToolFunctionParams, ToolKernelRequestParams, @@ -329,8 +425,76 @@ __all__ = [ "AddEvaluatorsRequestEvaluatorsItem", "AddEvaluatorsRequestEvaluatorsItemParams", + "AgentCallResponse", + "AgentCallResponseParams", + "AgentCallResponseToolChoice", + "AgentCallResponseToolChoiceParams", + "AgentCallStreamResponse", + "AgentCallStreamResponseParams", + "AgentCallStreamResponsePayload", + "AgentCallStreamResponsePayloadParams", "AgentConfigResponse", "AgentConfigResponseParams", + "AgentContinueResponse", + "AgentContinueResponseParams", + "AgentContinueResponseToolChoice", + "AgentContinueResponseToolChoiceParams", + "AgentContinueStreamResponse", + "AgentContinueStreamResponseParams", + "AgentContinueStreamResponsePayload", + "AgentContinueStreamResponsePayloadParams", + "AgentInlineTool", + "AgentInlineToolParams", + "AgentKernelRequest", + "AgentKernelRequestParams", + "AgentKernelRequestReasoningEffort", + "AgentKernelRequestReasoningEffortParams", + "AgentKernelRequestStop", + "AgentKernelRequestStopParams", + "AgentKernelRequestTemplate", + "AgentKernelRequestTemplateParams", + "AgentKernelRequestToolsItem", + "AgentKernelRequestToolsItemParams", + "AgentLinkedFileRequest", + "AgentLinkedFileRequestParams", + "AgentLinkedFileResponse", + "AgentLinkedFileResponseFile", + "AgentLinkedFileResponseFileParams", + "AgentLinkedFileResponseParams", + "AgentLogRequestToolChoice", + "AgentLogRequestToolChoiceParams", + "AgentLogResponse", + "AgentLogResponseParams", + "AgentLogResponseToolChoice", + "AgentLogResponseToolChoiceParams", + "AgentLogStreamResponse", + "AgentLogStreamResponseParams", + "AgentRequestReasoningEffort", + "AgentRequestReasoningEffortParams", + "AgentRequestStop", + "AgentRequestStopParams", + "AgentRequestTemplate", + "AgentRequestTemplateParams", + "AgentRequestToolsItem", + "AgentRequestToolsItemParams", + "AgentResponse", + "AgentResponseParams", + "AgentResponseReasoningEffort", + "AgentResponseReasoningEffortParams", + "AgentResponseStop", + "AgentResponseStopParams", + "AgentResponseTemplate", + "AgentResponseTemplateParams", + "AgentResponseToolsItem", + "AgentResponseToolsItemParams", + "AgentsCallRequestToolChoice", + 
"AgentsCallRequestToolChoiceParams", + "AgentsCallStreamRequestToolChoice", + "AgentsCallStreamRequestToolChoiceParams", + "AnthropicRedactedThinkingContent", + "AnthropicRedactedThinkingContentParams", + "AnthropicThinkingContent", + "AnthropicThinkingContentParams", "AsyncHumanloop", "BaseModelsUserResponse", "BooleanEvaluatorStatsResponse", @@ -341,11 +505,15 @@ "ChatMessageContentItemParams", "ChatMessageContentParams", "ChatMessageParams", + "ChatMessageThinkingItem", + "ChatMessageThinkingItemParams", "ChatRole", "ChatToolType", "CodeEvaluatorRequest", "CodeEvaluatorRequestParams", "ConfigToolResponse", + "CreateAgentLogResponse", + "CreateAgentLogResponseParams", "CreateDatapointRequest", "CreateDatapointRequestParams", "CreateDatapointRequestTargetValue", @@ -438,6 +606,7 @@ "EvaluatorVersionId", "EvaluatorVersionIdParams", "EvaluatorsRequest", + "EventType", "ExternalEvaluatorRequest", "ExternalEvaluatorRequestParams", "FeedbackType", @@ -445,6 +614,8 @@ "FileEnvironmentResponseFile", "FileEnvironmentResponseFileParams", "FileEnvironmentResponseParams", + "FileEnvironmentVariableRequest", + "FileEnvironmentVariableRequestParams", "FileId", "FileIdParams", "FilePath", @@ -477,8 +648,12 @@ "ImageUrlParams", "InputResponse", "InputResponseParams", + "LinkedFileRequest", + "LinkedFileRequestParams", "LinkedToolResponse", "LinkedToolResponseParams", + "ListAgents", + "ListAgentsParams", "ListDatasets", "ListDatasetsParams", "ListEvaluators", @@ -495,6 +670,8 @@ "LogResponse", "LogResponseParams", "LogStatus", + "LogStreamResponse", + "LogStreamResponseParams", "ModelEndpoints", "ModelProviders", "MonitoringEvaluatorEnvironmentRequest", @@ -507,8 +684,12 @@ "NumericEvaluatorStatsResponse", "NumericEvaluatorStatsResponseParams", "ObservabilityStatus", + "OnAgentCallEnum", + "OpenAiReasoningEffort", "OverallStats", "OverallStatsParams", + "PaginatedDataAgentResponse", + "PaginatedDataAgentResponseParams", "PaginatedDataEvaluationLogResponse", "PaginatedDataEvaluationLogResponseParams", "PaginatedDataEvaluatorResponse", @@ -521,10 +702,10 @@ "PaginatedDataPromptResponseParams", "PaginatedDataToolResponse", "PaginatedDataToolResponseParams", - "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse", - "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseParams", - "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem", - "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams", + "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse", + "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseParams", + "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem", + "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams", "PaginatedDatapointResponse", "PaginatedDatapointResponseParams", "PaginatedDatasetResponse", @@ -538,6 +719,8 @@ "PopulateTemplateResponseParams", "PopulateTemplateResponsePopulatedTemplate", "PopulateTemplateResponsePopulatedTemplateParams", + "PopulateTemplateResponseReasoningEffort", + "PopulateTemplateResponseReasoningEffortParams", "PopulateTemplateResponseStop", "PopulateTemplateResponseStopParams", "PopulateTemplateResponseTemplate", @@ -553,6 +736,8 @@ "PromptCallStreamResponseParams", 
"PromptKernelRequest", "PromptKernelRequestParams", + "PromptKernelRequestReasoningEffort", + "PromptKernelRequestReasoningEffortParams", "PromptKernelRequestStop", "PromptKernelRequestStopParams", "PromptKernelRequestTemplate", @@ -565,12 +750,16 @@ "PromptLogResponseToolChoiceParams", "PromptLogUpdateRequestToolChoice", "PromptLogUpdateRequestToolChoiceParams", + "PromptRequestReasoningEffort", + "PromptRequestReasoningEffortParams", "PromptRequestStop", "PromptRequestStopParams", "PromptRequestTemplate", "PromptRequestTemplateParams", "PromptResponse", "PromptResponseParams", + "PromptResponseReasoningEffort", + "PromptResponseReasoningEffortParams", "PromptResponseStop", "PromptResponseStopParams", "PromptResponseTemplate", @@ -581,7 +770,6 @@ "PromptsCallStreamRequestToolChoiceParams", "ProviderApiKeys", "ProviderApiKeysParams", - "ReasoningEffort", "ResponseFormat", "ResponseFormatParams", "ResponseFormatType", @@ -604,6 +792,8 @@ "TimeUnit", "ToolCall", "ToolCallParams", + "ToolCallResponse", + "ToolCallResponseParams", "ToolChoice", "ToolChoiceParams", "ToolFunction", @@ -643,6 +833,7 @@ "VersionStatsResponseParams", "VersionStatus", "__version__", + "agents", "datasets", "directories", "evaluations", diff --git a/src/humanloop/agents/__init__.py b/src/humanloop/agents/__init__.py new file mode 100644 index 00000000..04260714 --- /dev/null +++ b/src/humanloop/agents/__init__.py @@ -0,0 +1,37 @@ +# This file was auto-generated by Fern from our API Definition. + +from .types import ( + AgentLogRequestToolChoice, + AgentRequestReasoningEffort, + AgentRequestStop, + AgentRequestTemplate, + AgentRequestToolsItem, + AgentsCallRequestToolChoice, + AgentsCallStreamRequestToolChoice, +) +from .requests import ( + AgentLogRequestToolChoiceParams, + AgentRequestReasoningEffortParams, + AgentRequestStopParams, + AgentRequestTemplateParams, + AgentRequestToolsItemParams, + AgentsCallRequestToolChoiceParams, + AgentsCallStreamRequestToolChoiceParams, +) + +__all__ = [ + "AgentLogRequestToolChoice", + "AgentLogRequestToolChoiceParams", + "AgentRequestReasoningEffort", + "AgentRequestReasoningEffortParams", + "AgentRequestStop", + "AgentRequestStopParams", + "AgentRequestTemplate", + "AgentRequestTemplateParams", + "AgentRequestToolsItem", + "AgentRequestToolsItemParams", + "AgentsCallRequestToolChoice", + "AgentsCallRequestToolChoiceParams", + "AgentsCallStreamRequestToolChoice", + "AgentsCallStreamRequestToolChoiceParams", +] diff --git a/src/humanloop/agents/client.py b/src/humanloop/agents/client.py new file mode 100644 index 00000000..e6d46bea --- /dev/null +++ b/src/humanloop/agents/client.py @@ -0,0 +1,3188 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing +from ..core.client_wrapper import SyncClientWrapper +from .raw_client import RawAgentsClient +from ..requests.chat_message import ChatMessageParams +from .requests.agent_log_request_tool_choice import AgentLogRequestToolChoiceParams +from ..requests.agent_kernel_request import AgentKernelRequestParams +import datetime as dt +from ..types.log_status import LogStatus +from ..core.request_options import RequestOptions +from ..types.create_agent_log_response import CreateAgentLogResponse +from ..types.log_response import LogResponse +from .requests.agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoiceParams +from ..requests.provider_api_keys import ProviderApiKeysParams +from ..types.agent_call_stream_response import AgentCallStreamResponse +from .requests.agents_call_request_tool_choice import AgentsCallRequestToolChoiceParams +from ..types.agent_call_response import AgentCallResponse +from ..types.agent_continue_stream_response import AgentContinueStreamResponse +from ..types.agent_continue_response import AgentContinueResponse +from ..types.project_sort_by import ProjectSortBy +from ..types.sort_order import SortOrder +from ..types.paginated_data_agent_response import PaginatedDataAgentResponse +from ..types.model_endpoints import ModelEndpoints +from .requests.agent_request_template import AgentRequestTemplateParams +from ..types.template_language import TemplateLanguage +from ..types.model_providers import ModelProviders +from .requests.agent_request_stop import AgentRequestStopParams +from ..requests.response_format import ResponseFormatParams +from .requests.agent_request_reasoning_effort import AgentRequestReasoningEffortParams +from .requests.agent_request_tools_item import AgentRequestToolsItemParams +from ..types.agent_response import AgentResponse +from ..types.list_agents import ListAgents +from ..types.file_environment_response import FileEnvironmentResponse +from ..requests.evaluator_activation_deactivation_request_activate_item import ( + EvaluatorActivationDeactivationRequestActivateItemParams, +) +from ..requests.evaluator_activation_deactivation_request_deactivate_item import ( + EvaluatorActivationDeactivationRequestDeactivateItemParams, +) +from ..types.agent_kernel_request import AgentKernelRequest +from ..core.client_wrapper import AsyncClientWrapper +from .raw_client import AsyncRawAgentsClient + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) + + +class AgentsClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._raw_client = RawAgentsClient(client_wrapper=client_wrapper) + + @property + def with_raw_response(self) -> RawAgentsClient: + """ + Retrieves a raw implementation of this client that returns raw responses. 
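+
+        For example, a minimal sketch (hedged: the `ag_...` ID is a hypothetical placeholder, and the
+        `.data` attribute mirrors how the wrapped methods below unwrap raw responses):
+
+            from humanloop import Humanloop
+
+            client = Humanloop(api_key="YOUR_API_KEY")
+            raw = client.agents.with_raw_response
+            response = raw.get("ag_123")  # hypothetical Agent ID
+            agent = response.data  # the parsed AgentResponse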
+ + Returns + ------- + RawAgentsClient + """ + return self._raw_client + + def log( + self, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + run_id: typing.Optional[str] = OMIT, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + output_message: typing.Optional[ChatMessageParams] = OMIT, + prompt_tokens: typing.Optional[int] = OMIT, + reasoning_tokens: typing.Optional[int] = OMIT, + output_tokens: typing.Optional[int] = OMIT, + prompt_cost: typing.Optional[float] = OMIT, + output_cost: typing.Optional[float] = OMIT, + finish_reason: typing.Optional[str] = OMIT, + messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, + tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT, + agent: typing.Optional[AgentKernelRequestParams] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + output: typing.Optional[str] = OMIT, + created_at: typing.Optional[dt.datetime] = OMIT, + error: typing.Optional[str] = OMIT, + provider_latency: typing.Optional[float] = OMIT, + stdout: typing.Optional[str] = OMIT, + provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + source_datapoint_id: typing.Optional[str] = OMIT, + trace_parent_id: typing.Optional[str] = OMIT, + user: typing.Optional[str] = OMIT, + agent_log_request_environment: typing.Optional[str] = OMIT, + save: typing.Optional[bool] = OMIT, + log_id: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> CreateAgentLogResponse: + """ + Create an Agent Log. + + You can use query parameters `version_id`, or `environment`, to target + an existing version of the Agent. Otherwise, the default deployed version will be chosen. + + If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete` + in order to trigger Evaluators. + + Parameters + ---------- + version_id : typing.Optional[str] + A specific Version ID of the Agent to log to. + + environment : typing.Optional[str] + Name of the Environment identifying a deployed version to log to. + + run_id : typing.Optional[str] + Unique identifier for the Run to associate the Log with. + + path : typing.Optional[str] + Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Agent. + + output_message : typing.Optional[ChatMessageParams] + The message returned by the provider. + + prompt_tokens : typing.Optional[int] + Number of tokens in the prompt used to generate the output. + + reasoning_tokens : typing.Optional[int] + Number of reasoning tokens used to generate the output. + + output_tokens : typing.Optional[int] + Number of tokens in the output generated by the model. + + prompt_cost : typing.Optional[float] + Cost in dollars associated with the tokens in the prompt. + + output_cost : typing.Optional[float] + Cost in dollars associated with the tokens in the output. + + finish_reason : typing.Optional[str] + Reason the generation finished.
+ + messages : typing.Optional[typing.Sequence[ChatMessageParams]] + The messages passed to the provider chat endpoint. + + tool_choice : typing.Optional[AgentLogRequestToolChoiceParams] + Controls how the model uses tools. The following options are supported: + - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. + - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. + - `'required'` means the model must call one or more of the provided tools. + - `{'type': 'function', 'function': {'name': <TOOL_NAME>}}` forces the model to use the named function. + + agent : typing.Optional[AgentKernelRequestParams] + Details of your Agent. A new Agent version will be created if the provided details are new. + + start_time : typing.Optional[dt.datetime] + When the logged event started. + + end_time : typing.Optional[dt.datetime] + When the logged event ended. + + output : typing.Optional[str] + Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + + created_at : typing.Optional[dt.datetime] + User-defined timestamp for when the log was created. + + error : typing.Optional[str] + Error message if the log is an error. + + provider_latency : typing.Optional[float] + Duration of the logged event in seconds. + + stdout : typing.Optional[str] + Captured log and debug statements. + + provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Raw request sent to the provider. + + provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Raw response received from the provider. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the prompt template. + + source : typing.Optional[str] + Identifies where the model was called from. + + metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Any additional metadata to record. + + log_status : typing.Optional[LogStatus] + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + + source_datapoint_id : typing.Optional[str] + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + + trace_parent_id : typing.Optional[str] + The ID of the parent Log to nest this Log under in a Trace. + + user : typing.Optional[str] + End-user ID related to the Log. + + agent_log_request_environment : typing.Optional[str] + The name of the Environment the Log is associated with. + + save : typing.Optional[bool] + Whether the request/response payloads will be stored on Humanloop. + + log_id : typing.Optional[str] + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration.
+ + Returns + ------- + CreateAgentLogResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + + client = Humanloop( + api_key="YOUR_API_KEY", + ) + client.agents.log() + """ + response = self._raw_client.log( + version_id=version_id, + environment=environment, + run_id=run_id, + path=path, + id=id, + output_message=output_message, + prompt_tokens=prompt_tokens, + reasoning_tokens=reasoning_tokens, + output_tokens=output_tokens, + prompt_cost=prompt_cost, + output_cost=output_cost, + finish_reason=finish_reason, + messages=messages, + tool_choice=tool_choice, + agent=agent, + start_time=start_time, + end_time=end_time, + output=output, + created_at=created_at, + error=error, + provider_latency=provider_latency, + stdout=stdout, + provider_request=provider_request, + provider_response=provider_response, + inputs=inputs, + source=source, + metadata=metadata, + log_status=log_status, + source_datapoint_id=source_datapoint_id, + trace_parent_id=trace_parent_id, + user=user, + agent_log_request_environment=agent_log_request_environment, + save=save, + log_id=log_id, + request_options=request_options, + ) + return response.data + + def update_log( + self, + id: str, + log_id: str, + *, + messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, + output_message: typing.Optional[ChatMessageParams] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + output: typing.Optional[str] = OMIT, + error: typing.Optional[str] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> LogResponse: + """ + Update a Log. + + Update the details of a Log with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + log_id : str + Unique identifier for the Log. + + messages : typing.Optional[typing.Sequence[ChatMessageParams]] + List of chat messages that were used as an input to the Agent. + + output_message : typing.Optional[ChatMessageParams] + The output message returned by this Agent. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the Agent Log. + + output : typing.Optional[str] + The output of the Agent Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`. + + error : typing.Optional[str] + The error message of the Agent Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`. + + log_status : typing.Optional[LogStatus] + Status of the Agent Log. When an Agent Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration.
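+
+        A lifecycle sketch tying `log` and `update_log` together (hedged: the `id` and `agent_id`
+        fields on CreateAgentLogResponse are assumptions, as are the literal strings for `log_status`):
+
+            from humanloop import Humanloop
+
+            client = Humanloop(api_key="YOUR_API_KEY")
+            # Open the Log as incomplete so monitoring Evaluators wait for completion.
+            created = client.agents.log(
+                path="folder/name",
+                messages=[{"role": "user", "content": "Hi"}],
+                log_status="incomplete",
+            )
+            # Later, mark the Log complete to trigger the Agent's Evaluators.
+            client.agents.update_log(
+                id=created.agent_id,  # assumed field
+                log_id=created.id,  # assumed field
+                output="Final answer",
+                log_status="complete",
+            )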
+ + Returns + ------- + LogResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + + client = Humanloop( + api_key="YOUR_API_KEY", + ) + client.agents.update_log( + id="id", + log_id="log_id", + ) + """ + response = self._raw_client.update_log( + id, + log_id, + messages=messages, + output_message=output_message, + inputs=inputs, + output=output, + error=error, + log_status=log_status, + request_options=request_options, + ) + return response.data + + def call_stream( + self, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, + tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT, + agent: typing.Optional[AgentKernelRequestParams] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + source_datapoint_id: typing.Optional[str] = OMIT, + trace_parent_id: typing.Optional[str] = OMIT, + user: typing.Optional[str] = OMIT, + agents_call_stream_request_environment: typing.Optional[str] = OMIT, + save: typing.Optional[bool] = OMIT, + log_id: typing.Optional[str] = OMIT, + provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, + return_inputs: typing.Optional[bool] = OMIT, + include_trace_children: typing.Optional[bool] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Iterator[AgentCallStreamResponse]: + """ + Call an Agent. + + Calling an Agent calls the model provider before logging + the request, responses and metadata to Humanloop. + + You can use query parameters `version_id`, or `environment`, to target + an existing version of the Agent. Otherwise, the default deployed version will be chosen. + + Instead of targeting an existing version explicitly, you can pass in + Agent details in the request body. In this case, we will check if the details correspond + to an existing version of the Agent. If they do not, we will create a new version. This is helpful + in the case where you are storing or deriving your Agent details in code. + + Parameters + ---------- + version_id : typing.Optional[str] + A specific Version ID of the Agent to log to. + + environment : typing.Optional[str] + Name of the Environment identifying a deployed version to log to. + + path : typing.Optional[str] + Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Agent. + + messages : typing.Optional[typing.Sequence[ChatMessageParams]] + The messages passed to the provider chat endpoint. + + tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams] + Controls how the model uses tools. The following options are supported: + - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. + - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools. + - `{'type': 'function', 'function': {'name': <TOOL_NAME>}}` forces the model to use the named function. + + agent : typing.Optional[AgentKernelRequestParams] + Details of your Agent. A new Agent version will be created if the provided details are new. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the prompt template. + + source : typing.Optional[str] + Identifies where the model was called from. + + metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Any additional metadata to record. + + start_time : typing.Optional[dt.datetime] + When the logged event started. + + end_time : typing.Optional[dt.datetime] + When the logged event ended. + + log_status : typing.Optional[LogStatus] + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + + source_datapoint_id : typing.Optional[str] + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + + trace_parent_id : typing.Optional[str] + The ID of the parent Log to nest this Log under in a Trace. + + user : typing.Optional[str] + End-user ID related to the Log. + + agents_call_stream_request_environment : typing.Optional[str] + The name of the Environment the Log is associated with. + + save : typing.Optional[bool] + Whether the request/response payloads will be stored on Humanloop. + + log_id : typing.Optional[str] + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + + provider_api_keys : typing.Optional[ProviderApiKeysParams] + API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. + + return_inputs : typing.Optional[bool] + Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true. + + include_trace_children : typing.Optional[bool] + If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration.
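+
+        A consumption sketch (hedged: the shape of each chunk depends on AgentCallStreamResponse
+        and is not shown here):
+
+            from humanloop import Humanloop
+
+            client = Humanloop(api_key="YOUR_API_KEY")
+            for chunk in client.agents.call_stream(
+                path="folder/name",
+                messages=[{"role": "user", "content": "Plan a day in Paris"}],
+            ):
+                print(chunk)  # each chunk is an AgentCallStreamResponse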
+ + Yields + ------ + typing.Iterator[AgentCallStreamResponse] + + + Examples + -------- + from humanloop import Humanloop + + client = Humanloop( + api_key="YOUR_API_KEY", + ) + response = client.agents.call_stream() + for chunk in response: + yield chunk + """ + with self._raw_client.call_stream( + version_id=version_id, + environment=environment, + path=path, + id=id, + messages=messages, + tool_choice=tool_choice, + agent=agent, + inputs=inputs, + source=source, + metadata=metadata, + start_time=start_time, + end_time=end_time, + log_status=log_status, + source_datapoint_id=source_datapoint_id, + trace_parent_id=trace_parent_id, + user=user, + agents_call_stream_request_environment=agents_call_stream_request_environment, + save=save, + log_id=log_id, + provider_api_keys=provider_api_keys, + return_inputs=return_inputs, + include_trace_children=include_trace_children, + request_options=request_options, + ) as r: + yield from r.data + + def call( + self, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, + tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT, + agent: typing.Optional[AgentKernelRequestParams] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + source_datapoint_id: typing.Optional[str] = OMIT, + trace_parent_id: typing.Optional[str] = OMIT, + user: typing.Optional[str] = OMIT, + agents_call_request_environment: typing.Optional[str] = OMIT, + save: typing.Optional[bool] = OMIT, + log_id: typing.Optional[str] = OMIT, + provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, + return_inputs: typing.Optional[bool] = OMIT, + include_trace_children: typing.Optional[bool] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AgentCallResponse: + """ + Call an Agent. + + Calling an Agent calls the model provider before logging + the request, responses and metadata to Humanloop. + + You can use query parameters `version_id`, or `environment`, to target + an existing version of the Agent. Otherwise, the default deployed version will be chosen. + + Instead of targeting an existing version explicitly, you can pass in + Agent details in the request body. In this case, we will check if the details correspond + to an existing version of the Agent. If they do not, we will create a new version. This is helpful + in the case where you are storing or deriving your Agent details in code. + + Parameters + ---------- + version_id : typing.Optional[str] + A specific Version ID of the Agent to log to. + + environment : typing.Optional[str] + Name of the Environment identifying a deployed version to log to. + + path : typing.Optional[str] + Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Agent. + + messages : typing.Optional[typing.Sequence[ChatMessageParams]] + The messages passed to the provider chat endpoint.
+ + tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams] + Controls how the model uses tools. The following options are supported: + - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. + - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. + - `'required'` means the model must call one or more of the provided tools. + - `{'type': 'function', 'function': {'name': <TOOL_NAME>}}` forces the model to use the named function. + + agent : typing.Optional[AgentKernelRequestParams] + Details of your Agent. A new Agent version will be created if the provided details are new. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the prompt template. + + source : typing.Optional[str] + Identifies where the model was called from. + + metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Any additional metadata to record. + + start_time : typing.Optional[dt.datetime] + When the logged event started. + + end_time : typing.Optional[dt.datetime] + When the logged event ended. + + log_status : typing.Optional[LogStatus] + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + + source_datapoint_id : typing.Optional[str] + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + + trace_parent_id : typing.Optional[str] + The ID of the parent Log to nest this Log under in a Trace. + + user : typing.Optional[str] + End-user ID related to the Log. + + agents_call_request_environment : typing.Optional[str] + The name of the Environment the Log is associated with. + + save : typing.Optional[bool] + Whether the request/response payloads will be stored on Humanloop. + + log_id : typing.Optional[str] + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + + provider_api_keys : typing.Optional[ProviderApiKeysParams] + API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. + + return_inputs : typing.Optional[bool] + Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true. + + include_trace_children : typing.Optional[bool] + If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration.
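+
+        A sketch of a call that is later resumed with `continue_` (hedged: `log_id` as a field on
+        AgentCallResponse is an assumption, and the tool result message is illustrative):
+
+            from humanloop import Humanloop
+
+            client = Humanloop(api_key="YOUR_API_KEY")
+            response = client.agents.call(
+                path="folder/name",
+                messages=[{"role": "user", "content": "What's the weather in Paris?"}],
+            )
+            # If the Agent paused on a tool call for your code to execute, resume it by
+            # appending Tool messages with the results.
+            client.agents.continue_(
+                log_id=response.log_id,  # assumed field
+                messages=[{"role": "tool", "content": "18C and sunny"}],
+            )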
+ + Returns + ------- + AgentCallResponse + + + Examples + -------- + from humanloop import Humanloop + + client = Humanloop( + api_key="YOUR_API_KEY", + ) + client.agents.call() + """ + response = self._raw_client.call( + version_id=version_id, + environment=environment, + path=path, + id=id, + messages=messages, + tool_choice=tool_choice, + agent=agent, + inputs=inputs, + source=source, + metadata=metadata, + start_time=start_time, + end_time=end_time, + log_status=log_status, + source_datapoint_id=source_datapoint_id, + trace_parent_id=trace_parent_id, + user=user, + agents_call_request_environment=agents_call_request_environment, + save=save, + log_id=log_id, + provider_api_keys=provider_api_keys, + return_inputs=return_inputs, + include_trace_children=include_trace_children, + request_options=request_options, + ) + return response.data + + def continue_stream( + self, + *, + log_id: str, + messages: typing.Sequence[ChatMessageParams], + provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, + include_trace_children: typing.Optional[bool] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Iterator[AgentContinueStreamResponse]: + """ + Continue an incomplete Agent call. + + This endpoint allows continuing an existing incomplete Agent call, using the context + from the previous interaction. The Agent will resume processing from where it left off. + + The original log must be in an incomplete state to be continued. + + The messages in the request will be appended + to the original messages in the log. + + Parameters + ---------- + log_id : str + This identifies the Agent Log to continue. + + messages : typing.Sequence[ChatMessageParams] + The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls. + + provider_api_keys : typing.Optional[ProviderApiKeysParams] + API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. + + include_trace_children : typing.Optional[bool] + If true, populate `trace_children` for the returned Agent Log. Defaults to false. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Yields + ------ + typing.Iterator[AgentContinueStreamResponse] + + + Examples + -------- + from humanloop import Humanloop + + client = Humanloop( + api_key="YOUR_API_KEY", + ) + response = client.agents.continue_stream( + log_id="log_id", + messages=[{"role": "user"}], + ) + for chunk in response: + yield chunk + """ + with self._raw_client.continue_stream( + log_id=log_id, + messages=messages, + provider_api_keys=provider_api_keys, + include_trace_children=include_trace_children, + request_options=request_options, + ) as r: + yield from r.data + + def continue_( + self, + *, + log_id: str, + messages: typing.Sequence[ChatMessageParams], + provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, + include_trace_children: typing.Optional[bool] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AgentContinueResponse: + """ + Continue an incomplete Agent call. + + This endpoint allows continuing an existing incomplete Agent call, using the context + from the previous interaction. The Agent will resume processing from where it left off. + + The original log must be in an incomplete state to be continued. 
+ + The messages in the request will be appended + to the original messages in the log. + + Parameters + ---------- + log_id : str + This identifies the Agent Log to continue. + + messages : typing.Sequence[ChatMessageParams] + The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls. + + provider_api_keys : typing.Optional[ProviderApiKeysParams] + API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. + + include_trace_children : typing.Optional[bool] + If true, populate `trace_children` for the returned Agent Log. Defaults to false. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AgentContinueResponse + + + Examples + -------- + from humanloop import Humanloop + + client = Humanloop( + api_key="YOUR_API_KEY", + ) + client.agents.continue_( + log_id="log_id", + messages=[{"role": "user"}], + ) + """ + response = self._raw_client.continue_( + log_id=log_id, + messages=messages, + provider_api_keys=provider_api_keys, + include_trace_children=include_trace_children, + request_options=request_options, + ) + return response.data + + def list( + self, + *, + page: typing.Optional[int] = None, + size: typing.Optional[int] = None, + name: typing.Optional[str] = None, + user_filter: typing.Optional[str] = None, + sort_by: typing.Optional[ProjectSortBy] = None, + order: typing.Optional[SortOrder] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> PaginatedDataAgentResponse: + """ + Get a list of all Agents. + + Parameters + ---------- + page : typing.Optional[int] + Page number for pagination. + + size : typing.Optional[int] + Page size for pagination. Number of Agents to fetch. + + name : typing.Optional[str] + Case-insensitive filter for Agent name. + + user_filter : typing.Optional[str] + Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users. + + sort_by : typing.Optional[ProjectSortBy] + Field to sort Agents by + + order : typing.Optional[SortOrder] + Direction to sort by. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
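+
+        A pagination sketch (hedged: `records` and `total` as fields on PaginatedDataAgentResponse
+        are assumptions, mirroring the other PaginatedData* models):
+
+            from humanloop import Humanloop
+
+            client = Humanloop(api_key="YOUR_API_KEY")
+            page, size = 1, 50
+            while True:
+                data = client.agents.list(page=page, size=size)
+                for agent in data.records:  # assumed field
+                    print(agent.id)
+                if page * size >= (data.total or 0):  # assumed field
+                    break
+                page += 1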
+ + Returns + ------- + PaginatedDataAgentResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + + client = Humanloop( + api_key="YOUR_API_KEY", + ) + client.agents.list() + """ + response = self._raw_client.list( + page=page, + size=size, + name=name, + user_filter=user_filter, + sort_by=sort_by, + order=order, + request_options=request_options, + ) + return response.data + + def upsert( + self, + *, + model: str, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + endpoint: typing.Optional[ModelEndpoints] = OMIT, + template: typing.Optional[AgentRequestTemplateParams] = OMIT, + template_language: typing.Optional[TemplateLanguage] = OMIT, + provider: typing.Optional[ModelProviders] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + temperature: typing.Optional[float] = OMIT, + top_p: typing.Optional[float] = OMIT, + stop: typing.Optional[AgentRequestStopParams] = OMIT, + presence_penalty: typing.Optional[float] = OMIT, + frequency_penalty: typing.Optional[float] = OMIT, + other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + seed: typing.Optional[int] = OMIT, + response_format: typing.Optional[ResponseFormatParams] = OMIT, + reasoning_effort: typing.Optional[AgentRequestReasoningEffortParams] = OMIT, + tools: typing.Optional[typing.Sequence[AgentRequestToolsItemParams]] = OMIT, + attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + max_iterations: typing.Optional[int] = OMIT, + version_name: typing.Optional[str] = OMIT, + version_description: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + tags: typing.Optional[typing.Sequence[str]] = OMIT, + readme: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AgentResponse: + """ + Create an Agent or update it with a new version if it already exists. + + Agents are identified by the `ID` or their `path`. The parameters (i.e. the template, temperature, model, etc.) and + tools determine the versions of the Agent. + + You can provide `version_name` and `version_description` to identify and describe your versions. + Version names must be unique within an Agent - attempting to create a version with a name + that already exists will result in a 409 Conflict error. + + Parameters + ---------- + model : str + The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models). + + path : typing.Optional[str] + Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Agent. + + endpoint : typing.Optional[ModelEndpoints] + The provider model endpoint used. + + template : typing.Optional[AgentRequestTemplateParams] + The template contains the main structure and instructions for the model, including input variables for dynamic values. + + For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable. + For completion models, provide a prompt template as a string. + + Input variables should be specified with double curly bracket syntax: `{{input_name}}`. + + template_language : typing.Optional[TemplateLanguage] + The template language to use for rendering the template. + + provider : typing.Optional[ModelProviders] + The company providing the underlying model service.
+ + max_tokens : typing.Optional[int] + The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt. + + temperature : typing.Optional[float] + What sampling temperature to use when making a generation. Higher values mean the model will be more creative. + + top_p : typing.Optional[float] + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. + + stop : typing.Optional[AgentRequestStopParams] + The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence. + + presence_penalty : typing.Optional[float] + Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far. + + frequency_penalty : typing.Optional[float] + Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far. + + other : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Other parameter values to be passed to the provider call. + + seed : typing.Optional[int] + If specified, the model will make a best effort to sample deterministically, but it is not guaranteed. + + response_format : typing.Optional[ResponseFormatParams] + The format of the response. Only `{"type": "json_object"}` is currently supported for chat. + + reasoning_effort : typing.Optional[AgentRequestReasoningEffortParams] + Guidance on how many reasoning tokens it should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. + + tools : typing.Optional[typing.Sequence[AgentRequestToolsItemParams]] + + attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used. + + max_iterations : typing.Optional[int] + The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called. + + version_name : typing.Optional[str] + Unique name for the Agent version. Each Agent can only have one version with a given name. + + version_description : typing.Optional[str] + Description of the Version. + + description : typing.Optional[str] + Description of the Agent. + + tags : typing.Optional[typing.Sequence[str]] + List of tags associated with this Agent. + + readme : typing.Optional[str] + Long description of the Agent. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration.
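+
+        An upsert sketch (hedged: the chat-template shape is inferred from the `template` description
+        above, and `version_id` on AgentResponse is an assumption):
+
+            from humanloop import Humanloop
+
+            client = Humanloop(api_key="YOUR_API_KEY")
+            agent = client.agents.upsert(
+                path="folder/name",
+                model="gpt-4o",
+                template=[{"role": "system", "content": "You are a helpful assistant. {{extra_instructions}}"}],
+                max_iterations=5,
+                version_name="v1",
+                version_description="Initial version",
+            )
+            print(agent.version_id)  # assumed field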
+ + Returns + ------- + AgentResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + + client = Humanloop( + api_key="YOUR_API_KEY", + ) + client.agents.upsert( + model="model", + ) + """ + response = self._raw_client.upsert( + model=model, + path=path, + id=id, + endpoint=endpoint, + template=template, + template_language=template_language, + provider=provider, + max_tokens=max_tokens, + temperature=temperature, + top_p=top_p, + stop=stop, + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + other=other, + seed=seed, + response_format=response_format, + reasoning_effort=reasoning_effort, + tools=tools, + attributes=attributes, + max_iterations=max_iterations, + version_name=version_name, + version_description=version_description, + description=description, + tags=tags, + readme=readme, + request_options=request_options, + ) + return response.data + + def delete_agent_version( + self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> None: + """ + Delete a version of the Agent. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + version_id : str + Unique identifier for the specific version of the Agent. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + None + + Examples + -------- + from humanloop import Humanloop + + client = Humanloop( + api_key="YOUR_API_KEY", + ) + client.agents.delete_agent_version( + id="id", + version_id="version_id", + ) + """ + response = self._raw_client.delete_agent_version(id, version_id, request_options=request_options) + return response.data + + def patch_agent_version( + self, + id: str, + version_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AgentResponse: + """ + Update the name or description of the Agent version. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + version_id : str + Unique identifier for the specific version of the Agent. + + name : typing.Optional[str] + Name of the version. + + description : typing.Optional[str] + Description of the version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AgentResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + + client = Humanloop( + api_key="YOUR_API_KEY", + ) + client.agents.patch_agent_version( + id="id", + version_id="version_id", + ) + """ + response = self._raw_client.patch_agent_version( + id, version_id, name=name, description=description, request_options=request_options + ) + return response.data + + def get( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AgentResponse: + """ + Retrieve the Agent with the given ID. + + By default, the deployed version of the Agent is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Agent. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + version_id : typing.Optional[str] + A specific Version ID of the Agent to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AgentResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + + client = Humanloop( + api_key="YOUR_API_KEY", + ) + client.agents.get( + id="id", + ) + """ + response = self._raw_client.get( + id, version_id=version_id, environment=environment, request_options=request_options + ) + return response.data + + def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: + """ + Delete the Agent with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + None + + Examples + -------- + from humanloop import Humanloop + + client = Humanloop( + api_key="YOUR_API_KEY", + ) + client.agents.delete( + id="id", + ) + """ + response = self._raw_client.delete(id, request_options=request_options) + return response.data + + def move( + self, + id: str, + *, + path: typing.Optional[str] = OMIT, + name: typing.Optional[str] = OMIT, + directory_id: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AgentResponse: + """ + Move the Agent to a different path or change the name. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + path : typing.Optional[str] + Path of the Agent including the Agent name, which is used as a unique identifier. + + name : typing.Optional[str] + Name of the Agent. + + directory_id : typing.Optional[str] + Unique identifier for the Directory to move the Agent to. Starts with `dir_`. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AgentResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + + client = Humanloop( + api_key="YOUR_API_KEY", + ) + client.agents.move( + id="id", + ) + """ + response = self._raw_client.move( + id, path=path, name=name, directory_id=directory_id, request_options=request_options + ) + return response.data + + def list_versions( + self, + id: str, + *, + evaluator_aggregates: typing.Optional[bool] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> ListAgents: + """ + Get a list of all the versions of an Agent. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + evaluator_aggregates : typing.Optional[bool] + Whether to include Evaluator aggregate results for the versions in the response. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + ListAgents + Successful Response + + Examples + -------- + from humanloop import Humanloop + + client = Humanloop( + api_key="YOUR_API_KEY", + ) + client.agents.list_versions( + id="id", + ) + """ + response = self._raw_client.list_versions( + id, evaluator_aggregates=evaluator_aggregates, request_options=request_options + ) + return response.data + + def set_deployment( + self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None + ) -> AgentResponse: + """ + Deploy Agent to an Environment. + + Set the deployed version for the specified Environment. This Agent + will be used for calls made to the Agent in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + environment_id : str + Unique identifier for the Environment to deploy the Version to. + + version_id : str + Unique identifier for the specific version of the Agent.
+ + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AgentResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + + client = Humanloop( + api_key="YOUR_API_KEY", + ) + client.agents.set_deployment( + id="id", + environment_id="environment_id", + version_id="version_id", + ) + """ + response = self._raw_client.set_deployment( + id, environment_id, version_id=version_id, request_options=request_options + ) + return response.data + + def remove_deployment( + self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> None: + """ + Remove deployed Agent from the Environment. + + Remove the deployed version for the specified Environment. This Agent + will no longer be used for calls made to the Agent in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + environment_id : str + Unique identifier for the Environment to remove the deployment from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + None + + Examples + -------- + from humanloop import Humanloop + + client = Humanloop( + api_key="YOUR_API_KEY", + ) + client.agents.remove_deployment( + id="id", + environment_id="environment_id", + ) + """ + response = self._raw_client.remove_deployment(id, environment_id, request_options=request_options) + return response.data + + def list_environments( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[FileEnvironmentResponse]: + """ + List all Environments and their deployed versions for the Agent. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[FileEnvironmentResponse] + Successful Response + + Examples + -------- + from humanloop import Humanloop + + client = Humanloop( + api_key="YOUR_API_KEY", + ) + client.agents.list_environments( + id="id", + ) + """ + response = self._raw_client.list_environments(id, request_options=request_options) + return response.data + + def update_monitoring( + self, + id: str, + *, + activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, + deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AgentResponse: + """ + Activate and deactivate Evaluators for monitoring the Agent. + + An activated Evaluator will automatically be run on all new Logs + within the Agent for monitoring purposes. + + Parameters + ---------- + id : str + + activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] + Evaluators to activate for Monitoring. These will be automatically run on new Logs. + + deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] + Evaluators to deactivate. These will not be run on new Logs. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
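+
+        An activation sketch (hedged: the activate item is shown in its assumed EvaluatorVersionId
+        form, a dict with an `evaluator_version_id` key; both IDs are hypothetical placeholders):
+
+            from humanloop import Humanloop
+
+            client = Humanloop(api_key="YOUR_API_KEY")
+            client.agents.update_monitoring(
+                id="ag_123",
+                activate=[{"evaluator_version_id": "evv_456"}],
+            )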
+ + Returns + ------- + AgentResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + + client = Humanloop( + api_key="YOUR_API_KEY", + ) + client.agents.update_monitoring( + id="id", + ) + """ + response = self._raw_client.update_monitoring( + id, activate=activate, deactivate=deactivate, request_options=request_options + ) + return response.data + + def serialize( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> None: + """ + Serialize an Agent to the .agent file format. + + Useful for storing the Agent with your code in a version control system, + or for editing with an AI tool. + + By default, the deployed version of the Agent is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Agent. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + version_id : typing.Optional[str] + A specific Version ID of the Agent to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + None + + Examples + -------- + from humanloop import Humanloop + + client = Humanloop( + api_key="YOUR_API_KEY", + ) + client.agents.serialize( + id="id", + ) + """ + response = self._raw_client.serialize( + id, version_id=version_id, environment=environment, request_options=request_options + ) + return response.data + + def deserialize(self, *, agent: str, request_options: typing.Optional[RequestOptions] = None) -> AgentKernelRequest: + """ + Deserialize an Agent from the .agent file format. + + This returns a subset of the attributes required by an Agent. + This subset defines the Agent version (e.g. `model`, `temperature`, etc.). + + Parameters + ---------- + agent : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AgentKernelRequest + Successful Response + + Examples + -------- + from humanloop import Humanloop + + client = Humanloop( + api_key="YOUR_API_KEY", + ) + client.agents.deserialize( + agent="agent", + ) + """ + response = self._raw_client.deserialize(agent=agent, request_options=request_options) + return response.data + + +class AsyncAgentsClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._raw_client = AsyncRawAgentsClient(client_wrapper=client_wrapper) + + @property + def with_raw_response(self) -> AsyncRawAgentsClient: + """ + Retrieves a raw implementation of this client that returns raw responses.
+ + Returns + ------- + AsyncRawAgentsClient + """ + return self._raw_client + + async def log( + self, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + run_id: typing.Optional[str] = OMIT, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + output_message: typing.Optional[ChatMessageParams] = OMIT, + prompt_tokens: typing.Optional[int] = OMIT, + reasoning_tokens: typing.Optional[int] = OMIT, + output_tokens: typing.Optional[int] = OMIT, + prompt_cost: typing.Optional[float] = OMIT, + output_cost: typing.Optional[float] = OMIT, + finish_reason: typing.Optional[str] = OMIT, + messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, + tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT, + agent: typing.Optional[AgentKernelRequestParams] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + output: typing.Optional[str] = OMIT, + created_at: typing.Optional[dt.datetime] = OMIT, + error: typing.Optional[str] = OMIT, + provider_latency: typing.Optional[float] = OMIT, + stdout: typing.Optional[str] = OMIT, + provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + source_datapoint_id: typing.Optional[str] = OMIT, + trace_parent_id: typing.Optional[str] = OMIT, + user: typing.Optional[str] = OMIT, + agent_log_request_environment: typing.Optional[str] = OMIT, + save: typing.Optional[bool] = OMIT, + log_id: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> CreateAgentLogResponse: + """ + Create an Agent Log. + + You can use query parameters `version_id`, or `environment`, to target + an existing version of the Agent. Otherwise, the default deployed version will be chosen. + + If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete` + in order to trigger Evaluators. + + Parameters + ---------- + version_id : typing.Optional[str] + A specific Version ID of the Agent to log to. + + environment : typing.Optional[str] + Name of the Environment identifying a deployed version to log to. + + run_id : typing.Optional[str] + Unique identifier for the Run to associate the Log to. + + path : typing.Optional[str] + Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Agent. + + output_message : typing.Optional[ChatMessageParams] + The message returned by the provider. + + prompt_tokens : typing.Optional[int] + Number of tokens in the prompt used to generate the output. + + reasoning_tokens : typing.Optional[int] + Number of reasoning tokens used to generate the output. + + output_tokens : typing.Optional[int] + Number of tokens in the output generated by the model. + + prompt_cost : typing.Optional[float] + Cost in dollars associated to the tokens in the prompt. + + output_cost : typing.Optional[float] + Cost in dollars associated to the tokens in the output. + + finish_reason : typing.Optional[str] + Reason the generation finished.
+ + messages : typing.Optional[typing.Sequence[ChatMessageParams]] + The messages passed to the provider chat endpoint. + + tool_choice : typing.Optional[AgentLogRequestToolChoiceParams] + Controls how the model uses tools. The following options are supported: + - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. + - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. + - `'required'` means the model must call one or more of the provided tools. + - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function. + + agent : typing.Optional[AgentKernelRequestParams] + Details of your Agent. A new Agent version will be created if the provided details are new. + + start_time : typing.Optional[dt.datetime] + When the logged event started. + + end_time : typing.Optional[dt.datetime] + When the logged event ended. + + output : typing.Optional[str] + Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + + created_at : typing.Optional[dt.datetime] + User defined timestamp for when the log was created. + + error : typing.Optional[str] + Error message if the log is an error. + + provider_latency : typing.Optional[float] + Duration of the logged event in seconds. + + stdout : typing.Optional[str] + Captured log and debug statements. + + provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Raw request sent to provider. + + provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Raw response received from the provider. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the prompt template. + + source : typing.Optional[str] + Identifies where the model was called from. + + metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Any additional metadata to record. + + log_status : typing.Optional[LogStatus] + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + + source_datapoint_id : typing.Optional[str] + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + + trace_parent_id : typing.Optional[str] + The ID of the parent Log to nest this Log under in a Trace. + + user : typing.Optional[str] + End-user ID related to the Log. + + agent_log_request_environment : typing.Optional[str] + The name of the Environment the Log is associated to. + + save : typing.Optional[bool] + Whether the request/response payloads will be stored on Humanloop. + + log_id : typing.Optional[str] + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration.
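+ + A minimal sketch of the forced-function form of `tool_choice` documented above; the path, message content, and the tool name `search` are hypothetical placeholders, not part of this API: + + await client.agents.log( + path="folder/name", + messages=[{"role": "user", "content": "Find the docs"}], + tool_choice={"type": "function", "function": {"name": "search"}}, + log_status="incomplete", + ) + + Passing `log_status="incomplete"` defers monitoring Evaluators until the Log is later marked `complete`, as described above.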
+ + Returns + ------- + CreateAgentLogResponse + Successful Response + + Examples + -------- + import asyncio + + from humanloop import AsyncHumanloop + + client = AsyncHumanloop( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.agents.log() + + + asyncio.run(main()) + """ + response = await self._raw_client.log( + version_id=version_id, + environment=environment, + run_id=run_id, + path=path, + id=id, + output_message=output_message, + prompt_tokens=prompt_tokens, + reasoning_tokens=reasoning_tokens, + output_tokens=output_tokens, + prompt_cost=prompt_cost, + output_cost=output_cost, + finish_reason=finish_reason, + messages=messages, + tool_choice=tool_choice, + agent=agent, + start_time=start_time, + end_time=end_time, + output=output, + created_at=created_at, + error=error, + provider_latency=provider_latency, + stdout=stdout, + provider_request=provider_request, + provider_response=provider_response, + inputs=inputs, + source=source, + metadata=metadata, + log_status=log_status, + source_datapoint_id=source_datapoint_id, + trace_parent_id=trace_parent_id, + user=user, + agent_log_request_environment=agent_log_request_environment, + save=save, + log_id=log_id, + request_options=request_options, + ) + return response.data + + async def update_log( + self, + id: str, + log_id: str, + *, + messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, + output_message: typing.Optional[ChatMessageParams] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + output: typing.Optional[str] = OMIT, + error: typing.Optional[str] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> LogResponse: + """ + Update a Log. + + Update the details of a Log with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + log_id : str + Unique identifier for the Log. + + messages : typing.Optional[typing.Sequence[ChatMessageParams]] + List of chat messages that were used as an input to the Agent. + + output_message : typing.Optional[ChatMessageParams] + The output message returned by this Agent. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the Agent Log. + + output : typing.Optional[str] + The output of the Agent Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`. + + error : typing.Optional[str] + The error message of the Agent Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`. + + log_status : typing.Optional[LogStatus] + Status of the Agent Log. When an Agent Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration.
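+ + A minimal sketch of the `incomplete` to `complete` lifecycle described above, assuming the create response exposes the new Log's ID as `id`; the path, Agent ID, and output are placeholders: + + log = await client.agents.log(path="folder/name", log_status="incomplete") + await client.agents.update_log( + id="agent_id", + log_id=log.id, + output="final answer", + log_status="complete", + ) + + Marking the Log `complete` allows monitoring Evaluators to run on it.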
+ + Returns + ------- + LogResponse + Successful Response + + Examples + -------- + import asyncio + + from humanloop import AsyncHumanloop + + client = AsyncHumanloop( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.agents.update_log( + id="id", + log_id="log_id", + ) + + + asyncio.run(main()) + """ + response = await self._raw_client.update_log( + id, + log_id, + messages=messages, + output_message=output_message, + inputs=inputs, + output=output, + error=error, + log_status=log_status, + request_options=request_options, + ) + return response.data + + async def call_stream( + self, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, + tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT, + agent: typing.Optional[AgentKernelRequestParams] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + source_datapoint_id: typing.Optional[str] = OMIT, + trace_parent_id: typing.Optional[str] = OMIT, + user: typing.Optional[str] = OMIT, + agents_call_stream_request_environment: typing.Optional[str] = OMIT, + save: typing.Optional[bool] = OMIT, + log_id: typing.Optional[str] = OMIT, + provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, + return_inputs: typing.Optional[bool] = OMIT, + include_trace_children: typing.Optional[bool] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.AsyncIterator[AgentCallStreamResponse]: + """ + Call an Agent. + + Calling an Agent calls the model provider before logging + the request, responses and metadata to Humanloop. + + You can use query parameters `version_id`, or `environment`, to target + an existing version of the Agent. Otherwise the default deployed version will be chosen. + + Instead of targeting an existing version explicitly, you can instead pass in + Agent details in the request body. In this case, we will check if the details correspond + to an existing version of the Agent. If they do not, we will create a new version. This is helpful + in the case where you are storing or deriving your Agent details in code. + + Parameters + ---------- + version_id : typing.Optional[str] + A specific Version ID of the Agent to log to. + + environment : typing.Optional[str] + Name of the Environment identifying a deployed version to log to. + + path : typing.Optional[str] + Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Agent. + + messages : typing.Optional[typing.Sequence[ChatMessageParams]] + The messages passed to the provider chat endpoint. + + tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams] + Controls how the model uses tools. The following options are supported: + - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. + - `'required'` means the model must call one or more of the provided tools. + - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function. + + agent : typing.Optional[AgentKernelRequestParams] + Details of your Agent. A new Agent version will be created if the provided details are new. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the prompt template. + + source : typing.Optional[str] + Identifies where the model was called from. + + metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Any additional metadata to record. + + start_time : typing.Optional[dt.datetime] + When the logged event started. + + end_time : typing.Optional[dt.datetime] + When the logged event ended. + + log_status : typing.Optional[LogStatus] + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + + source_datapoint_id : typing.Optional[str] + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + + trace_parent_id : typing.Optional[str] + The ID of the parent Log to nest this Log under in a Trace. + + user : typing.Optional[str] + End-user ID related to the Log. + + agents_call_stream_request_environment : typing.Optional[str] + The name of the Environment the Log is associated to. + + save : typing.Optional[bool] + Whether the request/response payloads will be stored on Humanloop. + + log_id : typing.Optional[str] + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + + provider_api_keys : typing.Optional[ProviderApiKeysParams] + API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. + + return_inputs : typing.Optional[bool] + Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true. + + include_trace_children : typing.Optional[bool] + If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration.
+ + Yields + ------ + typing.AsyncIterator[AgentCallStreamResponse] + + + Examples + -------- + import asyncio + + from humanloop import AsyncHumanloop + + client = AsyncHumanloop( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + response = client.agents.call_stream() + async for chunk in response: + print(chunk) + + + asyncio.run(main()) + """ + async with self._raw_client.call_stream( + version_id=version_id, + environment=environment, + path=path, + id=id, + messages=messages, + tool_choice=tool_choice, + agent=agent, + inputs=inputs, + source=source, + metadata=metadata, + start_time=start_time, + end_time=end_time, + log_status=log_status, + source_datapoint_id=source_datapoint_id, + trace_parent_id=trace_parent_id, + user=user, + agents_call_stream_request_environment=agents_call_stream_request_environment, + save=save, + log_id=log_id, + provider_api_keys=provider_api_keys, + return_inputs=return_inputs, + include_trace_children=include_trace_children, + request_options=request_options, + ) as r: + async for data in r.data: + yield data + + async def call( + self, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, + tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT, + agent: typing.Optional[AgentKernelRequestParams] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + source_datapoint_id: typing.Optional[str] = OMIT, + trace_parent_id: typing.Optional[str] = OMIT, + user: typing.Optional[str] = OMIT, + agents_call_request_environment: typing.Optional[str] = OMIT, + save: typing.Optional[bool] = OMIT, + log_id: typing.Optional[str] = OMIT, + provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, + return_inputs: typing.Optional[bool] = OMIT, + include_trace_children: typing.Optional[bool] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AgentCallResponse: + """ + Call an Agent. + + Calling an Agent calls the model provider before logging + the request, responses and metadata to Humanloop. + + You can use query parameters `version_id`, or `environment`, to target + an existing version of the Agent. Otherwise the default deployed version will be chosen. + + Instead of targeting an existing version explicitly, you can instead pass in + Agent details in the request body. In this case, we will check if the details correspond + to an existing version of the Agent. If they do not, we will create a new version. This is helpful + in the case where you are storing or deriving your Agent details in code. + + Parameters + ---------- + version_id : typing.Optional[str] + A specific Version ID of the Agent to log to. + + environment : typing.Optional[str] + Name of the Environment identifying a deployed version to log to. + + path : typing.Optional[str] + Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Agent.
+ + messages : typing.Optional[typing.Sequence[ChatMessageParams]] + The messages passed to the provider chat endpoint. + + tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams] + Controls how the model uses tools. The following options are supported: + - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. + - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. + - `'required'` means the model must call one or more of the provided tools. + - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function. + + agent : typing.Optional[AgentKernelRequestParams] + Details of your Agent. A new Agent version will be created if the provided details are new. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the prompt template. + + source : typing.Optional[str] + Identifies where the model was called from. + + metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Any additional metadata to record. + + start_time : typing.Optional[dt.datetime] + When the logged event started. + + end_time : typing.Optional[dt.datetime] + When the logged event ended. + + log_status : typing.Optional[LogStatus] + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + + source_datapoint_id : typing.Optional[str] + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + + trace_parent_id : typing.Optional[str] + The ID of the parent Log to nest this Log under in a Trace. + + user : typing.Optional[str] + End-user ID related to the Log. + + agents_call_request_environment : typing.Optional[str] + The name of the Environment the Log is associated to. + + save : typing.Optional[bool] + Whether the request/response payloads will be stored on Humanloop. + + log_id : typing.Optional[str] + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + + provider_api_keys : typing.Optional[ProviderApiKeysParams] + API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. + + return_inputs : typing.Optional[bool] + Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true. + + include_trace_children : typing.Optional[bool] + If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration.
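+ + A minimal sketch of the inline-version pattern described above; the path, message content, and kernel fields are illustrative, and the `agent` dict is assumed to accept `AgentKernelRequestParams` fields such as `model`: + + response = await client.agents.call( + path="folder/name", + messages=[{"role": "user", "content": "Hello"}], + agent={"model": "gpt-4o"}, + ) + + If the kernel details do not match an existing version, a new Agent version is created, as described above.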
+ + Returns + ------- + AgentCallResponse + + + Examples + -------- + import asyncio + + from humanloop import AsyncHumanloop + + client = AsyncHumanloop( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.agents.call() + + + asyncio.run(main()) + """ + response = await self._raw_client.call( + version_id=version_id, + environment=environment, + path=path, + id=id, + messages=messages, + tool_choice=tool_choice, + agent=agent, + inputs=inputs, + source=source, + metadata=metadata, + start_time=start_time, + end_time=end_time, + log_status=log_status, + source_datapoint_id=source_datapoint_id, + trace_parent_id=trace_parent_id, + user=user, + agents_call_request_environment=agents_call_request_environment, + save=save, + log_id=log_id, + provider_api_keys=provider_api_keys, + return_inputs=return_inputs, + include_trace_children=include_trace_children, + request_options=request_options, + ) + return response.data + + async def continue_stream( + self, + *, + log_id: str, + messages: typing.Sequence[ChatMessageParams], + provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, + include_trace_children: typing.Optional[bool] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.AsyncIterator[AgentContinueStreamResponse]: + """ + Continue an incomplete Agent call. + + This endpoint allows continuing an existing incomplete Agent call, using the context + from the previous interaction. The Agent will resume processing from where it left off. + + The original log must be in an incomplete state to be continued. + + The messages in the request will be appended + to the original messages in the log. + + Parameters + ---------- + log_id : str + This identifies the Agent Log to continue. + + messages : typing.Sequence[ChatMessageParams] + The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls. + + provider_api_keys : typing.Optional[ProviderApiKeysParams] + API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. + + include_trace_children : typing.Optional[bool] + If true, populate `trace_children` for the returned Agent Log. Defaults to false. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Yields + ------ + typing.AsyncIterator[AgentContinueStreamResponse] + + + Examples + -------- + import asyncio + + from humanloop import AsyncHumanloop + + client = AsyncHumanloop( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + response = client.agents.continue_stream( + log_id="log_id", + messages=[{"role": "user"}], + ) + async for chunk in response: + print(chunk) + + + asyncio.run(main()) + """ + async with self._raw_client.continue_stream( + log_id=log_id, + messages=messages, + provider_api_keys=provider_api_keys, + include_trace_children=include_trace_children, + request_options=request_options, + ) as r: + async for data in r.data: + yield data + + async def continue_( + self, + *, + log_id: str, + messages: typing.Sequence[ChatMessageParams], + provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, + include_trace_children: typing.Optional[bool] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AgentContinueResponse: + """ + Continue an incomplete Agent call.
+ + This endpoint allows continuing an existing incomplete Agent call, using the context + from the previous interaction. The Agent will resume processing from where it left off. + + The original log must be in an incomplete state to be continued. + + The messages in the request will be appended + to the original messages in the log. + + Parameters + ---------- + log_id : str + This identifies the Agent Log to continue. + + messages : typing.Sequence[ChatMessageParams] + The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls. + + provider_api_keys : typing.Optional[ProviderApiKeysParams] + API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. + + include_trace_children : typing.Optional[bool] + If true, populate `trace_children` for the returned Agent Log. Defaults to false. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AgentContinueResponse + + + Examples + -------- + import asyncio + + from humanloop import AsyncHumanloop + + client = AsyncHumanloop( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.agents.continue_( + log_id="log_id", + messages=[{"role": "user"}], + ) + + + asyncio.run(main()) + """ + response = await self._raw_client.continue_( + log_id=log_id, + messages=messages, + provider_api_keys=provider_api_keys, + include_trace_children=include_trace_children, + request_options=request_options, + ) + return response.data + + async def list( + self, + *, + page: typing.Optional[int] = None, + size: typing.Optional[int] = None, + name: typing.Optional[str] = None, + user_filter: typing.Optional[str] = None, + sort_by: typing.Optional[ProjectSortBy] = None, + order: typing.Optional[SortOrder] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> PaginatedDataAgentResponse: + """ + Get a list of all Agents. + + Parameters + ---------- + page : typing.Optional[int] + Page number for pagination. + + size : typing.Optional[int] + Page size for pagination. Number of Agents to fetch. + + name : typing.Optional[str] + Case-insensitive filter for Agent name. + + user_filter : typing.Optional[str] + Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users. + + sort_by : typing.Optional[ProjectSortBy] + Field to sort Agents by + + order : typing.Optional[SortOrder] + Direction to sort by. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
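+ + A minimal sketch of paging through all Agents with the parameters above, assuming the paginated response exposes its items as `records`; the page size is illustrative: + + page = 1 + while True: + result = await client.agents.list(page=page, size=50) + if not result.records: + break + for agent in result.records: + print(agent.path) + page += 1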
+ + Returns + ------- + PaginatedDataAgentResponse + Successful Response + + Examples + -------- + import asyncio + + from humanloop import AsyncHumanloop + + client = AsyncHumanloop( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.agents.list() + + + asyncio.run(main()) + """ + response = await self._raw_client.list( + page=page, + size=size, + name=name, + user_filter=user_filter, + sort_by=sort_by, + order=order, + request_options=request_options, + ) + return response.data + + async def upsert( + self, + *, + model: str, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + endpoint: typing.Optional[ModelEndpoints] = OMIT, + template: typing.Optional[AgentRequestTemplateParams] = OMIT, + template_language: typing.Optional[TemplateLanguage] = OMIT, + provider: typing.Optional[ModelProviders] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + temperature: typing.Optional[float] = OMIT, + top_p: typing.Optional[float] = OMIT, + stop: typing.Optional[AgentRequestStopParams] = OMIT, + presence_penalty: typing.Optional[float] = OMIT, + frequency_penalty: typing.Optional[float] = OMIT, + other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + seed: typing.Optional[int] = OMIT, + response_format: typing.Optional[ResponseFormatParams] = OMIT, + reasoning_effort: typing.Optional[AgentRequestReasoningEffortParams] = OMIT, + tools: typing.Optional[typing.Sequence[AgentRequestToolsItemParams]] = OMIT, + attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + max_iterations: typing.Optional[int] = OMIT, + version_name: typing.Optional[str] = OMIT, + version_description: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + tags: typing.Optional[typing.Sequence[str]] = OMIT, + readme: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AgentResponse: + """ + Create an Agent or update it with a new version if it already exists. + + Agents are identified by the `ID` or their `path`. The parameters (i.e. the template, temperature, model etc.) and + tools determine the versions of the Agent. + + You can provide `version_name` and `version_description` to identify and describe your versions. + Version names must be unique within an Agent - attempting to create a version with a name + that already exists will result in a 409 Conflict error. + + Parameters + ---------- + model : str + The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models) + + path : typing.Optional[str] + Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Agent. + + endpoint : typing.Optional[ModelEndpoints] + The provider model endpoint used. + + template : typing.Optional[AgentRequestTemplateParams] + The template contains the main structure and instructions for the model, including input variables for dynamic values. + + For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable. + For completion models, provide a prompt template as a string. + + Input variables should be specified with double curly bracket syntax: `{{input_name}}`. + + template_language : typing.Optional[TemplateLanguage] + The template language to use for rendering the template.
+ + provider : typing.Optional[ModelProviders] + The company providing the underlying model service. + + max_tokens : typing.Optional[int] + The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt. + + temperature : typing.Optional[float] + What sampling temperature to use when making a generation. Higher values mean the model will be more creative. + + top_p : typing.Optional[float] + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. + + stop : typing.Optional[AgentRequestStopParams] + The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence. + + presence_penalty : typing.Optional[float] + Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far. + + frequency_penalty : typing.Optional[float] + Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far. + + other : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Other parameter values to be passed to the provider call. + + seed : typing.Optional[int] + If specified, model will make a best effort to sample deterministically, but it is not guaranteed. + + response_format : typing.Optional[ResponseFormatParams] + The format of the response. Only `{"type": "json_object"}` is currently supported for chat. + + reasoning_effort : typing.Optional[AgentRequestReasoningEffortParams] + Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. + + tools : typing.Optional[typing.Sequence[AgentRequestToolsItemParams]] + + attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used. + + max_iterations : typing.Optional[int] + The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called. + + version_name : typing.Optional[str] + Unique name for the Agent version. Each Agent can only have one version with a given name. + + version_description : typing.Optional[str] + Description of the Version. + + description : typing.Optional[str] + Description of the Agent. + + tags : typing.Optional[typing.Sequence[str]] + List of tags associated with this Agent. + + readme : typing.Optional[str] + Long description of the Agent. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration.
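+ + A minimal sketch of the chat template syntax described above; the path, model, version name, and message contents are illustrative: + + await client.agents.upsert( + path="folder/name", + model="gpt-4o", + template=[ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "{{question}}"}, + ], + version_name="v1", + ) + + The `{{question}}` input variable uses the double curly bracket syntax documented for the `template` parameter.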
+ + Returns + ------- + AgentResponse + Successful Response + + Examples + -------- + import asyncio + + from humanloop import AsyncHumanloop + + client = AsyncHumanloop( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.agents.upsert( + model="model", + ) + + + asyncio.run(main()) + """ + response = await self._raw_client.upsert( + model=model, + path=path, + id=id, + endpoint=endpoint, + template=template, + template_language=template_language, + provider=provider, + max_tokens=max_tokens, + temperature=temperature, + top_p=top_p, + stop=stop, + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + other=other, + seed=seed, + response_format=response_format, + reasoning_effort=reasoning_effort, + tools=tools, + attributes=attributes, + max_iterations=max_iterations, + version_name=version_name, + version_description=version_description, + description=description, + tags=tags, + readme=readme, + request_options=request_options, + ) + return response.data + + async def delete_agent_version( + self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> None: + """ + Delete a version of the Agent. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + version_id : str + Unique identifier for the specific version of the Agent. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + None + + Examples + -------- + import asyncio + + from humanloop import AsyncHumanloop + + client = AsyncHumanloop( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.agents.delete_agent_version( + id="id", + version_id="version_id", + ) + + + asyncio.run(main()) + """ + response = await self._raw_client.delete_agent_version(id, version_id, request_options=request_options) + return response.data + + async def patch_agent_version( + self, + id: str, + version_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AgentResponse: + """ + Update the name or description of the Agent version. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + version_id : str + Unique identifier for the specific version of the Agent. + + name : typing.Optional[str] + Name of the version. + + description : typing.Optional[str] + Description of the version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AgentResponse + Successful Response + + Examples + -------- + import asyncio + + from humanloop import AsyncHumanloop + + client = AsyncHumanloop( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.agents.patch_agent_version( + id="id", + version_id="version_id", + ) + + + asyncio.run(main()) + """ + response = await self._raw_client.patch_agent_version( + id, version_id, name=name, description=description, request_options=request_options + ) + return response.data + + async def get( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AgentResponse: + """ + Retrieve the Agent with the given ID. + + By default, the deployed version of the Agent is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Agent. 
+ + Parameters + ---------- + id : str + Unique identifier for Agent. + + version_id : typing.Optional[str] + A specific Version ID of the Agent to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AgentResponse + Successful Response + + Examples + -------- + import asyncio + + from humanloop import AsyncHumanloop + + client = AsyncHumanloop( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.agents.get( + id="id", + ) + + + asyncio.run(main()) + """ + response = await self._raw_client.get( + id, version_id=version_id, environment=environment, request_options=request_options + ) + return response.data + + async def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: + """ + Delete the Agent with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + None + + Examples + -------- + import asyncio + + from humanloop import AsyncHumanloop + + client = AsyncHumanloop( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.agents.delete( + id="id", + ) + + + asyncio.run(main()) + """ + response = await self._raw_client.delete(id, request_options=request_options) + return response.data + + async def move( + self, + id: str, + *, + path: typing.Optional[str] = OMIT, + name: typing.Optional[str] = OMIT, + directory_id: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AgentResponse: + """ + Move the Agent to a different path or change the name. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + path : typing.Optional[str] + Path of the Agent including the Agent name, which is used as a unique identifier. + + name : typing.Optional[str] + Name of the Agent. + + directory_id : typing.Optional[str] + Unique identifier for the Directory to move the Agent to. Starts with `dir_`. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AgentResponse + Successful Response + + Examples + -------- + import asyncio + + from humanloop import AsyncHumanloop + + client = AsyncHumanloop( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.agents.move( + id="id", + ) + + + asyncio.run(main()) + """ + response = await self._raw_client.move( + id, path=path, name=name, directory_id=directory_id, request_options=request_options + ) + return response.data + + async def list_versions( + self, + id: str, + *, + evaluator_aggregates: typing.Optional[bool] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> ListAgents: + """ + Get a list of all the versions of an Agent. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + evaluator_aggregates : typing.Optional[bool] + Whether to include Evaluator aggregate results for the versions in the response. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration.
+ + Returns + ------- + ListAgents + Successful Response + + Examples + -------- + import asyncio + + from humanloop import AsyncHumanloop + + client = AsyncHumanloop( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.agents.list_versions( + id="id", + ) + + + asyncio.run(main()) + """ + response = await self._raw_client.list_versions( + id, evaluator_aggregates=evaluator_aggregates, request_options=request_options + ) + return response.data + + async def set_deployment( + self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None + ) -> AgentResponse: + """ + Deploy Agent to an Environment. + + Set the deployed version for the specified Environment. This Agent + will be used for calls made to the Agent in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + environment_id : str + Unique identifier for the Environment to deploy the Version to. + + version_id : str + Unique identifier for the specific version of the Agent. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AgentResponse + Successful Response + + Examples + -------- + import asyncio + + from humanloop import AsyncHumanloop + + client = AsyncHumanloop( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.agents.set_deployment( + id="id", + environment_id="environment_id", + version_id="version_id", + ) + + + asyncio.run(main()) + """ + response = await self._raw_client.set_deployment( + id, environment_id, version_id=version_id, request_options=request_options + ) + return response.data + + async def remove_deployment( + self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> None: + """ + Remove deployed Agent from the Environment. + + Remove the deployed version for the specified Environment. This Agent + will no longer be used for calls made to the Agent in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + environment_id : str + Unique identifier for the Environment to remove the deployment from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + None + + Examples + -------- + import asyncio + + from humanloop import AsyncHumanloop + + client = AsyncHumanloop( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.agents.remove_deployment( + id="id", + environment_id="environment_id", + ) + + + asyncio.run(main()) + """ + response = await self._raw_client.remove_deployment(id, environment_id, request_options=request_options) + return response.data + + async def list_environments( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[FileEnvironmentResponse]: + """ + List all Environments and their deployed versions for the Agent. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[FileEnvironmentResponse] + Successful Response + + Examples + -------- + import asyncio + + from humanloop import AsyncHumanloop + + client = AsyncHumanloop( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.agents.list_environments( + id="id", + ) + + + asyncio.run(main()) + """ + response = await self._raw_client.list_environments(id, request_options=request_options) + return response.data + + async def update_monitoring( + self, + id: str, + *, + activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, + deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AgentResponse: + """ + Activate and deactivate Evaluators for monitoring the Agent. + + An activated Evaluator will automatically be run on all new Logs + within the Agent for monitoring purposes. + + Parameters + ---------- + id : str + + activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] + Evaluators to activate for Monitoring. These will be automatically run on new Logs. + + deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] + Evaluators to deactivate. These will not be run on new Logs. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AgentResponse + Successful Response + + Examples + -------- + import asyncio + + from humanloop import AsyncHumanloop + + client = AsyncHumanloop( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.agents.update_monitoring( + id="id", + ) + + + asyncio.run(main()) + """ + response = await self._raw_client.update_monitoring( + id, activate=activate, deactivate=deactivate, request_options=request_options + ) + return response.data + + async def serialize( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> None: + """ + Serialize an Agent to the .agent file format. + + Useful for storing the Agent with your code in a version control system, + or for editing with an AI tool. + + By default, the deployed version of the Agent is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Agent. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + version_id : typing.Optional[str] + A specific Version ID of the Agent to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + None + + Examples + -------- + import asyncio + + from humanloop import AsyncHumanloop + + client = AsyncHumanloop( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.agents.serialize( + id="id", + ) + + + asyncio.run(main()) + """ + response = await self._raw_client.serialize( + id, version_id=version_id, environment=environment, request_options=request_options + ) + return response.data + + async def deserialize( + self, *, agent: str, request_options: typing.Optional[RequestOptions] = None + ) -> AgentKernelRequest: + """ + Deserialize an Agent from the .agent file format. 
+ + This returns a subset of the attributes required by an Agent. + This subset defines the Agent version (e.g. `model`, `temperature`, etc.). + + Parameters + ---------- + agent : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AgentKernelRequest + Successful Response + + Examples + -------- + import asyncio + + from humanloop import AsyncHumanloop + + client = AsyncHumanloop( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.agents.deserialize( + agent="agent", + ) + + + asyncio.run(main()) + """ + response = await self._raw_client.deserialize(agent=agent, request_options=request_options) + return response.data diff --git a/src/humanloop/agents/raw_client.py b/src/humanloop/agents/raw_client.py new file mode 100644 index 00000000..4957e33b --- /dev/null +++ b/src/humanloop/agents/raw_client.py @@ -0,0 +1,3869 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ..core.client_wrapper import SyncClientWrapper +from ..requests.chat_message import ChatMessageParams +from .requests.agent_log_request_tool_choice import AgentLogRequestToolChoiceParams +from ..requests.agent_kernel_request import AgentKernelRequestParams +import datetime as dt +from ..types.log_status import LogStatus +from ..core.request_options import RequestOptions +from ..core.http_response import HttpResponse +from ..types.create_agent_log_response import CreateAgentLogResponse +from ..core.serialization import convert_and_respect_annotation_metadata +from ..core.unchecked_base_model import construct_type +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..types.http_validation_error import HttpValidationError +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError +from ..types.log_response import LogResponse +from ..core.jsonable_encoder import jsonable_encoder +from .requests.agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoiceParams +from ..requests.provider_api_keys import ProviderApiKeysParams +from ..types.agent_call_stream_response import AgentCallStreamResponse +import httpx_sse +import contextlib +from .requests.agents_call_request_tool_choice import AgentsCallRequestToolChoiceParams +from ..types.agent_call_response import AgentCallResponse +from ..types.agent_continue_stream_response import AgentContinueStreamResponse +from ..types.agent_continue_response import AgentContinueResponse +from ..types.project_sort_by import ProjectSortBy +from ..types.sort_order import SortOrder +from ..types.paginated_data_agent_response import PaginatedDataAgentResponse +from ..types.model_endpoints import ModelEndpoints +from .requests.agent_request_template import AgentRequestTemplateParams +from ..types.template_language import TemplateLanguage +from ..types.model_providers import ModelProviders +from .requests.agent_request_stop import AgentRequestStopParams +from ..requests.response_format import ResponseFormatParams +from .requests.agent_request_reasoning_effort import AgentRequestReasoningEffortParams +from .requests.agent_request_tools_item import AgentRequestToolsItemParams +from ..types.agent_response import AgentResponse +from ..types.list_agents import ListAgents +from ..types.file_environment_response import FileEnvironmentResponse +from ..requests.evaluator_activation_deactivation_request_activate_item import ( + EvaluatorActivationDeactivationRequestActivateItemParams, +) +from
..requests.evaluator_activation_deactivation_request_deactivate_item import ( + EvaluatorActivationDeactivationRequestDeactivateItemParams, +) +from ..types.agent_kernel_request import AgentKernelRequest +from ..core.client_wrapper import AsyncClientWrapper +from ..core.http_response import AsyncHttpResponse + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) + + +class RawAgentsClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def log( + self, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + run_id: typing.Optional[str] = OMIT, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + output_message: typing.Optional[ChatMessageParams] = OMIT, + prompt_tokens: typing.Optional[int] = OMIT, + reasoning_tokens: typing.Optional[int] = OMIT, + output_tokens: typing.Optional[int] = OMIT, + prompt_cost: typing.Optional[float] = OMIT, + output_cost: typing.Optional[float] = OMIT, + finish_reason: typing.Optional[str] = OMIT, + messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, + tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT, + agent: typing.Optional[AgentKernelRequestParams] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + output: typing.Optional[str] = OMIT, + created_at: typing.Optional[dt.datetime] = OMIT, + error: typing.Optional[str] = OMIT, + provider_latency: typing.Optional[float] = OMIT, + stdout: typing.Optional[str] = OMIT, + provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + source_datapoint_id: typing.Optional[str] = OMIT, + trace_parent_id: typing.Optional[str] = OMIT, + user: typing.Optional[str] = OMIT, + agent_log_request_environment: typing.Optional[str] = OMIT, + save: typing.Optional[bool] = OMIT, + log_id: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[CreateAgentLogResponse]: + """ + Create an Agent Log. + + You can use query parameters `version_id`, or `environment`, to target + an existing version of the Agent. Otherwise, the default deployed version will be chosen. + + If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete` + in order to trigger Evaluators. + + Parameters + ---------- + version_id : typing.Optional[str] + A specific Version ID of the Agent to log to. + + environment : typing.Optional[str] + Name of the Environment identifying a deployed version to log to. + + run_id : typing.Optional[str] + Unique identifier for the Run to associate the Log to. + + path : typing.Optional[str] + Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Agent. + + output_message : typing.Optional[ChatMessageParams] + The message returned by the provider. + + prompt_tokens : typing.Optional[int] + Number of tokens in the prompt used to generate the output.
+
+        reasoning_tokens : typing.Optional[int]
+            Number of reasoning tokens used to generate the output.
+
+        output_tokens : typing.Optional[int]
+            Number of tokens in the output generated by the model.
+
+        prompt_cost : typing.Optional[float]
+            Cost in dollars associated with the tokens in the prompt.
+
+        output_cost : typing.Optional[float]
+            Cost in dollars associated with the tokens in the output.
+
+        finish_reason : typing.Optional[str]
+            Reason the generation finished.
+
+        messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            The messages passed to the provider chat endpoint.
+
+        tool_choice : typing.Optional[AgentLogRequestToolChoiceParams]
+            Controls how the model uses tools. The following options are supported:
+            - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+            - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+            - `'required'` means the model must call one or more of the provided tools.
+            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+        agent : typing.Optional[AgentKernelRequestParams]
+            Details of your Agent. A new Agent version will be created if the provided details are new.
+
+        start_time : typing.Optional[dt.datetime]
+            When the logged event started.
+
+        end_time : typing.Optional[dt.datetime]
+            When the logged event ended.
+
+        output : typing.Optional[str]
+            Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+        created_at : typing.Optional[dt.datetime]
+            User defined timestamp for when the log was created.
+
+        error : typing.Optional[str]
+            Error message if the log is an error.
+
+        provider_latency : typing.Optional[float]
+            Duration of the logged event in seconds.
+
+        stdout : typing.Optional[str]
+            Captured log and debug statements.
+
+        provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw request sent to provider.
+
+        provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw response received from the provider.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the prompt template.
+
+        source : typing.Optional[str]
+            Identifies where the model was called from.
+
+        metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Any additional metadata to record.
+
+        log_status : typing.Optional[LogStatus]
+            Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+        source_datapoint_id : typing.Optional[str]
+            Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+        trace_parent_id : typing.Optional[str]
+            The ID of the parent Log to nest this Log under in a Trace.
+
+        user : typing.Optional[str]
+            End-user ID related to the Log.
+ + agent_log_request_environment : typing.Optional[str] + The name of the Environment the Log is associated to. + + save : typing.Optional[bool] + Whether the request/response payloads will be stored on Humanloop. + + log_id : typing.Optional[str] + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[CreateAgentLogResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + "agents/log", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + }, + json={ + "run_id": run_id, + "path": path, + "id": id, + "output_message": convert_and_respect_annotation_metadata( + object_=output_message, annotation=ChatMessageParams, direction="write" + ), + "prompt_tokens": prompt_tokens, + "reasoning_tokens": reasoning_tokens, + "output_tokens": output_tokens, + "prompt_cost": prompt_cost, + "output_cost": output_cost, + "finish_reason": finish_reason, + "messages": convert_and_respect_annotation_metadata( + object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" + ), + "tool_choice": convert_and_respect_annotation_metadata( + object_=tool_choice, annotation=AgentLogRequestToolChoiceParams, direction="write" + ), + "agent": convert_and_respect_annotation_metadata( + object_=agent, annotation=AgentKernelRequestParams, direction="write" + ), + "start_time": start_time, + "end_time": end_time, + "output": output, + "created_at": created_at, + "error": error, + "provider_latency": provider_latency, + "stdout": stdout, + "provider_request": provider_request, + "provider_response": provider_response, + "inputs": inputs, + "source": source, + "metadata": metadata, + "log_status": log_status, + "source_datapoint_id": source_datapoint_id, + "trace_parent_id": trace_parent_id, + "user": user, + "environment": agent_log_request_environment, + "save": save, + "log_id": log_id, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + CreateAgentLogResponse, + construct_type( + type_=CreateAgentLogResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def update_log( + self, + id: str, + log_id: str, + *, + messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, + output_message: typing.Optional[ChatMessageParams] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + output: typing.Optional[str] = OMIT, + error: typing.Optional[str] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[LogResponse]: + """ + Update a Log. + + Update the details of a Log with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + log_id : str + Unique identifier for the Log. 
+
+        messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            List of chat messages that were used as an input to the Agent.
+
+        output_message : typing.Optional[ChatMessageParams]
+            The output message returned by this Agent.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the Agent Log.
+
+        output : typing.Optional[str]
+            The output of the Agent Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`.
+
+        error : typing.Optional[str]
+            The error message of the Agent Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`.
+
+        log_status : typing.Optional[LogStatus]
+            Status of the Agent Log. When an Agent Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        HttpResponse[LogResponse]
+            Successful Response
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            f"agents/{jsonable_encoder(id)}/log/{jsonable_encoder(log_id)}",
+            method="PATCH",
+            json={
+                "messages": convert_and_respect_annotation_metadata(
+                    object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+                ),
+                "output_message": convert_and_respect_annotation_metadata(
+                    object_=output_message, annotation=ChatMessageParams, direction="write"
+                ),
+                "inputs": inputs,
+                "output": output,
+                "error": error,
+                "log_status": log_status,
+            },
+            headers={
+                "content-type": "application/json",
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                _data = typing.cast(
+                    LogResponse,
+                    construct_type(
+                        type_=LogResponse,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+                return HttpResponse(response=_response, data=_data)
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    @contextlib.contextmanager
+    def call_stream(
+        self,
+        *,
+        version_id: typing.Optional[str] = None,
+        environment: typing.Optional[str] = None,
+        path: typing.Optional[str] = OMIT,
+        id: typing.Optional[str] = OMIT,
+        messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+        tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT,
+        agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+        inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        source: typing.Optional[str] = OMIT,
+        metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        start_time: typing.Optional[dt.datetime] = OMIT,
+        end_time: typing.Optional[dt.datetime] = OMIT,
+        log_status: typing.Optional[LogStatus] = OMIT,
+        source_datapoint_id: typing.Optional[str] = OMIT,
+        trace_parent_id: typing.Optional[str] = OMIT,
+        user: typing.Optional[str] = OMIT,
+        agents_call_stream_request_environment: typing.Optional[str] = OMIT,
+        save: typing.Optional[bool] = OMIT,
+        log_id: typing.Optional[str] = OMIT,
+        provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+        return_inputs: typing.Optional[bool] = OMIT,
+        include_trace_children: typing.Optional[bool] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> typing.Iterator[HttpResponse[typing.Iterator[AgentCallStreamResponse]]]:
+        """
+        Call an Agent.
+
+        Calling an Agent calls the model provider before logging
+        the request, responses and metadata to Humanloop.
+
+        You can use query parameters `version_id`, or `environment`, to target
+        an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+        Instead of targeting an existing version explicitly, you can instead pass in
+        Agent details in the request body. In this case, we will check if the details correspond
+        to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+        in the case where you are storing or deriving your Agent details in code.
+
+        Parameters
+        ----------
+        version_id : typing.Optional[str]
+            A specific Version ID of the Agent to log to.
+
+        environment : typing.Optional[str]
+            Name of the Environment identifying a deployed version to log to.
+
+        path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Agent.
+
+        messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            The messages passed to the provider chat endpoint.
+
+        tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams]
+            Controls how the model uses tools. The following options are supported:
+            - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+            - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+            - `'required'` means the model must call one or more of the provided tools.
+            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+        agent : typing.Optional[AgentKernelRequestParams]
+            Details of your Agent. A new Agent version will be created if the provided details are new.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the prompt template.
+
+        source : typing.Optional[str]
+            Identifies where the model was called from.
+
+        metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Any additional metadata to record.
+
+        start_time : typing.Optional[dt.datetime]
+            When the logged event started.
+
+        end_time : typing.Optional[dt.datetime]
+            When the logged event ended.
+
+        log_status : typing.Optional[LogStatus]
+            Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+        source_datapoint_id : typing.Optional[str]
+            Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+        trace_parent_id : typing.Optional[str]
+            The ID of the parent Log to nest this Log under in a Trace.
+
+        user : typing.Optional[str]
+            End-user ID related to the Log.
+
+        agents_call_stream_request_environment : typing.Optional[str]
+            The name of the Environment the Log is associated to.
+
+        save : typing.Optional[bool]
+            Whether the request/response payloads will be stored on Humanloop.
+
+        log_id : typing.Optional[str]
+            This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+        provider_api_keys : typing.Optional[ProviderApiKeysParams]
+            API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+        return_inputs : typing.Optional[bool]
+            Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+        include_trace_children : typing.Optional[bool]
+            If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Yields
+        ------
+        typing.Iterator[HttpResponse[typing.Iterator[AgentCallStreamResponse]]]
+
+        """
+        with self._client_wrapper.httpx_client.stream(
+            "agents/call",
+            method="POST",
+            params={
+                "version_id": version_id,
+                "environment": environment,
+            },
+            json={
+                "path": path,
+                "id": id,
+                "messages": convert_and_respect_annotation_metadata(
+                    object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+                ),
+                "tool_choice": convert_and_respect_annotation_metadata(
+                    object_=tool_choice, annotation=AgentsCallStreamRequestToolChoiceParams, direction="write"
+                ),
+                "agent": convert_and_respect_annotation_metadata(
+                    object_=agent, annotation=AgentKernelRequestParams, direction="write"
+                ),
+                "inputs": inputs,
+                "source": source,
+                "metadata": metadata,
+                "start_time": start_time,
+                "end_time": end_time,
+                "log_status": log_status,
+                "source_datapoint_id": source_datapoint_id,
+                "trace_parent_id": trace_parent_id,
+                "user": user,
+                "environment": agents_call_stream_request_environment,
+                "save": save,
+                "log_id": log_id,
+                "provider_api_keys": convert_and_respect_annotation_metadata(
+                    object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+                ),
+                "return_inputs": return_inputs,
+                "include_trace_children": include_trace_children,
+                "stream": True,
+            },
+            headers={
+                "content-type": "application/json",
+            },
+            request_options=request_options,
+            omit=OMIT,
+        ) as _response:
+
+            def stream() -> HttpResponse[typing.Iterator[AgentCallStreamResponse]]:
+                try:
+                    if 200 <= _response.status_code < 300:
+
+                        def _iter():
+                            _event_source = httpx_sse.EventSource(_response)
+                            for _sse in _event_source.iter_sse():
+                                if _sse.data is None:
+                                    return
+                                try:
+                                    # Decode each SSE payload into the typed stream response.
+                                    yield typing.cast(
+                                        AgentCallStreamResponse,
+                                        construct_type(
+                                            type_=AgentCallStreamResponse,  # type: ignore
+                                            object_=_sse.json(),
+                                        ),
+                                    )
+                                except Exception:
+                                    pass
+                            return
+
+                        return HttpResponse(response=_response, data=_iter())
+                    _response.read()
+                    if _response.status_code == 422:
+                        raise UnprocessableEntityError(
+                            typing.cast(
+                                HttpValidationError,
+                                construct_type(
+                                    type_=HttpValidationError,  # type: ignore
+                                    object_=_response.json(),
+                                ),
+                            )
+                        )
+                    _response_json = _response.json()
+                except JSONDecodeError:
+                    raise ApiError(status_code=_response.status_code, body=_response.text)
+                raise ApiError(status_code=_response.status_code, body=_response_json)
+
+            yield stream()
+
+    def call(
+        self,
+        *,
+        version_id: typing.Optional[str] = None,
+        environment: typing.Optional[str] = None,
+        path: typing.Optional[str] = OMIT,
+        id: typing.Optional[str] = OMIT,
+        messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+        tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT,
+        agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+        inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        source: typing.Optional[str] = OMIT,
+        metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        start_time: typing.Optional[dt.datetime] = OMIT,
+        end_time: typing.Optional[dt.datetime] = OMIT,
+        log_status: typing.Optional[LogStatus] = OMIT,
+        source_datapoint_id: typing.Optional[str] = OMIT,
+        trace_parent_id: typing.Optional[str] = OMIT,
+        user: typing.Optional[str] = OMIT,
+        agents_call_request_environment: typing.Optional[str] = OMIT,
+        save: typing.Optional[bool] = OMIT,
+        log_id: typing.Optional[str] = OMIT,
+        provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+        return_inputs: typing.Optional[bool] = OMIT,
+        include_trace_children: typing.Optional[bool] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> HttpResponse[AgentCallResponse]:
+        """
+        Call an Agent.
+
+        Calling an Agent calls the model provider before logging
+        the request, responses and metadata to Humanloop.
+
+        You can use query parameters `version_id`, or `environment`, to target
+        an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+        Instead of targeting an existing version explicitly, you can instead pass in
+        Agent details in the request body. In this case, we will check if the details correspond
+        to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+        in the case where you are storing or deriving your Agent details in code.
+
+        Parameters
+        ----------
+        version_id : typing.Optional[str]
+            A specific Version ID of the Agent to log to.
+
+        environment : typing.Optional[str]
+            Name of the Environment identifying a deployed version to log to.
+
+        path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Agent.
+
+        messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            The messages passed to the provider chat endpoint.
+
+        tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams]
+            Controls how the model uses tools. The following options are supported:
+            - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+            - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+            - `'required'` means the model must call one or more of the provided tools.
+            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+        agent : typing.Optional[AgentKernelRequestParams]
+            Details of your Agent. A new Agent version will be created if the provided details are new.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the prompt template.
+
+        source : typing.Optional[str]
+            Identifies where the model was called from.
+
+        metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Any additional metadata to record.
+
+        start_time : typing.Optional[dt.datetime]
+            When the logged event started.
+ + end_time : typing.Optional[dt.datetime] + When the logged event ended. + + log_status : typing.Optional[LogStatus] + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + + source_datapoint_id : typing.Optional[str] + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + + trace_parent_id : typing.Optional[str] + The ID of the parent Log to nest this Log under in a Trace. + + user : typing.Optional[str] + End-user ID related to the Log. + + agents_call_request_environment : typing.Optional[str] + The name of the Environment the Log is associated to. + + save : typing.Optional[bool] + Whether the request/response payloads will be stored on Humanloop. + + log_id : typing.Optional[str] + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + + provider_api_keys : typing.Optional[ProviderApiKeysParams] + API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. + + return_inputs : typing.Optional[bool] + Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true. + + include_trace_children : typing.Optional[bool] + If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
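+
+        Examples
+        --------
+        A minimal usage sketch via the top-level client, which wraps this raw
+        endpoint (the Agent path and message below are illustrative placeholders):
+
+        from humanloop import Humanloop
+
+        client = Humanloop(
+            api_key="YOUR_API_KEY",
+        )
+        client.agents.call(
+            path="sample/agent",
+            messages=[{"role": "user", "content": "Hello"}],
+        )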
+ + Returns + ------- + HttpResponse[AgentCallResponse] + + """ + _response = self._client_wrapper.httpx_client.request( + "agents/call", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + }, + json={ + "path": path, + "id": id, + "messages": convert_and_respect_annotation_metadata( + object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" + ), + "tool_choice": convert_and_respect_annotation_metadata( + object_=tool_choice, annotation=AgentsCallRequestToolChoiceParams, direction="write" + ), + "agent": convert_and_respect_annotation_metadata( + object_=agent, annotation=AgentKernelRequestParams, direction="write" + ), + "inputs": inputs, + "source": source, + "metadata": metadata, + "start_time": start_time, + "end_time": end_time, + "log_status": log_status, + "source_datapoint_id": source_datapoint_id, + "trace_parent_id": trace_parent_id, + "user": user, + "environment": agents_call_request_environment, + "save": save, + "log_id": log_id, + "provider_api_keys": convert_and_respect_annotation_metadata( + object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write" + ), + "return_inputs": return_inputs, + "include_trace_children": include_trace_children, + "stream": False, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + AgentCallResponse, + construct_type( + type_=AgentCallResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + @contextlib.contextmanager + def continue_stream( + self, + *, + log_id: str, + messages: typing.Sequence[ChatMessageParams], + provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, + include_trace_children: typing.Optional[bool] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Iterator[HttpResponse[typing.Iterator[AgentContinueStreamResponse]]]: + """ + Continue an incomplete Agent call. + + This endpoint allows continuing an existing incomplete Agent call, using the context + from the previous interaction. The Agent will resume processing from where it left off. + + The original log must be in an incomplete state to be continued. + + The messages in the request will be appended + to the original messages in the log. + + Parameters + ---------- + log_id : str + This identifies the Agent Log to continue. + + messages : typing.Sequence[ChatMessageParams] + The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls. + + provider_api_keys : typing.Optional[ProviderApiKeysParams] + API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. + + include_trace_children : typing.Optional[bool] + If true, populate `trace_children` for the returned Agent Log. Defaults to false. 
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Yields
+        ------
+        typing.Iterator[HttpResponse[typing.Iterator[AgentContinueStreamResponse]]]
+
+        """
+        with self._client_wrapper.httpx_client.stream(
+            "agents/continue",
+            method="POST",
+            json={
+                "log_id": log_id,
+                "messages": convert_and_respect_annotation_metadata(
+                    object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+                ),
+                "provider_api_keys": convert_and_respect_annotation_metadata(
+                    object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+                ),
+                "include_trace_children": include_trace_children,
+                "stream": True,
+            },
+            headers={
+                "content-type": "application/json",
+            },
+            request_options=request_options,
+            omit=OMIT,
+        ) as _response:
+
+            def stream() -> HttpResponse[typing.Iterator[AgentContinueStreamResponse]]:
+                try:
+                    if 200 <= _response.status_code < 300:
+
+                        def _iter():
+                            _event_source = httpx_sse.EventSource(_response)
+                            for _sse in _event_source.iter_sse():
+                                if _sse.data is None:
+                                    return
+                                try:
+                                    # Decode each SSE payload into the typed stream response.
+                                    yield typing.cast(
+                                        AgentContinueStreamResponse,
+                                        construct_type(
+                                            type_=AgentContinueStreamResponse,  # type: ignore
+                                            object_=_sse.json(),
+                                        ),
+                                    )
+                                except Exception:
+                                    pass
+                            return
+
+                        return HttpResponse(response=_response, data=_iter())
+                    _response.read()
+                    if _response.status_code == 422:
+                        raise UnprocessableEntityError(
+                            typing.cast(
+                                HttpValidationError,
+                                construct_type(
+                                    type_=HttpValidationError,  # type: ignore
+                                    object_=_response.json(),
+                                ),
+                            )
+                        )
+                    _response_json = _response.json()
+                except JSONDecodeError:
+                    raise ApiError(status_code=_response.status_code, body=_response.text)
+                raise ApiError(status_code=_response.status_code, body=_response_json)
+
+            yield stream()
+
+    def continue_(
+        self,
+        *,
+        log_id: str,
+        messages: typing.Sequence[ChatMessageParams],
+        provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+        include_trace_children: typing.Optional[bool] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> HttpResponse[AgentContinueResponse]:
+        """
+        Continue an incomplete Agent call.
+
+        This endpoint allows continuing an existing incomplete Agent call, using the context
+        from the previous interaction. The Agent will resume processing from where it left off.
+
+        The original log must be in an incomplete state to be continued.
+
+        The messages in the request will be appended
+        to the original messages in the log.
+
+        Parameters
+        ----------
+        log_id : str
+            This identifies the Agent Log to continue.
+
+        messages : typing.Sequence[ChatMessageParams]
+            The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls.
+
+        provider_api_keys : typing.Optional[ProviderApiKeysParams]
+            API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+        include_trace_children : typing.Optional[bool]
+            If true, populate `trace_children` for the returned Agent Log. Defaults to false.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
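+
+        Examples
+        --------
+        A minimal usage sketch via the top-level client (the Log ID and the
+        tool-call result below are illustrative placeholders):
+
+        from humanloop import Humanloop
+
+        client = Humanloop(
+            api_key="YOUR_API_KEY",
+        )
+        client.agents.continue_(
+            log_id="log_123",
+            messages=[
+                {"role": "tool", "tool_call_id": "call_123", "content": "42"},
+            ],
+        )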
+
+        Returns
+        -------
+        HttpResponse[AgentContinueResponse]
+
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "agents/continue",
+            method="POST",
+            json={
+                "log_id": log_id,
+                "messages": convert_and_respect_annotation_metadata(
+                    object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+                ),
+                "provider_api_keys": convert_and_respect_annotation_metadata(
+                    object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+                ),
+                "include_trace_children": include_trace_children,
+                "stream": False,
+            },
+            headers={
+                "content-type": "application/json",
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                _data = typing.cast(
+                    AgentContinueResponse,
+                    construct_type(
+                        type_=AgentContinueResponse,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+                return HttpResponse(response=_response, data=_data)
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def list(
+        self,
+        *,
+        page: typing.Optional[int] = None,
+        size: typing.Optional[int] = None,
+        name: typing.Optional[str] = None,
+        user_filter: typing.Optional[str] = None,
+        sort_by: typing.Optional[ProjectSortBy] = None,
+        order: typing.Optional[SortOrder] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> HttpResponse[PaginatedDataAgentResponse]:
+        """
+        Get a list of all Agents.
+
+        Parameters
+        ----------
+        page : typing.Optional[int]
+            Page number for pagination.
+
+        size : typing.Optional[int]
+            Page size for pagination. Number of Agents to fetch.
+
+        name : typing.Optional[str]
+            Case-insensitive filter for Agent name.
+
+        user_filter : typing.Optional[str]
+            Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users.
+
+        sort_by : typing.Optional[ProjectSortBy]
+            Field to sort Agents by.
+
+        order : typing.Optional[SortOrder]
+            Direction to sort by.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
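+
+        Examples
+        --------
+        A minimal usage sketch via the top-level client, which is assumed here
+        to return an iterable pager (the page size is illustrative):
+
+        from humanloop import Humanloop
+
+        client = Humanloop(
+            api_key="YOUR_API_KEY",
+        )
+        response = client.agents.list(
+            size=10,
+        )
+        for agent in response:
+            print(agent.id)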
+
+        Returns
+        -------
+        HttpResponse[PaginatedDataAgentResponse]
+            Successful Response
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "agents",
+            method="GET",
+            params={
+                "page": page,
+                "size": size,
+                "name": name,
+                "user_filter": user_filter,
+                "sort_by": sort_by,
+                "order": order,
+            },
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                _data = typing.cast(
+                    PaginatedDataAgentResponse,
+                    construct_type(
+                        type_=PaginatedDataAgentResponse,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+                return HttpResponse(response=_response, data=_data)
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def upsert(
+        self,
+        *,
+        model: str,
+        path: typing.Optional[str] = OMIT,
+        id: typing.Optional[str] = OMIT,
+        endpoint: typing.Optional[ModelEndpoints] = OMIT,
+        template: typing.Optional[AgentRequestTemplateParams] = OMIT,
+        template_language: typing.Optional[TemplateLanguage] = OMIT,
+        provider: typing.Optional[ModelProviders] = OMIT,
+        max_tokens: typing.Optional[int] = OMIT,
+        temperature: typing.Optional[float] = OMIT,
+        top_p: typing.Optional[float] = OMIT,
+        stop: typing.Optional[AgentRequestStopParams] = OMIT,
+        presence_penalty: typing.Optional[float] = OMIT,
+        frequency_penalty: typing.Optional[float] = OMIT,
+        other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        seed: typing.Optional[int] = OMIT,
+        response_format: typing.Optional[ResponseFormatParams] = OMIT,
+        reasoning_effort: typing.Optional[AgentRequestReasoningEffortParams] = OMIT,
+        tools: typing.Optional[typing.Sequence[AgentRequestToolsItemParams]] = OMIT,
+        attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        max_iterations: typing.Optional[int] = OMIT,
+        version_name: typing.Optional[str] = OMIT,
+        version_description: typing.Optional[str] = OMIT,
+        description: typing.Optional[str] = OMIT,
+        tags: typing.Optional[typing.Sequence[str]] = OMIT,
+        readme: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> HttpResponse[AgentResponse]:
+        """
+        Create an Agent or update it with a new version if it already exists.
+
+        Agents are identified by the `ID` or their `path`. The parameters (i.e. the template, temperature, model, etc.) and
+        tools determine the versions of the Agent.
+
+        You can provide `version_name` and `version_description` to identify and describe your versions.
+        Version names must be unique within an Agent - attempting to create a version with a name
+        that already exists will result in a 409 Conflict error.
+
+        Parameters
+        ----------
+        model : str
+            The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+
+        path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Agent.
+
+        endpoint : typing.Optional[ModelEndpoints]
+            The provider model endpoint used.
+
+        template : typing.Optional[AgentRequestTemplateParams]
+            The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+            For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+            For completion models, provide a prompt template as a string.
+
+            Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+
+        template_language : typing.Optional[TemplateLanguage]
+            The template language to use for rendering the template.
+
+        provider : typing.Optional[ModelProviders]
+            The company providing the underlying model service.
+
+        max_tokens : typing.Optional[int]
+            The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt.
+
+        temperature : typing.Optional[float]
+            What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+
+        top_p : typing.Optional[float]
+            An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+
+        stop : typing.Optional[AgentRequestStopParams]
+            The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+
+        presence_penalty : typing.Optional[float]
+            Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+
+        frequency_penalty : typing.Optional[float]
+            Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+
+        other : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Other parameter values to be passed to the provider call.
+
+        seed : typing.Optional[int]
+            If specified, model will make a best effort to sample deterministically, but it is not guaranteed.
+
+        response_format : typing.Optional[ResponseFormatParams]
+            The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+
+        reasoning_effort : typing.Optional[AgentRequestReasoningEffortParams]
+            Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
+
+        tools : typing.Optional[typing.Sequence[AgentRequestToolsItemParams]]
+
+        attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used.
+
+        max_iterations : typing.Optional[int]
+            The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
+
+        version_name : typing.Optional[str]
+            Unique name for the Agent version. Each Agent can only have one version with a given name.
+
+        version_description : typing.Optional[str]
+            Description of the Version.
+
+        description : typing.Optional[str]
+            Description of the Agent.
+
+        tags : typing.Optional[typing.Sequence[str]]
+            List of tags associated with this Agent.
+
+        readme : typing.Optional[str]
+            Long description of the Agent.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
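+
+        Examples
+        --------
+        A minimal usage sketch via the top-level client (the path, model and
+        template below are illustrative placeholders):
+
+        from humanloop import Humanloop
+
+        client = Humanloop(
+            api_key="YOUR_API_KEY",
+        )
+        client.agents.upsert(
+            path="sample/agent",
+            model="gpt-4o",
+            template=[
+                {"role": "system", "content": "You are a helpful assistant."},
+            ],
+        )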
+ + Returns + ------- + HttpResponse[AgentResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + "agents", + method="POST", + json={ + "path": path, + "id": id, + "model": model, + "endpoint": endpoint, + "template": convert_and_respect_annotation_metadata( + object_=template, annotation=AgentRequestTemplateParams, direction="write" + ), + "template_language": template_language, + "provider": provider, + "max_tokens": max_tokens, + "temperature": temperature, + "top_p": top_p, + "stop": convert_and_respect_annotation_metadata( + object_=stop, annotation=AgentRequestStopParams, direction="write" + ), + "presence_penalty": presence_penalty, + "frequency_penalty": frequency_penalty, + "other": other, + "seed": seed, + "response_format": convert_and_respect_annotation_metadata( + object_=response_format, annotation=ResponseFormatParams, direction="write" + ), + "reasoning_effort": convert_and_respect_annotation_metadata( + object_=reasoning_effort, annotation=AgentRequestReasoningEffortParams, direction="write" + ), + "tools": convert_and_respect_annotation_metadata( + object_=tools, annotation=typing.Sequence[AgentRequestToolsItemParams], direction="write" + ), + "attributes": attributes, + "max_iterations": max_iterations, + "version_name": version_name, + "version_description": version_description, + "description": description, + "tags": tags, + "readme": readme, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + AgentResponse, + construct_type( + type_=AgentResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def delete_agent_version( + self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[None]: + """ + Delete a version of the Agent. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + version_id : str + Unique identifier for the specific version of the Agent. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
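+
+        Examples
+        --------
+        A minimal usage sketch via the top-level client (both IDs are
+        illustrative placeholders):
+
+        from humanloop import Humanloop
+
+        client = Humanloop(
+            api_key="YOUR_API_KEY",
+        )
+        client.agents.delete_agent_version(
+            id="ag_123",
+            version_id="agv_456",
+        )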
+ + Returns + ------- + HttpResponse[None] + """ + _response = self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return HttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def patch_agent_version( + self, + id: str, + version_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[AgentResponse]: + """ + Update the name or description of the Agent version. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + version_id : str + Unique identifier for the specific version of the Agent. + + name : typing.Optional[str] + Name of the version. + + description : typing.Optional[str] + Description of the version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[AgentResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", + method="PATCH", + json={ + "name": name, + "description": description, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + AgentResponse, + construct_type( + type_=AgentResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[AgentResponse]: + """ + Retrieve the Agent with the given ID. + + By default, the deployed version of the Agent is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Agent. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + version_id : typing.Optional[str] + A specific Version ID of the Agent to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
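+
+        Examples
+        --------
+        A minimal usage sketch via the top-level client, targeting the version
+        deployed to a named Environment (the ID and name are illustrative):
+
+        from humanloop import Humanloop
+
+        client = Humanloop(
+            api_key="YOUR_API_KEY",
+        )
+        client.agents.get(
+            id="ag_123",
+            environment="production",
+        )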
+
+        Returns
+        -------
+        HttpResponse[AgentResponse]
+            Successful Response
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            f"agents/{jsonable_encoder(id)}",
+            method="GET",
+            params={
+                "version_id": version_id,
+                "environment": environment,
+            },
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                _data = typing.cast(
+                    AgentResponse,
+                    construct_type(
+                        type_=AgentResponse,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+                return HttpResponse(response=_response, data=_data)
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> HttpResponse[None]:
+        """
+        Delete the Agent with the given ID.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Agent.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        HttpResponse[None]
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            f"agents/{jsonable_encoder(id)}",
+            method="DELETE",
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return HttpResponse(response=_response, data=None)
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def move(
+        self,
+        id: str,
+        *,
+        path: typing.Optional[str] = OMIT,
+        name: typing.Optional[str] = OMIT,
+        directory_id: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> HttpResponse[AgentResponse]:
+        """
+        Move the Agent to a different path or change the name.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Agent.
+
+        path : typing.Optional[str]
+            Path of the Agent including the Agent name, which is used as a unique identifier.
+
+        name : typing.Optional[str]
+            Name of the Agent.
+
+        directory_id : typing.Optional[str]
+            Unique identifier for the Directory to move the Agent to. Starts with `dir_`.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
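+
+        Examples
+        --------
+        A minimal usage sketch via the top-level client (the ID and new path
+        are illustrative placeholders):
+
+        from humanloop import Humanloop
+
+        client = Humanloop(
+            api_key="YOUR_API_KEY",
+        )
+        client.agents.move(
+            id="ag_123",
+            path="new directory/new name",
+        )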
+
+        Returns
+        -------
+        HttpResponse[AgentResponse]
+            Successful Response
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            f"agents/{jsonable_encoder(id)}",
+            method="PATCH",
+            json={
+                "path": path,
+                "name": name,
+                "directory_id": directory_id,
+            },
+            headers={
+                "content-type": "application/json",
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                _data = typing.cast(
+                    AgentResponse,
+                    construct_type(
+                        type_=AgentResponse,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+                return HttpResponse(response=_response, data=_data)
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def list_versions(
+        self,
+        id: str,
+        *,
+        evaluator_aggregates: typing.Optional[bool] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> HttpResponse[ListAgents]:
+        """
+        Get a list of all the versions of an Agent.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Agent.
+
+        evaluator_aggregates : typing.Optional[bool]
+            Whether to include Evaluator aggregate results for the versions in the response.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        HttpResponse[ListAgents]
+            Successful Response
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            f"agents/{jsonable_encoder(id)}/versions",
+            method="GET",
+            params={
+                "evaluator_aggregates": evaluator_aggregates,
+            },
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                _data = typing.cast(
+                    ListAgents,
+                    construct_type(
+                        type_=ListAgents,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+                return HttpResponse(response=_response, data=_data)
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def set_deployment(
+        self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
+    ) -> HttpResponse[AgentResponse]:
+        """
+        Deploy Agent to an Environment.
+
+        Set the deployed version for the specified Environment. This Agent
+        will be used for calls made to the Agent in this Environment.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Agent.
+
+        environment_id : str
+            Unique identifier for the Environment to deploy the Version to.
+
+        version_id : str
+            Unique identifier for the specific version of the Agent.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
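+
+        Examples
+        --------
+        A minimal usage sketch via the top-level client (all IDs are
+        illustrative placeholders):
+
+        from humanloop import Humanloop
+
+        client = Humanloop(
+            api_key="YOUR_API_KEY",
+        )
+        client.agents.set_deployment(
+            id="ag_123",
+            environment_id="env_123",
+            version_id="agv_456",
+        )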
+ + Returns + ------- + HttpResponse[AgentResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="POST", + params={ + "version_id": version_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + AgentResponse, + construct_type( + type_=AgentResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def remove_deployment( + self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[None]: + """ + Remove deployed Agent from the Environment. + + Remove the deployed version for the specified Environment. This Agent + will no longer be used for calls made to the Agent in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + environment_id : str + Unique identifier for the Environment to remove the deployment from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[None] + """ + _response = self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return HttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def list_environments( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[typing.List[FileEnvironmentResponse]]: + """ + List all Environments and their deployed versions for the Agent. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
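+
+        Examples
+        --------
+        A minimal usage sketch via the top-level client (the ID is an
+        illustrative placeholder):
+
+        from humanloop import Humanloop
+
+        client = Humanloop(
+            api_key="YOUR_API_KEY",
+        )
+        environments = client.agents.list_environments(
+            id="ag_123",
+        )
+        for environment in environments:
+            print(environment)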
+ + Returns + ------- + HttpResponse[typing.List[FileEnvironmentResponse]] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}/environments", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + typing.List[FileEnvironmentResponse], + construct_type( + type_=typing.List[FileEnvironmentResponse], # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def update_monitoring( + self, + id: str, + *, + activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, + deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[AgentResponse]: + """ + Activate and deactivate Evaluators for monitoring the Agent. + + An activated Evaluator will automatically be run on all new Logs + within the Agent for monitoring purposes. + + Parameters + ---------- + id : str + + activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] + Evaluators to activate for Monitoring. These will be automatically run on new Logs. + + deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] + Evaluators to deactivate. These will not be run on new Logs. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
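+
+        Examples
+        --------
+        A minimal usage sketch via the top-level client (the Agent ID and
+        Evaluator Version ID are illustrative placeholders):
+
+        from humanloop import Humanloop
+
+        client = Humanloop(
+            api_key="YOUR_API_KEY",
+        )
+        client.agents.update_monitoring(
+            id="ag_123",
+            activate=[{"evaluator_version_id": "evv_123"}],
+        )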
+
+        Returns
+        -------
+        HttpResponse[AgentResponse]
+            Successful Response
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            f"agents/{jsonable_encoder(id)}/evaluators",
+            method="POST",
+            json={
+                "activate": convert_and_respect_annotation_metadata(
+                    object_=activate,
+                    annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams],
+                    direction="write",
+                ),
+                "deactivate": convert_and_respect_annotation_metadata(
+                    object_=deactivate,
+                    annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams],
+                    direction="write",
+                ),
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                _data = typing.cast(
+                    AgentResponse,
+                    construct_type(
+                        type_=AgentResponse,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+                return HttpResponse(response=_response, data=_data)
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def serialize(
+        self,
+        id: str,
+        *,
+        version_id: typing.Optional[str] = None,
+        environment: typing.Optional[str] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> HttpResponse[None]:
+        """
+        Serialize an Agent to the .agent file format.
+
+        Useful for storing the Agent with your code in a version control system,
+        or for editing with an AI tool.
+
+        By default, the deployed version of the Agent is returned. Use the query parameters
+        `version_id` or `environment` to target a specific version of the Agent.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Agent.
+
+        version_id : typing.Optional[str]
+            A specific Version ID of the Agent to retrieve.
+
+        environment : typing.Optional[str]
+            Name of the Environment to retrieve a deployed Version from.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        HttpResponse[None]
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            f"agents/{jsonable_encoder(id)}/serialize",
+            method="GET",
+            params={
+                "version_id": version_id,
+                "environment": environment,
+            },
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return HttpResponse(response=_response, data=None)
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def deserialize(
+        self, *, agent: str, request_options: typing.Optional[RequestOptions] = None
+    ) -> HttpResponse[AgentKernelRequest]:
+        """
+        Deserialize an Agent from the .agent file format.
+
+        This returns a subset of the attributes required by an Agent.
+        This subset defines the Agent version (e.g. `model`, `temperature`, etc.).
+
+        Parameters
+        ----------
+        agent : str
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
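+
+        Examples
+        --------
+        A minimal usage sketch via the top-level client (the serialized
+        contents are an illustrative placeholder; in practice, pass the text
+        of a .agent file, e.g. one produced by `serialize`):
+
+        from humanloop import Humanloop
+
+        client = Humanloop(
+            api_key="YOUR_API_KEY",
+        )
+        agent_kernel = client.agents.deserialize(
+            agent="...",
+        )
+        print(agent_kernel.model)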
+ + Returns + ------- + HttpResponse[AgentKernelRequest] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + "agents/deserialize", + method="POST", + json={ + "agent": agent, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + AgentKernelRequest, + construct_type( + type_=AgentKernelRequest, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + +class AsyncRawAgentsClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def log( + self, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + run_id: typing.Optional[str] = OMIT, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + output_message: typing.Optional[ChatMessageParams] = OMIT, + prompt_tokens: typing.Optional[int] = OMIT, + reasoning_tokens: typing.Optional[int] = OMIT, + output_tokens: typing.Optional[int] = OMIT, + prompt_cost: typing.Optional[float] = OMIT, + output_cost: typing.Optional[float] = OMIT, + finish_reason: typing.Optional[str] = OMIT, + messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, + tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT, + agent: typing.Optional[AgentKernelRequestParams] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + output: typing.Optional[str] = OMIT, + created_at: typing.Optional[dt.datetime] = OMIT, + error: typing.Optional[str] = OMIT, + provider_latency: typing.Optional[float] = OMIT, + stdout: typing.Optional[str] = OMIT, + provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + source_datapoint_id: typing.Optional[str] = OMIT, + trace_parent_id: typing.Optional[str] = OMIT, + user: typing.Optional[str] = OMIT, + agent_log_request_environment: typing.Optional[str] = OMIT, + save: typing.Optional[bool] = OMIT, + log_id: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[CreateAgentLogResponse]: + """ + Create an Agent Log. + + You can use query parameters `version_id`, or `environment`, to target + an existing version of the Agent. Otherwise, the default deployed version will be chosen. + + If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete` + in order to trigger Evaluators. + + Parameters + ---------- + version_id : typing.Optional[str] + A specific Version ID of the Agent to log to. 
+
+        environment : typing.Optional[str]
+            Name of the Environment identifying a deployed version to log to.
+
+        run_id : typing.Optional[str]
+            Unique identifier for the Run to associate the Log to.
+
+        path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Agent.
+
+        output_message : typing.Optional[ChatMessageParams]
+            The message returned by the provider.
+
+        prompt_tokens : typing.Optional[int]
+            Number of tokens in the prompt used to generate the output.
+
+        reasoning_tokens : typing.Optional[int]
+            Number of reasoning tokens used to generate the output.
+
+        output_tokens : typing.Optional[int]
+            Number of tokens in the output generated by the model.
+
+        prompt_cost : typing.Optional[float]
+            Cost in dollars associated with the tokens in the prompt.
+
+        output_cost : typing.Optional[float]
+            Cost in dollars associated with the tokens in the output.
+
+        finish_reason : typing.Optional[str]
+            Reason the generation finished.
+
+        messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            The messages passed to the provider chat endpoint.
+
+        tool_choice : typing.Optional[AgentLogRequestToolChoiceParams]
+            Controls how the model uses tools. The following options are supported:
+            - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+            - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+            - `'required'` means the model must call one or more of the provided tools.
+            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+        agent : typing.Optional[AgentKernelRequestParams]
+            Details of your Agent. A new Agent version will be created if the provided details are new.
+
+        start_time : typing.Optional[dt.datetime]
+            When the logged event started.
+
+        end_time : typing.Optional[dt.datetime]
+            When the logged event ended.
+
+        output : typing.Optional[str]
+            Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+        created_at : typing.Optional[dt.datetime]
+            User-defined timestamp for when the log was created.
+
+        error : typing.Optional[str]
+            Error message if the log is an error.
+
+        provider_latency : typing.Optional[float]
+            Duration of the logged event in seconds.
+
+        stdout : typing.Optional[str]
+            Captured log and debug statements.
+
+        provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw request sent to the provider.
+
+        provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw response received from the provider.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the prompt template.
+
+        source : typing.Optional[str]
+            Identifies where the model was called from.
+
+        metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Any additional metadata to record.
+
+        log_status : typing.Optional[LogStatus]
+            Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+        source_datapoint_id : typing.Optional[str]
+            Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs with Evaluations. If provided, Humanloop will automatically associate this Log with Evaluations that require a Log for this Datapoint-Version pair.
+
+        trace_parent_id : typing.Optional[str]
+            The ID of the parent Log to nest this Log under in a Trace.
+
+        user : typing.Optional[str]
+            End-user ID related to the Log.
+
+        agent_log_request_environment : typing.Optional[str]
+            The name of the Environment the Log is associated with.
+
+        save : typing.Optional[bool]
+            Whether the request/response payloads will be stored on Humanloop.
+
+        log_id : typing.Optional[str]
+            This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AsyncHttpResponse[CreateAgentLogResponse]
+            Successful Response
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "agents/log",
+            method="POST",
+            params={
+                "version_id": version_id,
+                "environment": environment,
+            },
+            json={
+                "run_id": run_id,
+                "path": path,
+                "id": id,
+                "output_message": convert_and_respect_annotation_metadata(
+                    object_=output_message, annotation=ChatMessageParams, direction="write"
+                ),
+                "prompt_tokens": prompt_tokens,
+                "reasoning_tokens": reasoning_tokens,
+                "output_tokens": output_tokens,
+                "prompt_cost": prompt_cost,
+                "output_cost": output_cost,
+                "finish_reason": finish_reason,
+                "messages": convert_and_respect_annotation_metadata(
+                    object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+                ),
+                "tool_choice": convert_and_respect_annotation_metadata(
+                    object_=tool_choice, annotation=AgentLogRequestToolChoiceParams, direction="write"
+                ),
+                "agent": convert_and_respect_annotation_metadata(
+                    object_=agent, annotation=AgentKernelRequestParams, direction="write"
+                ),
+                "start_time": start_time,
+                "end_time": end_time,
+                "output": output,
+                "created_at": created_at,
+                "error": error,
+                "provider_latency": provider_latency,
+                "stdout": stdout,
+                "provider_request": provider_request,
+                "provider_response": provider_response,
+                "inputs": inputs,
+                "source": source,
+                "metadata": metadata,
+                "log_status": log_status,
+                "source_datapoint_id": source_datapoint_id,
+                "trace_parent_id": trace_parent_id,
+                "user": user,
+                "environment": agent_log_request_environment,
+                "save": save,
+                "log_id": log_id,
+            },
+            headers={
+                "content-type": "application/json",
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                _data = typing.cast(
+                    CreateAgentLogResponse,
+                    construct_type(
+                        type_=CreateAgentLogResponse,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+                return AsyncHttpResponse(response=_response, data=_data)
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def update_log(
+        self,
+        id: str,
+        log_id: str,
+        *,
+        messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+        output_message: typing.Optional[ChatMessageParams] = OMIT,
+        inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        output: typing.Optional[str] = OMIT,
+        error: typing.Optional[str] = OMIT,
+        log_status: typing.Optional[LogStatus] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> AsyncHttpResponse[LogResponse]:
+        """
+        Update a Log.
+
+        Update the details of a Log with the given ID.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Agent.
+
+        log_id : str
+            Unique identifier for the Log.
+
+        messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            List of chat messages that were used as an input to the Agent.
+
+        output_message : typing.Optional[ChatMessageParams]
+            The output message returned by this Agent.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the Agent Log.
+
+        output : typing.Optional[str]
+            The output of the Agent Log. Provide None to unset the existing `output` value. Provide either this, `output_message` or `error`.
+
+        error : typing.Optional[str]
+            The error message of the Agent Log. Provide None to unset the existing `error` value. Provide either this, `output_message` or `output`.
+
+        log_status : typing.Optional[LogStatus]
+            Status of the Agent Log. When an Agent Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AsyncHttpResponse[LogResponse]
+            Successful Response
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            f"agents/{jsonable_encoder(id)}/log/{jsonable_encoder(log_id)}",
+            method="PATCH",
+            json={
+                "messages": convert_and_respect_annotation_metadata(
+                    object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+                ),
+                "output_message": convert_and_respect_annotation_metadata(
+                    object_=output_message, annotation=ChatMessageParams, direction="write"
+                ),
+                "inputs": inputs,
+                "output": output,
+                "error": error,
+                "log_status": log_status,
+            },
+            headers={
+                "content-type": "application/json",
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                _data = typing.cast(
+                    LogResponse,
+                    construct_type(
+                        type_=LogResponse,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+                return AsyncHttpResponse(response=_response, data=_data)
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    @contextlib.asynccontextmanager
+    async def call_stream(
+        self,
+        *,
+        version_id: typing.Optional[str] = None,
+        environment: typing.Optional[str] = None,
+        path: typing.Optional[str] = OMIT,
+        id: typing.Optional[str] = OMIT,
+        messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+        tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT,
+        agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+        inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        source: typing.Optional[str] = OMIT,
+        metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        start_time: typing.Optional[dt.datetime] = OMIT,
+        end_time: typing.Optional[dt.datetime] = OMIT,
+        log_status: typing.Optional[LogStatus] = OMIT,
+        source_datapoint_id: typing.Optional[str] = OMIT,
+        trace_parent_id: typing.Optional[str] = OMIT,
+        user: typing.Optional[str] = OMIT,
+        agents_call_stream_request_environment: typing.Optional[str] = OMIT,
+        save: typing.Optional[bool] = OMIT,
+        log_id: typing.Optional[str] = OMIT,
+        provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+        return_inputs: typing.Optional[bool] = OMIT,
+        include_trace_children: typing.Optional[bool] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AgentCallStreamResponse]]]:
+        """
+        Call an Agent.
+
+        Calling an Agent calls the model provider before logging
+        the request, responses and metadata to Humanloop.
+
+        You can use query parameters `version_id`, or `environment`, to target
+        an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+        Instead of targeting an existing version explicitly, you can instead pass in
+        Agent details in the request body. In this case, we will check if the details correspond
+        to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+        in the case where you are storing or deriving your Agent details in code.
+
+        Parameters
+        ----------
+        version_id : typing.Optional[str]
+            A specific Version ID of the Agent to log to.
+
+        environment : typing.Optional[str]
+            Name of the Environment identifying a deployed version to log to.
+
+        path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Agent.
+
+        messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            The messages passed to the provider chat endpoint.
+
+        tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams]
+            Controls how the model uses tools. The following options are supported:
+            - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+            - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+            - `'required'` means the model must call one or more of the provided tools.
+            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+        agent : typing.Optional[AgentKernelRequestParams]
+            Details of your Agent. A new Agent version will be created if the provided details are new.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the prompt template.
+
+        source : typing.Optional[str]
+            Identifies where the model was called from.
+
+        metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Any additional metadata to record.
+
+        start_time : typing.Optional[dt.datetime]
+            When the logged event started.
+
+        end_time : typing.Optional[dt.datetime]
+            When the logged event ended.
+
+        log_status : typing.Optional[LogStatus]
+            Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+        source_datapoint_id : typing.Optional[str]
+            Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs with Evaluations. If provided, Humanloop will automatically associate this Log with Evaluations that require a Log for this Datapoint-Version pair.
+
+        trace_parent_id : typing.Optional[str]
+            The ID of the parent Log to nest this Log under in a Trace.
+
+        user : typing.Optional[str]
+            End-user ID related to the Log.
+
+        agents_call_stream_request_environment : typing.Optional[str]
+            The name of the Environment the Log is associated with.
+
+        save : typing.Optional[bool]
+            Whether the request/response payloads will be stored on Humanloop.
+
+        log_id : typing.Optional[str]
+            This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+        provider_api_keys : typing.Optional[ProviderApiKeysParams]
+            API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+        return_inputs : typing.Optional[bool]
+            Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+        include_trace_children : typing.Optional[bool]
+            If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Yields
+        ------
+        typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AgentCallStreamResponse]]]
+
+        """
+        async with self._client_wrapper.httpx_client.stream(
+            "agents/call",
+            method="POST",
+            params={
+                "version_id": version_id,
+                "environment": environment,
+            },
+            json={
+                "path": path,
+                "id": id,
+                "messages": convert_and_respect_annotation_metadata(
+                    object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+                ),
+                "tool_choice": convert_and_respect_annotation_metadata(
+                    object_=tool_choice, annotation=AgentsCallStreamRequestToolChoiceParams, direction="write"
+                ),
+                "agent": convert_and_respect_annotation_metadata(
+                    object_=agent, annotation=AgentKernelRequestParams, direction="write"
+                ),
+                "inputs": inputs,
+                "source": source,
+                "metadata": metadata,
+                "start_time": start_time,
+                "end_time": end_time,
+                "log_status": log_status,
+                "source_datapoint_id": source_datapoint_id,
+                "trace_parent_id": trace_parent_id,
+                "user": user,
+                "environment": agents_call_stream_request_environment,
+                "save": save,
+                "log_id": log_id,
+                "provider_api_keys": convert_and_respect_annotation_metadata(
+                    object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+                ),
+                "return_inputs": return_inputs,
+                "include_trace_children": include_trace_children,
+                "stream": True,
+            },
+            headers={
+                "content-type": "application/json",
+            },
+            request_options=request_options,
+            omit=OMIT,
+        ) as _response:
+
+            async def stream() -> AsyncHttpResponse[typing.AsyncIterator[AgentCallStreamResponse]]:
+                try:
+                    if 200 <= _response.status_code < 300:
+
+                        async def _iter():
+                            # Iterate over the SSE stream, parsing each event's
+                            # `data` payload (JSON) into the declared stream
+                            # response type. Requires the module-level `json` import.
+                            _event_source = httpx_sse.EventSource(_response)
+                            async for _sse in _event_source.aiter_sse():
+                                if _sse.data is None:
+                                    return
+                                try:
+                                    yield typing.cast(
+                                        AgentCallStreamResponse,
+                                        construct_type(
+                                            type_=AgentCallStreamResponse,  # type: ignore
+                                            object_=json.loads(_sse.data),
+                                        ),
+                                    )
+                                except Exception:
+                                    pass
+                            return
+
+                        return AsyncHttpResponse(response=_response, data=_iter())
+                    await _response.aread()
+                    if _response.status_code == 422:
+                        raise UnprocessableEntityError(
+                            typing.cast(
+                                HttpValidationError,
+                                construct_type(
+                                    type_=HttpValidationError,  # type: ignore
+                                    object_=_response.json(),
+                                ),
+                            )
+                        )
+                    _response_json = _response.json()
+                except JSONDecodeError:
+                    raise ApiError(status_code=_response.status_code, body=_response.text)
+                raise ApiError(status_code=_response.status_code, body=_response_json)
+
+            yield await stream()
+
+    async def call(
+        self,
+        *,
+        version_id: typing.Optional[str] = None,
+        environment: typing.Optional[str] = None,
+        path: typing.Optional[str] = OMIT,
+        id: typing.Optional[str] = OMIT,
+        messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+        tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT,
+        agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+        inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        source: typing.Optional[str] = OMIT,
+        metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        start_time: typing.Optional[dt.datetime] = OMIT,
+        end_time: typing.Optional[dt.datetime] = OMIT,
+        log_status: typing.Optional[LogStatus] = OMIT,
+        source_datapoint_id: typing.Optional[str] = OMIT,
+        trace_parent_id: typing.Optional[str] = OMIT,
+        user: typing.Optional[str] = OMIT,
+        agents_call_request_environment: typing.Optional[str] = OMIT,
+        save: typing.Optional[bool] = OMIT,
+        log_id: typing.Optional[str] = OMIT,
+        provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+        return_inputs: typing.Optional[bool] = OMIT,
+        include_trace_children: typing.Optional[bool] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> AsyncHttpResponse[AgentCallResponse]:
+        """
+        Call an Agent.
+
+        Calling an Agent calls the model provider before logging
+        the request, responses and metadata to Humanloop.
+
+        You can use query parameters `version_id`, or `environment`, to target
+        an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+        Instead of targeting an existing version explicitly, you can instead pass in
+        Agent details in the request body. In this case, we will check if the details correspond
+        to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+        in the case where you are storing or deriving your Agent details in code.
+
+        Parameters
+        ----------
+        version_id : typing.Optional[str]
+            A specific Version ID of the Agent to log to.
+
+        environment : typing.Optional[str]
+            Name of the Environment identifying a deployed version to log to.
+
+        path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Agent.
+
+        messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            The messages passed to the provider chat endpoint.
+
+        tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams]
+            Controls how the model uses tools. The following options are supported:
+            - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+            - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+            - `'required'` means the model must call one or more of the provided tools.
+            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+        agent : typing.Optional[AgentKernelRequestParams]
+            Details of your Agent. A new Agent version will be created if the provided details are new.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the prompt template.
+
+        source : typing.Optional[str]
+            Identifies where the model was called from.
+
+        metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Any additional metadata to record.
+
+        start_time : typing.Optional[dt.datetime]
+            When the logged event started.
+
+        end_time : typing.Optional[dt.datetime]
+            When the logged event ended.
+
+        log_status : typing.Optional[LogStatus]
+            Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+        source_datapoint_id : typing.Optional[str]
+            Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs with Evaluations. If provided, Humanloop will automatically associate this Log with Evaluations that require a Log for this Datapoint-Version pair.
+
+        trace_parent_id : typing.Optional[str]
+            The ID of the parent Log to nest this Log under in a Trace.
+
+        user : typing.Optional[str]
+            End-user ID related to the Log.
+
+        agents_call_request_environment : typing.Optional[str]
+            The name of the Environment the Log is associated with.
+
+        save : typing.Optional[bool]
+            Whether the request/response payloads will be stored on Humanloop.
+
+        log_id : typing.Optional[str]
+            This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+        provider_api_keys : typing.Optional[ProviderApiKeysParams]
+            API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+        return_inputs : typing.Optional[bool]
+            Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+        include_trace_children : typing.Optional[bool]
+            If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
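+
+        Examples
+        --------
+        A minimal sketch (not from the generated SDK), assuming `raw` is an
+        initialized instance of this client; the Agent path and message are
+        illustrative:
+
+            response = await raw.call(
+                path="example/agent",
+                messages=[{"role": "user", "content": "Hello"}],
+            )
+            log = response.data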
+ + Returns + ------- + AsyncHttpResponse[AgentCallResponse] + + """ + _response = await self._client_wrapper.httpx_client.request( + "agents/call", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + }, + json={ + "path": path, + "id": id, + "messages": convert_and_respect_annotation_metadata( + object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" + ), + "tool_choice": convert_and_respect_annotation_metadata( + object_=tool_choice, annotation=AgentsCallRequestToolChoiceParams, direction="write" + ), + "agent": convert_and_respect_annotation_metadata( + object_=agent, annotation=AgentKernelRequestParams, direction="write" + ), + "inputs": inputs, + "source": source, + "metadata": metadata, + "start_time": start_time, + "end_time": end_time, + "log_status": log_status, + "source_datapoint_id": source_datapoint_id, + "trace_parent_id": trace_parent_id, + "user": user, + "environment": agents_call_request_environment, + "save": save, + "log_id": log_id, + "provider_api_keys": convert_and_respect_annotation_metadata( + object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write" + ), + "return_inputs": return_inputs, + "include_trace_children": include_trace_children, + "stream": False, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + AgentCallResponse, + construct_type( + type_=AgentCallResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + @contextlib.asynccontextmanager + async def continue_stream( + self, + *, + log_id: str, + messages: typing.Sequence[ChatMessageParams], + provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, + include_trace_children: typing.Optional[bool] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AgentContinueStreamResponse]]]: + """ + Continue an incomplete Agent call. + + This endpoint allows continuing an existing incomplete Agent call, using the context + from the previous interaction. The Agent will resume processing from where it left off. + + The original log must be in an incomplete state to be continued. + + The messages in the request will be appended + to the original messages in the log. + + Parameters + ---------- + log_id : str + This identifies the Agent Log to continue. + + messages : typing.Sequence[ChatMessageParams] + The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls. + + provider_api_keys : typing.Optional[ProviderApiKeysParams] + API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. 
+
+        include_trace_children : typing.Optional[bool]
+            If true, populate `trace_children` for the returned Agent Log. Defaults to false.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Yields
+        ------
+        typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AgentContinueStreamResponse]]]
+
+        """
+        async with self._client_wrapper.httpx_client.stream(
+            "agents/continue",
+            method="POST",
+            json={
+                "log_id": log_id,
+                "messages": convert_and_respect_annotation_metadata(
+                    object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+                ),
+                "provider_api_keys": convert_and_respect_annotation_metadata(
+                    object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+                ),
+                "include_trace_children": include_trace_children,
+                "stream": True,
+            },
+            headers={
+                "content-type": "application/json",
+            },
+            request_options=request_options,
+            omit=OMIT,
+        ) as _response:
+
+            async def stream() -> AsyncHttpResponse[typing.AsyncIterator[AgentContinueStreamResponse]]:
+                try:
+                    if 200 <= _response.status_code < 300:
+
+                        async def _iter():
+                            # Iterate over the SSE stream, parsing each event's
+                            # `data` payload (JSON) into the declared stream
+                            # response type. Requires the module-level `json` import.
+                            _event_source = httpx_sse.EventSource(_response)
+                            async for _sse in _event_source.aiter_sse():
+                                if _sse.data is None:
+                                    return
+                                try:
+                                    yield typing.cast(
+                                        AgentContinueStreamResponse,
+                                        construct_type(
+                                            type_=AgentContinueStreamResponse,  # type: ignore
+                                            object_=json.loads(_sse.data),
+                                        ),
+                                    )
+                                except Exception:
+                                    pass
+                            return
+
+                        return AsyncHttpResponse(response=_response, data=_iter())
+                    await _response.aread()
+                    if _response.status_code == 422:
+                        raise UnprocessableEntityError(
+                            typing.cast(
+                                HttpValidationError,
+                                construct_type(
+                                    type_=HttpValidationError,  # type: ignore
+                                    object_=_response.json(),
+                                ),
+                            )
+                        )
+                    _response_json = _response.json()
+                except JSONDecodeError:
+                    raise ApiError(status_code=_response.status_code, body=_response.text)
+                raise ApiError(status_code=_response.status_code, body=_response_json)
+
+            yield await stream()
+
+    async def continue_(
+        self,
+        *,
+        log_id: str,
+        messages: typing.Sequence[ChatMessageParams],
+        provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+        include_trace_children: typing.Optional[bool] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> AsyncHttpResponse[AgentContinueResponse]:
+        """
+        Continue an incomplete Agent call.
+
+        This endpoint allows continuing an existing incomplete Agent call, using the context
+        from the previous interaction. The Agent will resume processing from where it left off.
+
+        The original log must be in an incomplete state to be continued.
+
+        The messages in the request will be appended
+        to the original messages in the log.
+
+        Parameters
+        ----------
+        log_id : str
+            This identifies the Agent Log to continue.
+
+        messages : typing.Sequence[ChatMessageParams]
+            The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls.
+
+        provider_api_keys : typing.Optional[ProviderApiKeysParams]
+            API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+        include_trace_children : typing.Optional[bool]
+            If true, populate `trace_children` for the returned Agent Log. Defaults to false.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
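+
+        Examples
+        --------
+        A minimal sketch (not from the generated SDK), assuming `raw` is an
+        initialized instance of this client; the Log ID, tool call ID and tool
+        result are illustrative:
+
+            response = await raw.continue_(
+                log_id="log_123abc",
+                messages=[
+                    {"role": "tool", "tool_call_id": "call_1", "content": "42"},
+                ],
+            )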
+ + Returns + ------- + AsyncHttpResponse[AgentContinueResponse] + + """ + _response = await self._client_wrapper.httpx_client.request( + "agents/continue", + method="POST", + json={ + "log_id": log_id, + "messages": convert_and_respect_annotation_metadata( + object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" + ), + "provider_api_keys": convert_and_respect_annotation_metadata( + object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write" + ), + "include_trace_children": include_trace_children, + "stream": False, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + AgentContinueResponse, + construct_type( + type_=AgentContinueResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def list( + self, + *, + page: typing.Optional[int] = None, + size: typing.Optional[int] = None, + name: typing.Optional[str] = None, + user_filter: typing.Optional[str] = None, + sort_by: typing.Optional[ProjectSortBy] = None, + order: typing.Optional[SortOrder] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[PaginatedDataAgentResponse]: + """ + Get a list of all Agents. + + Parameters + ---------- + page : typing.Optional[int] + Page number for pagination. + + size : typing.Optional[int] + Page size for pagination. Number of Agents to fetch. + + name : typing.Optional[str] + Case-insensitive filter for Agent name. + + user_filter : typing.Optional[str] + Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users. + + sort_by : typing.Optional[ProjectSortBy] + Field to sort Agents by + + order : typing.Optional[SortOrder] + Direction to sort by. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
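+
+        Examples
+        --------
+        A minimal sketch (not from the generated SDK), assuming `raw` is an
+        initialized instance of this client; fetches the first page of up to
+        10 Agents:
+
+            page = (await raw.list(page=1, size=10)).data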
+
+        Returns
+        -------
+        AsyncHttpResponse[PaginatedDataAgentResponse]
+            Successful Response
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            "agents",
+            method="GET",
+            params={
+                "page": page,
+                "size": size,
+                "name": name,
+                "user_filter": user_filter,
+                "sort_by": sort_by,
+                "order": order,
+            },
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                _data = typing.cast(
+                    PaginatedDataAgentResponse,
+                    construct_type(
+                        type_=PaginatedDataAgentResponse,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+                return AsyncHttpResponse(response=_response, data=_data)
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def upsert(
+        self,
+        *,
+        model: str,
+        path: typing.Optional[str] = OMIT,
+        id: typing.Optional[str] = OMIT,
+        endpoint: typing.Optional[ModelEndpoints] = OMIT,
+        template: typing.Optional[AgentRequestTemplateParams] = OMIT,
+        template_language: typing.Optional[TemplateLanguage] = OMIT,
+        provider: typing.Optional[ModelProviders] = OMIT,
+        max_tokens: typing.Optional[int] = OMIT,
+        temperature: typing.Optional[float] = OMIT,
+        top_p: typing.Optional[float] = OMIT,
+        stop: typing.Optional[AgentRequestStopParams] = OMIT,
+        presence_penalty: typing.Optional[float] = OMIT,
+        frequency_penalty: typing.Optional[float] = OMIT,
+        other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        seed: typing.Optional[int] = OMIT,
+        response_format: typing.Optional[ResponseFormatParams] = OMIT,
+        reasoning_effort: typing.Optional[AgentRequestReasoningEffortParams] = OMIT,
+        tools: typing.Optional[typing.Sequence[AgentRequestToolsItemParams]] = OMIT,
+        attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        max_iterations: typing.Optional[int] = OMIT,
+        version_name: typing.Optional[str] = OMIT,
+        version_description: typing.Optional[str] = OMIT,
+        description: typing.Optional[str] = OMIT,
+        tags: typing.Optional[typing.Sequence[str]] = OMIT,
+        readme: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> AsyncHttpResponse[AgentResponse]:
+        """
+        Create an Agent or update it with a new version if it already exists.
+
+        Agents are identified by the `ID` or their `path`. The parameters (e.g. the template, temperature, model) and
+        tools determine the versions of the Agent.
+
+        You can provide `version_name` and `version_description` to identify and describe your versions.
+        Version names must be unique within an Agent - attempting to create a version with a name
+        that already exists will result in a 409 Conflict error.
+
+        Parameters
+        ----------
+        model : str
+            The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+
+        path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Agent.
+
+        endpoint : typing.Optional[ModelEndpoints]
+            The provider model endpoint used.
+
+        template : typing.Optional[AgentRequestTemplateParams]
+            The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+            For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+            For completion models, provide a prompt template as a string.
+
+            Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+
+        template_language : typing.Optional[TemplateLanguage]
+            The template language to use for rendering the template.
+
+        provider : typing.Optional[ModelProviders]
+            The company providing the underlying model service.
+
+        max_tokens : typing.Optional[int]
+            The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt.
+
+        temperature : typing.Optional[float]
+            What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+
+        top_p : typing.Optional[float]
+            An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+
+        stop : typing.Optional[AgentRequestStopParams]
+            The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+
+        presence_penalty : typing.Optional[float]
+            Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+
+        frequency_penalty : typing.Optional[float]
+            Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+
+        other : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Other parameter values to be passed to the provider call.
+
+        seed : typing.Optional[int]
+            If specified, the model will make a best effort to sample deterministically, but it is not guaranteed.
+
+        response_format : typing.Optional[ResponseFormatParams]
+            The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+
+        reasoning_effort : typing.Optional[AgentRequestReasoningEffortParams]
+            Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
+
+        tools : typing.Optional[typing.Sequence[AgentRequestToolsItemParams]]
+
+        attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used.
+
+        max_iterations : typing.Optional[int]
+            The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
+
+        version_name : typing.Optional[str]
+            Unique name for the Agent version. Each Agent can only have one version with a given name.
+
+        version_description : typing.Optional[str]
+            Description of the Version.
+
+        description : typing.Optional[str]
+            Description of the Agent.
+
+        tags : typing.Optional[typing.Sequence[str]]
+            List of tags associated with this Agent.
+
+        readme : typing.Optional[str]
+            Long description of the Agent.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
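+
+        Examples
+        --------
+        A minimal sketch (not from the generated SDK), assuming `raw` is an
+        initialized instance of this client; the path, model and template
+        shown are illustrative:
+
+            agent = (
+                await raw.upsert(
+                    path="example/agent",
+                    model="gpt-4o",
+                    template=[{"role": "system", "content": "You are a helpful assistant."}],
+                    version_name="v1",
+                )
+            ).data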
+ + Returns + ------- + AsyncHttpResponse[AgentResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + "agents", + method="POST", + json={ + "path": path, + "id": id, + "model": model, + "endpoint": endpoint, + "template": convert_and_respect_annotation_metadata( + object_=template, annotation=AgentRequestTemplateParams, direction="write" + ), + "template_language": template_language, + "provider": provider, + "max_tokens": max_tokens, + "temperature": temperature, + "top_p": top_p, + "stop": convert_and_respect_annotation_metadata( + object_=stop, annotation=AgentRequestStopParams, direction="write" + ), + "presence_penalty": presence_penalty, + "frequency_penalty": frequency_penalty, + "other": other, + "seed": seed, + "response_format": convert_and_respect_annotation_metadata( + object_=response_format, annotation=ResponseFormatParams, direction="write" + ), + "reasoning_effort": convert_and_respect_annotation_metadata( + object_=reasoning_effort, annotation=AgentRequestReasoningEffortParams, direction="write" + ), + "tools": convert_and_respect_annotation_metadata( + object_=tools, annotation=typing.Sequence[AgentRequestToolsItemParams], direction="write" + ), + "attributes": attributes, + "max_iterations": max_iterations, + "version_name": version_name, + "version_description": version_description, + "description": description, + "tags": tags, + "readme": readme, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + AgentResponse, + construct_type( + type_=AgentResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def delete_agent_version( + self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[None]: + """ + Delete a version of the Agent. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + version_id : str + Unique identifier for the specific version of the Agent. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[None] + """ + _response = await self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return AsyncHttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def patch_agent_version( + self, + id: str, + version_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[AgentResponse]: + """ + Update the name or description of the Agent version. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + version_id : str + Unique identifier for the specific version of the Agent. + + name : typing.Optional[str] + Name of the version. + + description : typing.Optional[str] + Description of the version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[AgentResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", + method="PATCH", + json={ + "name": name, + "description": description, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + AgentResponse, + construct_type( + type_=AgentResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[AgentResponse]: + """ + Retrieve the Agent with the given ID. + + By default, the deployed version of the Agent is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Agent. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + version_id : typing.Optional[str] + A specific Version ID of the Agent to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
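+
+        Examples
+        --------
+        A minimal sketch (not from the generated SDK), assuming `raw` is an
+        initialized instance of this client; targets the version deployed to
+        an Environment named "production" (an illustrative name):
+
+            agent = (await raw.get(id="ag_123abc", environment="production")).data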
+
+        Returns
+        -------
+        AsyncHttpResponse[AgentResponse]
+            Successful Response
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            f"agents/{jsonable_encoder(id)}",
+            method="GET",
+            params={
+                "version_id": version_id,
+                "environment": environment,
+            },
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                _data = typing.cast(
+                    AgentResponse,
+                    construct_type(
+                        type_=AgentResponse,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+                return AsyncHttpResponse(response=_response, data=_data)
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def delete(
+        self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+    ) -> AsyncHttpResponse[None]:
+        """
+        Delete the Agent with the given ID.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Agent.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AsyncHttpResponse[None]
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            f"agents/{jsonable_encoder(id)}",
+            method="DELETE",
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return AsyncHttpResponse(response=_response, data=None)
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def move(
+        self,
+        id: str,
+        *,
+        path: typing.Optional[str] = OMIT,
+        name: typing.Optional[str] = OMIT,
+        directory_id: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> AsyncHttpResponse[AgentResponse]:
+        """
+        Move the Agent to a different path or change the name.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Agent.
+
+        path : typing.Optional[str]
+            Path of the Agent including the Agent name, which is used as a unique identifier.
+
+        name : typing.Optional[str]
+            Name of the Agent.
+
+        directory_id : typing.Optional[str]
+            Unique identifier for the Directory to move the Agent to. Starts with `dir_`.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
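+
+        Examples
+        --------
+        A minimal sketch (not from the generated SDK), assuming `raw` is an
+        initialized instance of this client; moves the Agent to a new folder
+        (path shown is illustrative):
+
+            agent = (await raw.move(id="ag_123abc", path="new-folder/agent")).data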
+
+        Returns
+        -------
+        AsyncHttpResponse[AgentResponse]
+            Successful Response
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            f"agents/{jsonable_encoder(id)}",
+            method="PATCH",
+            json={
+                "path": path,
+                "name": name,
+                "directory_id": directory_id,
+            },
+            headers={
+                "content-type": "application/json",
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                _data = typing.cast(
+                    AgentResponse,
+                    construct_type(
+                        type_=AgentResponse,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+                return AsyncHttpResponse(response=_response, data=_data)
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def list_versions(
+        self,
+        id: str,
+        *,
+        evaluator_aggregates: typing.Optional[bool] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> AsyncHttpResponse[ListAgents]:
+        """
+        Get a list of all the versions of an Agent.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Agent.
+
+        evaluator_aggregates : typing.Optional[bool]
+            Whether to include Evaluator aggregate results for the versions in the response.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AsyncHttpResponse[ListAgents]
+            Successful Response
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            f"agents/{jsonable_encoder(id)}/versions",
+            method="GET",
+            params={
+                "evaluator_aggregates": evaluator_aggregates,
+            },
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                _data = typing.cast(
+                    ListAgents,
+                    construct_type(
+                        type_=ListAgents,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+                return AsyncHttpResponse(response=_response, data=_data)
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def set_deployment(
+        self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
+    ) -> AsyncHttpResponse[AgentResponse]:
+        """
+        Deploy Agent to an Environment.
+
+        Set the deployed version for the specified Environment. This Agent
+        will be used for calls made to the Agent in this Environment.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Agent.
+
+        environment_id : str
+            Unique identifier for the Environment to deploy the Version to.
+
+        version_id : str
+            Unique identifier for the specific version of the Agent.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
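+
+        Examples
+        --------
+        A minimal sketch (not from the generated SDK), assuming `raw` is an
+        initialized instance of this client; the IDs shown are illustrative:
+
+            agent = (
+                await raw.set_deployment(
+                    id="ag_123abc",
+                    environment_id="env_456def",
+                    version_id="agv_789ghi",
+                )
+            ).data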
+ + Returns + ------- + AsyncHttpResponse[AgentResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="POST", + params={ + "version_id": version_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + AgentResponse, + construct_type( + type_=AgentResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def remove_deployment( + self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[None]: + """ + Remove deployed Agent from the Environment. + + Remove the deployed version for the specified Environment. This Agent + will no longer be used for calls made to the Agent in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + environment_id : str + Unique identifier for the Environment to remove the deployment from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[None] + """ + _response = await self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return AsyncHttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def list_environments( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[typing.List[FileEnvironmentResponse]]: + """ + List all Environments and their deployed versions for the Agent. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
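+
+        Examples
+        --------
+        A minimal sketch (not from the generated SDK), assuming `raw` is an
+        initialized instance of this client:
+
+            environments = (await raw.list_environments(id="ag_123abc")).data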
+ + Returns + ------- + AsyncHttpResponse[typing.List[FileEnvironmentResponse]] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}/environments", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + typing.List[FileEnvironmentResponse], + construct_type( + type_=typing.List[FileEnvironmentResponse], # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def update_monitoring( + self, + id: str, + *, + activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, + deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[AgentResponse]: + """ + Activate and deactivate Evaluators for monitoring the Agent. + + An activated Evaluator will automatically be run on all new Logs + within the Agent for monitoring purposes. + + Parameters + ---------- + id : str + + activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] + Evaluators to activate for Monitoring. These will be automatically run on new Logs. + + deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] + Evaluators to deactivate. These will not be run on new Logs. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
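As a usage sketch for the monitoring endpoint documented above: the activate and deactivate items are unions, and the dict shape below assumes the version-pinned variant (a MonitoringEvaluatorVersionRequest with an `evaluator_version_id` field); all IDs are placeholders:

# Sketch: switch monitoring Evaluators on and off for an Agent.
# Assumes the version-pinned variant of the activate-item union.
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

client.agents.update_monitoring(
    id="ag_1234",
    activate=[{"evaluator_version_id": "evv_new"}],
    deactivate=[{"evaluator_version_id": "evv_old"}],
)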
+
+        Returns
+        -------
+        AsyncHttpResponse[AgentResponse]
+            Successful Response
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            f"agents/{jsonable_encoder(id)}/evaluators",
+            method="POST",
+            json={
+                "activate": convert_and_respect_annotation_metadata(
+                    object_=activate,
+                    annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams],
+                    direction="write",
+                ),
+                "deactivate": convert_and_respect_annotation_metadata(
+                    object_=deactivate,
+                    annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams],
+                    direction="write",
+                ),
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                _data = typing.cast(
+                    AgentResponse,
+                    construct_type(
+                        type_=AgentResponse,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+                return AsyncHttpResponse(response=_response, data=_data)
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def serialize(
+        self,
+        id: str,
+        *,
+        version_id: typing.Optional[str] = None,
+        environment: typing.Optional[str] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> AsyncHttpResponse[None]:
+        """
+        Serialize an Agent to the .agent file format.
+
+        Useful for storing the Agent with your code in a version control system,
+        or for editing with an AI tool.
+
+        By default, the deployed version of the Agent is returned. Use the query parameters
+        `version_id` or `environment` to target a specific version of the Agent.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Agent.
+
+        version_id : typing.Optional[str]
+            A specific Version ID of the Agent to retrieve.
+
+        environment : typing.Optional[str]
+            Name of the Environment to retrieve a deployed Version from.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AsyncHttpResponse[None]
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            f"agents/{jsonable_encoder(id)}/serialize",
+            method="GET",
+            params={
+                "version_id": version_id,
+                "environment": environment,
+            },
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return AsyncHttpResponse(response=_response, data=None)
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def deserialize(
+        self, *, agent: str, request_options: typing.Optional[RequestOptions] = None
+    ) -> AsyncHttpResponse[AgentKernelRequest]:
+        """
+        Deserialize an Agent from the .agent file format.
+
+        This returns a subset of the attributes required by an Agent.
+        This subset defines the Agent version (e.g. `model`, `temperature`, etc.).
+
+        Parameters
+        ----------
+        agent : str
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
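To make the serialize/deserialize pair concrete, a small sketch; the .agent file path is hypothetical, and it assumes the high-level client exposes `deserialize` the same way the Prompts client does:

# Sketch: rehydrate a version-controlled .agent file into the kernel
# attributes (e.g. model, temperature) that define an Agent version.
from pathlib import Path

from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

raw = Path("agents/support_agent.agent").read_text()  # hypothetical path
kernel = client.agents.deserialize(agent=raw)
print(kernel)  # an AgentKernelRequest defining the version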
+ + Returns + ------- + AsyncHttpResponse[AgentKernelRequest] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + "agents/deserialize", + method="POST", + json={ + "agent": agent, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + AgentKernelRequest, + construct_type( + type_=AgentKernelRequest, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/humanloop/agents/requests/__init__.py b/src/humanloop/agents/requests/__init__.py new file mode 100644 index 00000000..78a8f9ec --- /dev/null +++ b/src/humanloop/agents/requests/__init__.py @@ -0,0 +1,19 @@ +# This file was auto-generated by Fern from our API Definition. + +from .agent_log_request_tool_choice import AgentLogRequestToolChoiceParams +from .agent_request_reasoning_effort import AgentRequestReasoningEffortParams +from .agent_request_stop import AgentRequestStopParams +from .agent_request_template import AgentRequestTemplateParams +from .agent_request_tools_item import AgentRequestToolsItemParams +from .agents_call_request_tool_choice import AgentsCallRequestToolChoiceParams +from .agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoiceParams + +__all__ = [ + "AgentLogRequestToolChoiceParams", + "AgentRequestReasoningEffortParams", + "AgentRequestStopParams", + "AgentRequestTemplateParams", + "AgentRequestToolsItemParams", + "AgentsCallRequestToolChoiceParams", + "AgentsCallStreamRequestToolChoiceParams", +] diff --git a/src/humanloop/agents/requests/agent_log_request_tool_choice.py b/src/humanloop/agents/requests/agent_log_request_tool_choice.py new file mode 100644 index 00000000..584112aa --- /dev/null +++ b/src/humanloop/agents/requests/agent_log_request_tool_choice.py @@ -0,0 +1,8 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ...requests.tool_choice import ToolChoiceParams + +AgentLogRequestToolChoiceParams = typing.Union[ + typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams +] diff --git a/src/humanloop/agents/requests/agent_request_reasoning_effort.py b/src/humanloop/agents/requests/agent_request_reasoning_effort.py new file mode 100644 index 00000000..98a991cd --- /dev/null +++ b/src/humanloop/agents/requests/agent_request_reasoning_effort.py @@ -0,0 +1,6 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ...types.open_ai_reasoning_effort import OpenAiReasoningEffort + +AgentRequestReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int] diff --git a/src/humanloop/agents/requests/agent_request_stop.py b/src/humanloop/agents/requests/agent_request_stop.py new file mode 100644 index 00000000..3970451c --- /dev/null +++ b/src/humanloop/agents/requests/agent_request_stop.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +AgentRequestStopParams = typing.Union[str, typing.Sequence[str]] diff --git a/src/humanloop/agents/requests/agent_request_template.py b/src/humanloop/agents/requests/agent_request_template.py new file mode 100644 index 00000000..c251ce8e --- /dev/null +++ b/src/humanloop/agents/requests/agent_request_template.py @@ -0,0 +1,6 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ...requests.chat_message import ChatMessageParams + +AgentRequestTemplateParams = typing.Union[str, typing.Sequence[ChatMessageParams]] diff --git a/src/humanloop/agents/requests/agent_request_tools_item.py b/src/humanloop/agents/requests/agent_request_tools_item.py new file mode 100644 index 00000000..20cde136 --- /dev/null +++ b/src/humanloop/agents/requests/agent_request_tools_item.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ...requests.agent_linked_file_request import AgentLinkedFileRequestParams +from ...requests.agent_inline_tool import AgentInlineToolParams + +AgentRequestToolsItemParams = typing.Union[AgentLinkedFileRequestParams, AgentInlineToolParams] diff --git a/src/humanloop/agents/requests/agents_call_request_tool_choice.py b/src/humanloop/agents/requests/agents_call_request_tool_choice.py new file mode 100644 index 00000000..1e468fa0 --- /dev/null +++ b/src/humanloop/agents/requests/agents_call_request_tool_choice.py @@ -0,0 +1,8 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ...requests.tool_choice import ToolChoiceParams + +AgentsCallRequestToolChoiceParams = typing.Union[ + typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams +] diff --git a/src/humanloop/agents/requests/agents_call_stream_request_tool_choice.py b/src/humanloop/agents/requests/agents_call_stream_request_tool_choice.py new file mode 100644 index 00000000..bd068b6f --- /dev/null +++ b/src/humanloop/agents/requests/agents_call_stream_request_tool_choice.py @@ -0,0 +1,8 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ...requests.tool_choice import ToolChoiceParams + +AgentsCallStreamRequestToolChoiceParams = typing.Union[ + typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams +] diff --git a/src/humanloop/agents/types/__init__.py b/src/humanloop/agents/types/__init__.py new file mode 100644 index 00000000..73d98669 --- /dev/null +++ b/src/humanloop/agents/types/__init__.py @@ -0,0 +1,19 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from .agent_log_request_tool_choice import AgentLogRequestToolChoice +from .agent_request_reasoning_effort import AgentRequestReasoningEffort +from .agent_request_stop import AgentRequestStop +from .agent_request_template import AgentRequestTemplate +from .agent_request_tools_item import AgentRequestToolsItem +from .agents_call_request_tool_choice import AgentsCallRequestToolChoice +from .agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoice + +__all__ = [ + "AgentLogRequestToolChoice", + "AgentRequestReasoningEffort", + "AgentRequestStop", + "AgentRequestTemplate", + "AgentRequestToolsItem", + "AgentsCallRequestToolChoice", + "AgentsCallStreamRequestToolChoice", +] diff --git a/src/humanloop/agents/types/agent_log_request_tool_choice.py b/src/humanloop/agents/types/agent_log_request_tool_choice.py new file mode 100644 index 00000000..bfb576c2 --- /dev/null +++ b/src/humanloop/agents/types/agent_log_request_tool_choice.py @@ -0,0 +1,8 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ...types.tool_choice import ToolChoice + +AgentLogRequestToolChoice = typing.Union[ + typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice +] diff --git a/src/humanloop/agents/types/agent_request_reasoning_effort.py b/src/humanloop/agents/types/agent_request_reasoning_effort.py new file mode 100644 index 00000000..b4267202 --- /dev/null +++ b/src/humanloop/agents/types/agent_request_reasoning_effort.py @@ -0,0 +1,6 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ...types.open_ai_reasoning_effort import OpenAiReasoningEffort + +AgentRequestReasoningEffort = typing.Union[OpenAiReasoningEffort, int] diff --git a/src/humanloop/agents/types/agent_request_stop.py b/src/humanloop/agents/types/agent_request_stop.py new file mode 100644 index 00000000..325a6b2e --- /dev/null +++ b/src/humanloop/agents/types/agent_request_stop.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +AgentRequestStop = typing.Union[str, typing.List[str]] diff --git a/src/humanloop/agents/types/agent_request_template.py b/src/humanloop/agents/types/agent_request_template.py new file mode 100644 index 00000000..f6474824 --- /dev/null +++ b/src/humanloop/agents/types/agent_request_template.py @@ -0,0 +1,6 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ...types.chat_message import ChatMessage + +AgentRequestTemplate = typing.Union[str, typing.List[ChatMessage]] diff --git a/src/humanloop/agents/types/agent_request_tools_item.py b/src/humanloop/agents/types/agent_request_tools_item.py new file mode 100644 index 00000000..e6c54b88 --- /dev/null +++ b/src/humanloop/agents/types/agent_request_tools_item.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ...types.agent_linked_file_request import AgentLinkedFileRequest +from ...types.agent_inline_tool import AgentInlineTool + +AgentRequestToolsItem = typing.Union[AgentLinkedFileRequest, AgentInlineTool] diff --git a/src/humanloop/agents/types/agents_call_request_tool_choice.py b/src/humanloop/agents/types/agents_call_request_tool_choice.py new file mode 100644 index 00000000..6dee5a04 --- /dev/null +++ b/src/humanloop/agents/types/agents_call_request_tool_choice.py @@ -0,0 +1,8 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing +from ...types.tool_choice import ToolChoice + +AgentsCallRequestToolChoice = typing.Union[ + typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice +] diff --git a/src/humanloop/agents/types/agents_call_stream_request_tool_choice.py b/src/humanloop/agents/types/agents_call_stream_request_tool_choice.py new file mode 100644 index 00000000..83d264f0 --- /dev/null +++ b/src/humanloop/agents/types/agents_call_stream_request_tool_choice.py @@ -0,0 +1,8 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ...types.tool_choice import ToolChoice + +AgentsCallStreamRequestToolChoice = typing.Union[ + typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice +] diff --git a/src/humanloop/base_client.py b/src/humanloop/base_client.py index bf72be6a..a11298b8 100644 --- a/src/humanloop/base_client.py +++ b/src/humanloop/base_client.py @@ -11,6 +11,7 @@ from .datasets.client import DatasetsClient from .evaluators.client import EvaluatorsClient from .flows.client import FlowsClient +from .agents.client import AgentsClient from .directories.client import DirectoriesClient from .files.client import FilesClient from .evaluations.client import EvaluationsClient @@ -21,6 +22,7 @@ from .datasets.client import AsyncDatasetsClient from .evaluators.client import AsyncEvaluatorsClient from .flows.client import AsyncFlowsClient +from .agents.client import AsyncAgentsClient from .directories.client import AsyncDirectoriesClient from .files.client import AsyncFilesClient from .evaluations.client import AsyncEvaluationsClient @@ -96,6 +98,7 @@ def __init__( self.datasets = DatasetsClient(client_wrapper=self._client_wrapper) self.evaluators = EvaluatorsClient(client_wrapper=self._client_wrapper) self.flows = FlowsClient(client_wrapper=self._client_wrapper) + self.agents = AgentsClient(client_wrapper=self._client_wrapper) self.directories = DirectoriesClient(client_wrapper=self._client_wrapper) self.files = FilesClient(client_wrapper=self._client_wrapper) self.evaluations = EvaluationsClient(client_wrapper=self._client_wrapper) @@ -171,6 +174,7 @@ def __init__( self.datasets = AsyncDatasetsClient(client_wrapper=self._client_wrapper) self.evaluators = AsyncEvaluatorsClient(client_wrapper=self._client_wrapper) self.flows = AsyncFlowsClient(client_wrapper=self._client_wrapper) + self.agents = AsyncAgentsClient(client_wrapper=self._client_wrapper) self.directories = AsyncDirectoriesClient(client_wrapper=self._client_wrapper) self.files = AsyncFilesClient(client_wrapper=self._client_wrapper) self.evaluations = AsyncEvaluationsClient(client_wrapper=self._client_wrapper) diff --git a/src/humanloop/core/client_wrapper.py b/src/humanloop/core/client_wrapper.py index f25dc2ca..94cf9db0 100644 --- a/src/humanloop/core/client_wrapper.py +++ b/src/humanloop/core/client_wrapper.py @@ -14,10 +14,10 @@ def __init__(self, *, api_key: str, base_url: str, timeout: typing.Optional[floa def get_headers(self) -> typing.Dict[str, str]: headers: typing.Dict[str, str] = { - "User-Agent": "humanloop/0.8.35", + "User-Agent": "humanloop/0.8.36", "X-Fern-Language": "Python", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.35", + "X-Fern-SDK-Version": "0.8.36", } headers["X-API-KEY"] = self.api_key return headers diff --git a/src/humanloop/files/client.py b/src/humanloop/files/client.py index c07358d0..693b46cb 100644 --- a/src/humanloop/files/client.py +++ b/src/humanloop/files/client.py @@ -7,8 +7,8 @@ from 
..types.project_sort_by import ProjectSortBy from ..types.sort_order import SortOrder from ..core.request_options import RequestOptions -from ..types.paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response import ( - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse, +from ..types.paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response import ( + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse, ) from .types.retrieve_by_path_files_retrieve_by_path_post_response import RetrieveByPathFilesRetrieveByPathPostResponse from ..core.client_wrapper import AsyncClientWrapper @@ -45,7 +45,7 @@ def list_files( sort_by: typing.Optional[ProjectSortBy] = None, order: typing.Optional[SortOrder] = None, request_options: typing.Optional[RequestOptions] = None, - ) -> PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse: + ) -> PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse: """ Get a paginated list of files. @@ -80,7 +80,7 @@ def list_files( Returns ------- - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse Successful Response Examples @@ -175,7 +175,7 @@ async def list_files( sort_by: typing.Optional[ProjectSortBy] = None, order: typing.Optional[SortOrder] = None, request_options: typing.Optional[RequestOptions] = None, - ) -> PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse: + ) -> PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse: """ Get a paginated list of files. 
@@ -210,7 +210,7 @@ async def list_files( Returns ------- - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse Successful Response Examples diff --git a/src/humanloop/files/raw_client.py b/src/humanloop/files/raw_client.py index 19f52cf2..01b48e03 100644 --- a/src/humanloop/files/raw_client.py +++ b/src/humanloop/files/raw_client.py @@ -7,8 +7,8 @@ from ..types.sort_order import SortOrder from ..core.request_options import RequestOptions from ..core.http_response import HttpResponse -from ..types.paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response import ( - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse, +from ..types.paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response import ( + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse, ) from ..core.unchecked_base_model import construct_type from ..errors.unprocessable_entity_error import UnprocessableEntityError @@ -39,7 +39,9 @@ def list_files( sort_by: typing.Optional[ProjectSortBy] = None, order: typing.Optional[SortOrder] = None, request_options: typing.Optional[RequestOptions] = None, - ) -> HttpResponse[PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse]: + ) -> HttpResponse[ + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse + ]: """ Get a paginated list of files. @@ -74,7 +76,7 @@ def list_files( Returns ------- - HttpResponse[PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse] + HttpResponse[PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse] Successful Response """ _response = self._client_wrapper.httpx_client.request( @@ -95,9 +97,9 @@ def list_files( try: if 200 <= _response.status_code < 300: _data = typing.cast( - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse, + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse, construct_type( - type_=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse, # type: ignore + type_=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse, # type: ignore object_=_response.json(), ), ) @@ -200,7 +202,9 @@ async def list_files( sort_by: typing.Optional[ProjectSortBy] = None, order: typing.Optional[SortOrder] = None, request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse]: + ) -> AsyncHttpResponse[ + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse + ]: """ Get a paginated list of files. 
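Since the paginated union now carries six members, a caller-side sketch of filtering for the new Agent records; the `type` filter value and the `records`/`type` attribute names are assumptions based on the response models elsewhere in this patch:

# Sketch: page through Files and pick out Agents.
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

page = client.files.list_files(type=["agent"], size=50)
for record in page.records:
    if record.type == "agent":  # discriminate the six-way union
        print(record.id, record.path)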
@@ -235,7 +239,7 @@ async def list_files( Returns ------- - AsyncHttpResponse[PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse] + AsyncHttpResponse[PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse] Successful Response """ _response = await self._client_wrapper.httpx_client.request( @@ -256,9 +260,9 @@ async def list_files( try: if 200 <= _response.status_code < 300: _data = typing.cast( - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse, + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse, construct_type( - type_=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse, # type: ignore + type_=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse, # type: ignore object_=_response.json(), ), ) diff --git a/src/humanloop/files/requests/retrieve_by_path_files_retrieve_by_path_post_response.py b/src/humanloop/files/requests/retrieve_by_path_files_retrieve_by_path_post_response.py index c1618edb..8c070ab3 100644 --- a/src/humanloop/files/requests/retrieve_by_path_files_retrieve_by_path_post_response.py +++ b/src/humanloop/files/requests/retrieve_by_path_files_retrieve_by_path_post_response.py @@ -6,7 +6,13 @@ from ...requests.dataset_response import DatasetResponseParams from ...requests.evaluator_response import EvaluatorResponseParams from ...requests.flow_response import FlowResponseParams +from ...requests.agent_response import AgentResponseParams RetrieveByPathFilesRetrieveByPathPostResponseParams = typing.Union[ - PromptResponseParams, ToolResponseParams, DatasetResponseParams, EvaluatorResponseParams, FlowResponseParams + PromptResponseParams, + ToolResponseParams, + DatasetResponseParams, + EvaluatorResponseParams, + FlowResponseParams, + AgentResponseParams, ] diff --git a/src/humanloop/files/types/retrieve_by_path_files_retrieve_by_path_post_response.py b/src/humanloop/files/types/retrieve_by_path_files_retrieve_by_path_post_response.py index 48415fc9..46ea271a 100644 --- a/src/humanloop/files/types/retrieve_by_path_files_retrieve_by_path_post_response.py +++ b/src/humanloop/files/types/retrieve_by_path_files_retrieve_by_path_post_response.py @@ -6,7 +6,8 @@ from ...types.dataset_response import DatasetResponse from ...types.evaluator_response import EvaluatorResponse from ...types.flow_response import FlowResponse +from ...types.agent_response import AgentResponse RetrieveByPathFilesRetrieveByPathPostResponse = typing.Union[ - PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse + PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse, AgentResponse ] diff --git a/src/humanloop/flows/client.py b/src/humanloop/flows/client.py index a11776fc..bcb9491c 100644 --- a/src/humanloop/flows/client.py +++ b/src/humanloop/flows/client.py @@ -214,10 +214,10 @@ def log( output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.", log_status="incomplete", start_time=datetime.datetime.fromisoformat( - "2024-07-08 19:40:35+00:00", + "2024-07-08 21:40:35+00:00", ), end_time=datetime.datetime.fromisoformat( - "2024-07-08 19:40:39+00:00", + "2024-07-08 21:40:39+00:00", ), ) """ @@ -1128,10 +1128,10 @@ async def main() -> None: output="The patient is likely experiencing a myocardial infarction. 
Immediate medical attention is required.", log_status="incomplete", start_time=datetime.datetime.fromisoformat( - "2024-07-08 19:40:35+00:00", + "2024-07-08 21:40:35+00:00", ), end_time=datetime.datetime.fromisoformat( - "2024-07-08 19:40:39+00:00", + "2024-07-08 21:40:39+00:00", ), ) diff --git a/src/humanloop/logs/client.py b/src/humanloop/logs/client.py index 17007c1b..b16d1f6b 100644 --- a/src/humanloop/logs/client.py +++ b/src/humanloop/logs/client.py @@ -99,7 +99,7 @@ def list( If provided, limit the response to a random subset of logs from the filtered results. (This will be an approximate sample, not a strict limit.) include_trace_children : typing.Optional[bool] - If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow Logs. + If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow or Agent Logs. request_options : typing.Optional[RequestOptions] Request-specific configuration. @@ -338,7 +338,7 @@ async def list( If provided, limit the response to a random subset of logs from the filtered results. (This will be an approximate sample, not a strict limit.) include_trace_children : typing.Optional[bool] - If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow Logs. + If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow or Agent Logs. request_options : typing.Optional[RequestOptions] Request-specific configuration. diff --git a/src/humanloop/prompts/__init__.py b/src/humanloop/prompts/__init__.py index c1147ff2..ae141d57 100644 --- a/src/humanloop/prompts/__init__.py +++ b/src/humanloop/prompts/__init__.py @@ -3,6 +3,7 @@ from .types import ( PromptLogRequestToolChoice, PromptLogUpdateRequestToolChoice, + PromptRequestReasoningEffort, PromptRequestStop, PromptRequestTemplate, PromptsCallRequestToolChoice, @@ -11,6 +12,7 @@ from .requests import ( PromptLogRequestToolChoiceParams, PromptLogUpdateRequestToolChoiceParams, + PromptRequestReasoningEffortParams, PromptRequestStopParams, PromptRequestTemplateParams, PromptsCallRequestToolChoiceParams, @@ -22,6 +24,8 @@ "PromptLogRequestToolChoiceParams", "PromptLogUpdateRequestToolChoice", "PromptLogUpdateRequestToolChoiceParams", + "PromptRequestReasoningEffort", + "PromptRequestReasoningEffortParams", "PromptRequestStop", "PromptRequestStopParams", "PromptRequestTemplate", diff --git a/src/humanloop/prompts/client.py b/src/humanloop/prompts/client.py index e2fff4c3..d5de327b 100644 --- a/src/humanloop/prompts/client.py +++ b/src/humanloop/prompts/client.py @@ -33,7 +33,7 @@ from ..types.model_providers import ModelProviders from .requests.prompt_request_stop import PromptRequestStopParams from ..requests.response_format import ResponseFormatParams -from ..types.reasoning_effort import ReasoningEffort +from .requests.prompt_request_reasoning_effort import PromptRequestReasoningEffortParams from ..requests.tool_function import ToolFunctionParams from ..types.populate_template_response import PopulateTemplateResponse from ..types.list_prompts import ListPrompts @@ -44,6 +44,7 @@ from ..requests.evaluator_activation_deactivation_request_deactivate_item import ( EvaluatorActivationDeactivationRequestDeactivateItemParams, ) +from ..types.prompt_kernel_request import PromptKernelRequest from ..core.client_wrapper import AsyncClientWrapper from .raw_client import AsyncRawPromptsClient from ..core.pagination import AsyncPager @@ -256,7 +257,7 @@ def log( messages=[{"role": "user", 
"content": "What really happened at Roswell?"}], inputs={"person": "Trump"}, created_at=datetime.datetime.fromisoformat( - "2024-07-18 21:29:35.178000+00:00", + "2024-07-18 23:29:35.178000+00:00", ), provider_latency=6.5931549072265625, output_message={ @@ -962,7 +963,7 @@ def upsert( other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, seed: typing.Optional[int] = OMIT, response_format: typing.Optional[ResponseFormatParams] = OMIT, - reasoning_effort: typing.Optional[ReasoningEffort] = OMIT, + reasoning_effort: typing.Optional[PromptRequestReasoningEffortParams] = OMIT, tools: typing.Optional[typing.Sequence[ToolFunctionParams]] = OMIT, linked_tools: typing.Optional[typing.Sequence[str]] = OMIT, attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, @@ -1037,8 +1038,8 @@ def upsert( response_format : typing.Optional[ResponseFormatParams] The format of the response. Only `{"type": "json_object"}` is currently supported for chat. - reasoning_effort : typing.Optional[ReasoningEffort] - Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models. + reasoning_effort : typing.Optional[PromptRequestReasoningEffortParams] + Guidance on how many reasoning tokens it should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect a OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. tools : typing.Optional[typing.Sequence[ToolFunctionParams]] The tool specification that the model can choose to call if Tool calling is supported. @@ -1599,6 +1600,92 @@ def update_monitoring( ) return response.data + def serialize( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> None: + """ + Serialize a Prompt to the .prompt file format. + + Useful for storing the Prompt with your code in a version control system, + or for editing with an AI tool. + + By default, the deployed version of the Prompt is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Prompt. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + version_id : typing.Optional[str] + A specific Version ID of the Prompt to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + None + + Examples + -------- + from humanloop import Humanloop + + client = Humanloop( + api_key="YOUR_API_KEY", + ) + client.prompts.serialize( + id="id", + ) + """ + response = self._raw_client.serialize( + id, version_id=version_id, environment=environment, request_options=request_options + ) + return response.data + + def deserialize( + self, *, prompt: str, request_options: typing.Optional[RequestOptions] = None + ) -> PromptKernelRequest: + """ + Deserialize a Prompt from the .prompt file format. + + This returns a subset of the attributes required by a Prompt. + This subset is the bit that defines the Prompt version (e.g. with `model` and `temperature` etc) + + Parameters + ---------- + prompt : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+
+        Returns
+        -------
+        PromptKernelRequest
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import Humanloop
+
+        client = Humanloop(
+            api_key="YOUR_API_KEY",
+        )
+        client.prompts.deserialize(
+            prompt="prompt",
+        )
+        """
+        response = self._raw_client.deserialize(prompt=prompt, request_options=request_options)
+        return response.data
+

 class AsyncPromptsClient:
     def __init__(self, *, client_wrapper: AsyncClientWrapper):
@@ -1810,7 +1897,7 @@ async def main() -> None:
             ],
             inputs={"person": "Trump"},
             created_at=datetime.datetime.fromisoformat(
-                "2024-07-18 21:29:35.178000+00:00",
+                "2024-07-18 23:29:35.178000+00:00",
             ),
             provider_latency=6.5931549072265625,
             output_message={
@@ -2552,7 +2639,7 @@ async def upsert(
         other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
         seed: typing.Optional[int] = OMIT,
         response_format: typing.Optional[ResponseFormatParams] = OMIT,
-        reasoning_effort: typing.Optional[ReasoningEffort] = OMIT,
+        reasoning_effort: typing.Optional[PromptRequestReasoningEffortParams] = OMIT,
         tools: typing.Optional[typing.Sequence[ToolFunctionParams]] = OMIT,
         linked_tools: typing.Optional[typing.Sequence[str]] = OMIT,
         attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2627,8 +2714,8 @@ async def upsert(
         response_format : typing.Optional[ResponseFormatParams]
             The format of the response. Only `{"type": "json_object"}` is currently supported for chat.

-        reasoning_effort : typing.Optional[ReasoningEffort]
-            Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+        reasoning_effort : typing.Optional[PromptRequestReasoningEffortParams]
+            Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which specifies the maximum token budget.

         tools : typing.Optional[typing.Sequence[ToolFunctionParams]]
             The tool specification that the model can choose to call if Tool calling is supported.
@@ -3284,3 +3371,105 @@ async def update_monitoring(
             id, activate=activate, deactivate=deactivate, request_options=request_options
         )
         return response.data
+
+    async def serialize(
+        self,
+        id: str,
+        *,
+        version_id: typing.Optional[str] = None,
+        environment: typing.Optional[str] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> None:
+        """
+        Serialize a Prompt to the .prompt file format.
+
+        Useful for storing the Prompt with your code in a version control system,
+        or for editing with an AI tool.
+
+        By default, the deployed version of the Prompt is returned. Use the query parameters
+        `version_id` or `environment` to target a specific version of the Prompt.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Prompt.
+
+        version_id : typing.Optional[str]
+            A specific Version ID of the Prompt to retrieve.
+
+        environment : typing.Optional[str]
+            Name of the Environment to retrieve a deployed Version from.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        None
+
+        Examples
+        --------
+        import asyncio
+
+        from humanloop import AsyncHumanloop
+
+        client = AsyncHumanloop(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.prompts.serialize(
+                id="id",
+            )
+
+
+        asyncio.run(main())
+        """
+        response = await self._raw_client.serialize(
+            id, version_id=version_id, environment=environment, request_options=request_options
+        )
+        return response.data
+
+    async def deserialize(
+        self, *, prompt: str, request_options: typing.Optional[RequestOptions] = None
+    ) -> PromptKernelRequest:
+        """
+        Deserialize a Prompt from the .prompt file format.
+
+        This returns a subset of the attributes required by a Prompt.
+        This subset defines the Prompt version (e.g. `model`, `temperature`, etc.).
+
+        Parameters
+        ----------
+        prompt : str
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        PromptKernelRequest
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from humanloop import AsyncHumanloop
+
+        client = AsyncHumanloop(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.prompts.deserialize(
+                prompt="prompt",
+            )
+
+
+        asyncio.run(main())
+        """
+        response = await self._raw_client.deserialize(prompt=prompt, request_options=request_options)
+        return response.data
diff --git a/src/humanloop/prompts/raw_client.py b/src/humanloop/prompts/raw_client.py
index b5334c82..2b907d91 100644
--- a/src/humanloop/prompts/raw_client.py
+++ b/src/humanloop/prompts/raw_client.py
@@ -32,7 +32,7 @@
 from ..types.model_providers import ModelProviders
 from .requests.prompt_request_stop import PromptRequestStopParams
 from ..requests.response_format import ResponseFormatParams
-from ..types.reasoning_effort import ReasoningEffort
+from .requests.prompt_request_reasoning_effort import PromptRequestReasoningEffortParams
 from ..requests.tool_function import ToolFunctionParams
 from ..types.prompt_response import PromptResponse
 from ..types.populate_template_response import PopulateTemplateResponse
@@ -44,6 +44,7 @@
 from ..requests.evaluator_activation_deactivation_request_deactivate_item import (
     EvaluatorActivationDeactivationRequestDeactivateItemParams,
 )
+from ..types.prompt_kernel_request import PromptKernelRequest
 from ..core.client_wrapper import AsyncClientWrapper
 from ..core.http_response import AsyncHttpResponse

@@ -915,7 +916,7 @@ def upsert(
         other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
         seed: typing.Optional[int] = OMIT,
         response_format: typing.Optional[ResponseFormatParams] = OMIT,
-        reasoning_effort: typing.Optional[ReasoningEffort] = OMIT,
+        reasoning_effort: typing.Optional[PromptRequestReasoningEffortParams] = OMIT,
         tools: typing.Optional[typing.Sequence[ToolFunctionParams]] = OMIT,
         linked_tools: typing.Optional[typing.Sequence[str]] = OMIT,
         attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -990,8 +991,8 @@ def upsert(
         response_format : typing.Optional[ResponseFormatParams]
             The format of the response. Only `{"type": "json_object"}` is currently supported for chat.

-        reasoning_effort : typing.Optional[ReasoningEffort]
-            Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+        reasoning_effort : typing.Optional[PromptRequestReasoningEffortParams]
+            Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which specifies the maximum token budget.

         tools : typing.Optional[typing.Sequence[ToolFunctionParams]]
             The tool specification that the model can choose to call if Tool calling is supported.
@@ -1051,7 +1052,9 @@ def upsert(
                 "response_format": convert_and_respect_annotation_metadata(
                     object_=response_format, annotation=ResponseFormatParams, direction="write"
                 ),
-                "reasoning_effort": reasoning_effort,
+                "reasoning_effort": convert_and_respect_annotation_metadata(
+                    object_=reasoning_effort, annotation=PromptRequestReasoningEffortParams, direction="write"
+                ),
                 "tools": convert_and_respect_annotation_metadata(
                     object_=tools, annotation=typing.Sequence[ToolFunctionParams], direction="write"
                 ),
@@ -1744,6 +1747,126 @@ def update_monitoring(
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

+    def serialize(
+        self,
+        id: str,
+        *,
+        version_id: typing.Optional[str] = None,
+        environment: typing.Optional[str] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> HttpResponse[None]:
+        """
+        Serialize a Prompt to the .prompt file format.
+
+        Useful for storing the Prompt with your code in a version control system,
+        or for editing with an AI tool.
+
+        By default, the deployed version of the Prompt is returned. Use the query parameters
+        `version_id` or `environment` to target a specific version of the Prompt.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Prompt.
+
+        version_id : typing.Optional[str]
+            A specific Version ID of the Prompt to retrieve.
+
+        environment : typing.Optional[str]
+            Name of the Environment to retrieve a deployed Version from.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        HttpResponse[None]
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            f"prompts/{jsonable_encoder(id)}/serialize",
+            method="GET",
+            params={
+                "version_id": version_id,
+                "environment": environment,
+            },
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return HttpResponse(response=_response, data=None)
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    def deserialize(
+        self, *, prompt: str, request_options: typing.Optional[RequestOptions] = None
+    ) -> HttpResponse[PromptKernelRequest]:
+        """
+        Deserialize a Prompt from the .prompt file format.
+
+        This returns a subset of the attributes required by a Prompt.
+        This subset defines the Prompt version (e.g. `model`, `temperature`, etc.).
+
+        Parameters
+        ----------
+        prompt : str
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        HttpResponse[PromptKernelRequest]
+            Successful Response
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            "prompts/deserialize",
+            method="POST",
+            json={
+                "prompt": prompt,
+            },
+            headers={
+                "content-type": "application/json",
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                _data = typing.cast(
+                    PromptKernelRequest,
+                    construct_type(
+                        type_=PromptKernelRequest,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+                return HttpResponse(response=_response, data=_data)
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+

 class AsyncRawPromptsClient:
     def __init__(self, *, client_wrapper: AsyncClientWrapper):
@@ -2609,7 +2732,7 @@ async def upsert(
         other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
         seed: typing.Optional[int] = OMIT,
         response_format: typing.Optional[ResponseFormatParams] = OMIT,
-        reasoning_effort: typing.Optional[ReasoningEffort] = OMIT,
+        reasoning_effort: typing.Optional[PromptRequestReasoningEffortParams] = OMIT,
         tools: typing.Optional[typing.Sequence[ToolFunctionParams]] = OMIT,
         linked_tools: typing.Optional[typing.Sequence[str]] = OMIT,
         attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2684,8 +2807,8 @@ async def upsert(
         response_format : typing.Optional[ResponseFormatParams]
             The format of the response. Only `{"type": "json_object"}` is currently supported for chat.

-        reasoning_effort : typing.Optional[ReasoningEffort]
-            Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+        reasoning_effort : typing.Optional[PromptRequestReasoningEffortParams]
+            Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which specifies the maximum token budget.

         tools : typing.Optional[typing.Sequence[ToolFunctionParams]]
             The tool specification that the model can choose to call if Tool calling is supported.
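The practical upshot of widening `reasoning_effort` to a union, as a hedged sketch (model names and effort values are illustrative, not prescriptive):

# Sketch: one upsert parameter, two provider conventions.
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# OpenAI reasoning models take an OpenAIReasoningEffort enum value.
client.prompts.upsert(path="demo/openai", model="o3-mini", reasoning_effort="high")

# Anthropic reasoning models take an integer maximum token budget.
client.prompts.upsert(path="demo/anthropic", model="claude-3-7-sonnet", reasoning_effort=1024)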
@@ -2745,7 +2868,9 @@ async def upsert(
                 "response_format": convert_and_respect_annotation_metadata(
                     object_=response_format, annotation=ResponseFormatParams, direction="write"
                 ),
-                "reasoning_effort": reasoning_effort,
+                "reasoning_effort": convert_and_respect_annotation_metadata(
+                    object_=reasoning_effort, annotation=PromptRequestReasoningEffortParams, direction="write"
+                ),
                 "tools": convert_and_respect_annotation_metadata(
                     object_=tools, annotation=typing.Sequence[ToolFunctionParams], direction="write"
                 ),
@@ -3439,3 +3564,123 @@ async def update_monitoring(
         except JSONDecodeError:
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def serialize(
+        self,
+        id: str,
+        *,
+        version_id: typing.Optional[str] = None,
+        environment: typing.Optional[str] = None,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> AsyncHttpResponse[None]:
+        """
+        Serialize a Prompt to the .prompt file format.
+
+        Useful for storing the Prompt with your code in a version control system,
+        or for editing with an AI tool.
+
+        By default, the deployed version of the Prompt is returned. Use the query parameters
+        `version_id` or `environment` to target a specific version of the Prompt.
+
+        Parameters
+        ----------
+        id : str
+            Unique identifier for Prompt.
+
+        version_id : typing.Optional[str]
+            A specific Version ID of the Prompt to retrieve.
+
+        environment : typing.Optional[str]
+            Name of the Environment to retrieve a deployed Version from.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        AsyncHttpResponse[None]
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            f"prompts/{jsonable_encoder(id)}/serialize",
+            method="GET",
+            params={
+                "version_id": version_id,
+                "environment": environment,
+            },
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return AsyncHttpResponse(response=_response, data=None)
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def deserialize(
+        self, *, prompt: str, request_options: typing.Optional[RequestOptions] = None
+    ) -> AsyncHttpResponse[PromptKernelRequest]:
+        """
+        Deserialize a Prompt from the .prompt file format.
+
+        This returns a subset of the attributes required by a Prompt.
+        This subset defines the Prompt version (e.g. `model`, `temperature`, etc.).
+
+        Parameters
+        ----------
+        prompt : str
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+ + Returns + ------- + AsyncHttpResponse[PromptKernelRequest] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + "prompts/deserialize", + method="POST", + json={ + "prompt": prompt, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + PromptKernelRequest, + construct_type( + type_=PromptKernelRequest, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/humanloop/prompts/requests/__init__.py b/src/humanloop/prompts/requests/__init__.py index c5119552..3971e252 100644 --- a/src/humanloop/prompts/requests/__init__.py +++ b/src/humanloop/prompts/requests/__init__.py @@ -2,6 +2,7 @@ from .prompt_log_request_tool_choice import PromptLogRequestToolChoiceParams from .prompt_log_update_request_tool_choice import PromptLogUpdateRequestToolChoiceParams +from .prompt_request_reasoning_effort import PromptRequestReasoningEffortParams from .prompt_request_stop import PromptRequestStopParams from .prompt_request_template import PromptRequestTemplateParams from .prompts_call_request_tool_choice import PromptsCallRequestToolChoiceParams @@ -10,6 +11,7 @@ __all__ = [ "PromptLogRequestToolChoiceParams", "PromptLogUpdateRequestToolChoiceParams", + "PromptRequestReasoningEffortParams", "PromptRequestStopParams", "PromptRequestTemplateParams", "PromptsCallRequestToolChoiceParams", diff --git a/src/humanloop/prompts/requests/prompt_request_reasoning_effort.py b/src/humanloop/prompts/requests/prompt_request_reasoning_effort.py new file mode 100644 index 00000000..080a107e --- /dev/null +++ b/src/humanloop/prompts/requests/prompt_request_reasoning_effort.py @@ -0,0 +1,6 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing +from ...types.open_ai_reasoning_effort import OpenAiReasoningEffort + +PromptRequestReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int] diff --git a/src/humanloop/prompts/types/__init__.py b/src/humanloop/prompts/types/__init__.py index 644cf6b5..1b849e7d 100644 --- a/src/humanloop/prompts/types/__init__.py +++ b/src/humanloop/prompts/types/__init__.py @@ -2,6 +2,7 @@ from .prompt_log_request_tool_choice import PromptLogRequestToolChoice from .prompt_log_update_request_tool_choice import PromptLogUpdateRequestToolChoice +from .prompt_request_reasoning_effort import PromptRequestReasoningEffort from .prompt_request_stop import PromptRequestStop from .prompt_request_template import PromptRequestTemplate from .prompts_call_request_tool_choice import PromptsCallRequestToolChoice @@ -10,6 +11,7 @@ __all__ = [ "PromptLogRequestToolChoice", "PromptLogUpdateRequestToolChoice", + "PromptRequestReasoningEffort", "PromptRequestStop", "PromptRequestTemplate", "PromptsCallRequestToolChoice", diff --git a/src/humanloop/prompts/types/prompt_request_reasoning_effort.py b/src/humanloop/prompts/types/prompt_request_reasoning_effort.py new file mode 100644 index 00000000..33f35288 --- /dev/null +++ b/src/humanloop/prompts/types/prompt_request_reasoning_effort.py @@ -0,0 +1,6 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ...types.open_ai_reasoning_effort import OpenAiReasoningEffort + +PromptRequestReasoningEffort = typing.Union[OpenAiReasoningEffort, int] diff --git a/src/humanloop/requests/__init__.py b/src/humanloop/requests/__init__.py index bd9458ba..ba9f74af 100644 --- a/src/humanloop/requests/__init__.py +++ b/src/humanloop/requests/__init__.py @@ -1,11 +1,40 @@ # This file was auto-generated by Fern from our API Definition. 
+from .agent_call_response import AgentCallResponseParams +from .agent_call_response_tool_choice import AgentCallResponseToolChoiceParams +from .agent_call_stream_response import AgentCallStreamResponseParams +from .agent_call_stream_response_payload import AgentCallStreamResponsePayloadParams from .agent_config_response import AgentConfigResponseParams +from .agent_continue_response import AgentContinueResponseParams +from .agent_continue_response_tool_choice import AgentContinueResponseToolChoiceParams +from .agent_continue_stream_response import AgentContinueStreamResponseParams +from .agent_continue_stream_response_payload import AgentContinueStreamResponsePayloadParams +from .agent_inline_tool import AgentInlineToolParams +from .agent_kernel_request import AgentKernelRequestParams +from .agent_kernel_request_reasoning_effort import AgentKernelRequestReasoningEffortParams +from .agent_kernel_request_stop import AgentKernelRequestStopParams +from .agent_kernel_request_template import AgentKernelRequestTemplateParams +from .agent_kernel_request_tools_item import AgentKernelRequestToolsItemParams +from .agent_linked_file_request import AgentLinkedFileRequestParams +from .agent_linked_file_response import AgentLinkedFileResponseParams +from .agent_linked_file_response_file import AgentLinkedFileResponseFileParams +from .agent_log_response import AgentLogResponseParams +from .agent_log_response_tool_choice import AgentLogResponseToolChoiceParams +from .agent_log_stream_response import AgentLogStreamResponseParams +from .agent_response import AgentResponseParams +from .agent_response_reasoning_effort import AgentResponseReasoningEffortParams +from .agent_response_stop import AgentResponseStopParams +from .agent_response_template import AgentResponseTemplateParams +from .agent_response_tools_item import AgentResponseToolsItemParams +from .anthropic_redacted_thinking_content import AnthropicRedactedThinkingContentParams +from .anthropic_thinking_content import AnthropicThinkingContentParams from .boolean_evaluator_stats_response import BooleanEvaluatorStatsResponseParams from .chat_message import ChatMessageParams from .chat_message_content import ChatMessageContentParams from .chat_message_content_item import ChatMessageContentItemParams +from .chat_message_thinking_item import ChatMessageThinkingItemParams from .code_evaluator_request import CodeEvaluatorRequestParams +from .create_agent_log_response import CreateAgentLogResponseParams from .create_datapoint_request import CreateDatapointRequestParams from .create_datapoint_request_target_value import CreateDatapointRequestTargetValueParams from .create_evaluator_log_response import CreateEvaluatorLogResponseParams @@ -51,6 +80,7 @@ from .external_evaluator_request import ExternalEvaluatorRequestParams from .file_environment_response import FileEnvironmentResponseParams from .file_environment_response_file import FileEnvironmentResponseFileParams +from .file_environment_variable_request import FileEnvironmentVariableRequestParams from .file_id import FileIdParams from .file_path import FilePathParams from .file_request import FileRequestParams @@ -64,7 +94,9 @@ from .image_chat_content import ImageChatContentParams from .image_url import ImageUrlParams from .input_response import InputResponseParams +from .linked_file_request import LinkedFileRequestParams from .linked_tool_response import LinkedToolResponseParams +from .list_agents import ListAgentsParams from .list_datasets import ListDatasetsParams from .list_evaluators import 
ListEvaluatorsParams from .list_flows import ListFlowsParams @@ -72,28 +104,31 @@ from .list_tools import ListToolsParams from .llm_evaluator_request import LlmEvaluatorRequestParams from .log_response import LogResponseParams +from .log_stream_response import LogStreamResponseParams from .monitoring_evaluator_environment_request import MonitoringEvaluatorEnvironmentRequestParams from .monitoring_evaluator_response import MonitoringEvaluatorResponseParams from .monitoring_evaluator_version_request import MonitoringEvaluatorVersionRequestParams from .numeric_evaluator_stats_response import NumericEvaluatorStatsResponseParams from .overall_stats import OverallStatsParams +from .paginated_data_agent_response import PaginatedDataAgentResponseParams from .paginated_data_evaluation_log_response import PaginatedDataEvaluationLogResponseParams from .paginated_data_evaluator_response import PaginatedDataEvaluatorResponseParams from .paginated_data_flow_response import PaginatedDataFlowResponseParams from .paginated_data_log_response import PaginatedDataLogResponseParams from .paginated_data_prompt_response import PaginatedDataPromptResponseParams from .paginated_data_tool_response import PaginatedDataToolResponseParams -from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response import ( - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseParams, +from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response import ( + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseParams, ) -from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item import ( - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams, +from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item import ( + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams, ) from .paginated_datapoint_response import PaginatedDatapointResponseParams from .paginated_dataset_response import PaginatedDatasetResponseParams from .paginated_evaluation_response import PaginatedEvaluationResponseParams from .populate_template_response import PopulateTemplateResponseParams from .populate_template_response_populated_template import PopulateTemplateResponsePopulatedTemplateParams +from .populate_template_response_reasoning_effort import PopulateTemplateResponseReasoningEffortParams from .populate_template_response_stop import PopulateTemplateResponseStopParams from .populate_template_response_template import PopulateTemplateResponseTemplateParams from .prompt_call_log_response import PromptCallLogResponseParams @@ -101,11 +136,13 @@ from .prompt_call_response_tool_choice import PromptCallResponseToolChoiceParams from .prompt_call_stream_response import PromptCallStreamResponseParams from .prompt_kernel_request import PromptKernelRequestParams +from .prompt_kernel_request_reasoning_effort import PromptKernelRequestReasoningEffortParams from .prompt_kernel_request_stop import PromptKernelRequestStopParams from .prompt_kernel_request_template import PromptKernelRequestTemplateParams from .prompt_log_response import PromptLogResponseParams from .prompt_log_response_tool_choice import PromptLogResponseToolChoiceParams 
from .prompt_response import PromptResponseParams +from .prompt_response_reasoning_effort import PromptResponseReasoningEffortParams from .prompt_response_stop import PromptResponseStopParams from .prompt_response_template import PromptResponseTemplateParams from .provider_api_keys import ProviderApiKeysParams @@ -117,6 +154,7 @@ from .text_chat_content import TextChatContentParams from .text_evaluator_stats_response import TextEvaluatorStatsResponseParams from .tool_call import ToolCallParams +from .tool_call_response import ToolCallResponseParams from .tool_choice import ToolChoiceParams from .tool_function import ToolFunctionParams from .tool_kernel_request import ToolKernelRequestParams @@ -135,12 +173,41 @@ from .version_stats_response_evaluator_version_stats_item import VersionStatsResponseEvaluatorVersionStatsItemParams __all__ = [ + "AgentCallResponseParams", + "AgentCallResponseToolChoiceParams", + "AgentCallStreamResponseParams", + "AgentCallStreamResponsePayloadParams", "AgentConfigResponseParams", + "AgentContinueResponseParams", + "AgentContinueResponseToolChoiceParams", + "AgentContinueStreamResponseParams", + "AgentContinueStreamResponsePayloadParams", + "AgentInlineToolParams", + "AgentKernelRequestParams", + "AgentKernelRequestReasoningEffortParams", + "AgentKernelRequestStopParams", + "AgentKernelRequestTemplateParams", + "AgentKernelRequestToolsItemParams", + "AgentLinkedFileRequestParams", + "AgentLinkedFileResponseFileParams", + "AgentLinkedFileResponseParams", + "AgentLogResponseParams", + "AgentLogResponseToolChoiceParams", + "AgentLogStreamResponseParams", + "AgentResponseParams", + "AgentResponseReasoningEffortParams", + "AgentResponseStopParams", + "AgentResponseTemplateParams", + "AgentResponseToolsItemParams", + "AnthropicRedactedThinkingContentParams", + "AnthropicThinkingContentParams", "BooleanEvaluatorStatsResponseParams", "ChatMessageContentItemParams", "ChatMessageContentParams", "ChatMessageParams", + "ChatMessageThinkingItemParams", "CodeEvaluatorRequestParams", + "CreateAgentLogResponseParams", "CreateDatapointRequestParams", "CreateDatapointRequestTargetValueParams", "CreateEvaluatorLogResponseParams", @@ -180,6 +247,7 @@ "ExternalEvaluatorRequestParams", "FileEnvironmentResponseFileParams", "FileEnvironmentResponseParams", + "FileEnvironmentVariableRequestParams", "FileIdParams", "FilePathParams", "FileRequestParams", @@ -193,7 +261,9 @@ "ImageChatContentParams", "ImageUrlParams", "InputResponseParams", + "LinkedFileRequestParams", "LinkedToolResponseParams", + "ListAgentsParams", "ListDatasetsParams", "ListEvaluatorsParams", "ListFlowsParams", @@ -201,24 +271,27 @@ "ListToolsParams", "LlmEvaluatorRequestParams", "LogResponseParams", + "LogStreamResponseParams", "MonitoringEvaluatorEnvironmentRequestParams", "MonitoringEvaluatorResponseParams", "MonitoringEvaluatorVersionRequestParams", "NumericEvaluatorStatsResponseParams", "OverallStatsParams", + "PaginatedDataAgentResponseParams", "PaginatedDataEvaluationLogResponseParams", "PaginatedDataEvaluatorResponseParams", "PaginatedDataFlowResponseParams", "PaginatedDataLogResponseParams", "PaginatedDataPromptResponseParams", "PaginatedDataToolResponseParams", - "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseParams", - "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams", + "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseParams", + 
"PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams", "PaginatedDatapointResponseParams", "PaginatedDatasetResponseParams", "PaginatedEvaluationResponseParams", "PopulateTemplateResponseParams", "PopulateTemplateResponsePopulatedTemplateParams", + "PopulateTemplateResponseReasoningEffortParams", "PopulateTemplateResponseStopParams", "PopulateTemplateResponseTemplateParams", "PromptCallLogResponseParams", @@ -226,11 +299,13 @@ "PromptCallResponseToolChoiceParams", "PromptCallStreamResponseParams", "PromptKernelRequestParams", + "PromptKernelRequestReasoningEffortParams", "PromptKernelRequestStopParams", "PromptKernelRequestTemplateParams", "PromptLogResponseParams", "PromptLogResponseToolChoiceParams", "PromptResponseParams", + "PromptResponseReasoningEffortParams", "PromptResponseStopParams", "PromptResponseTemplateParams", "ProviderApiKeysParams", @@ -242,6 +317,7 @@ "TextChatContentParams", "TextEvaluatorStatsResponseParams", "ToolCallParams", + "ToolCallResponseParams", "ToolChoiceParams", "ToolFunctionParams", "ToolKernelRequestParams", diff --git a/src/humanloop/requests/agent_call_response.py b/src/humanloop/requests/agent_call_response.py new file mode 100644 index 00000000..ffc925ec --- /dev/null +++ b/src/humanloop/requests/agent_call_response.py @@ -0,0 +1,202 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions +import typing_extensions +from .chat_message import ChatMessageParams +import typing +from .agent_call_response_tool_choice import AgentCallResponseToolChoiceParams +from .agent_response import AgentResponseParams +import datetime as dt +from ..types.log_status import LogStatus +from .evaluator_log_response import EvaluatorLogResponseParams +from .log_response import LogResponseParams + + +class AgentCallResponseParams(typing_extensions.TypedDict): + """ + Response model for a Agent call. + """ + + output_message: typing_extensions.NotRequired[ChatMessageParams] + """ + The message returned by the provider. + """ + + prompt_tokens: typing_extensions.NotRequired[int] + """ + Number of tokens in the prompt used to generate the output. + """ + + reasoning_tokens: typing_extensions.NotRequired[int] + """ + Number of reasoning tokens used to generate the output. + """ + + output_tokens: typing_extensions.NotRequired[int] + """ + Number of tokens in the output generated by the model. + """ + + prompt_cost: typing_extensions.NotRequired[float] + """ + Cost in dollars associated to the tokens in the prompt. + """ + + output_cost: typing_extensions.NotRequired[float] + """ + Cost in dollars associated to the tokens in the output. + """ + + finish_reason: typing_extensions.NotRequired[str] + """ + Reason the generation finished. + """ + + messages: typing_extensions.NotRequired[typing.Sequence[ChatMessageParams]] + """ + The messages passed to the to provider chat endpoint. + """ + + tool_choice: typing_extensions.NotRequired[AgentCallResponseToolChoiceParams] + """ + Controls how the model uses tools. The following options are supported: + - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. + - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. + - `'required'` means the model must call one or more of the provided tools. 
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function. + """ + + agent: AgentResponseParams + """ + Agent that generated the Log. + """ + + start_time: typing_extensions.NotRequired[dt.datetime] + """ + When the logged event started. + """ + + end_time: typing_extensions.NotRequired[dt.datetime] + """ + When the logged event ended. + """ + + output: typing_extensions.NotRequired[str] + """ + Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + """ + + created_at: typing_extensions.NotRequired[dt.datetime] + """ + User defined timestamp for when the log was created. + """ + + error: typing_extensions.NotRequired[str] + """ + Error message if the log is an error. + """ + + provider_latency: typing_extensions.NotRequired[float] + """ + Duration of the logged event in seconds. + """ + + stdout: typing_extensions.NotRequired[str] + """ + Captured log and debug statements. + """ + + provider_request: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Raw request sent to provider. + """ + + provider_response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Raw response received from the provider. + """ + + inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + The inputs passed to the prompt template. + """ + + source: typing_extensions.NotRequired[str] + """ + Identifies where the model was called from. + """ + + metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Any additional metadata to record. + """ + + log_status: typing_extensions.NotRequired[LogStatus] + """ + Status of the Agent Log. If `incomplete`, the Agent turn was suspended due to a tool call and can be continued by calling /agents/continue with responses to the Agent's last message (which should contain tool calls). See the `previous_agent_message` field for easy access to the Agent's last message. + """ + + source_datapoint_id: typing_extensions.NotRequired[str] + """ + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + """ + + trace_parent_id: typing_extensions.NotRequired[str] + """ + The ID of the parent Log to nest this Log under in a Trace. + """ + + batches: typing_extensions.NotRequired[typing.Sequence[str]] + """ + Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations. + """ + + user: typing_extensions.NotRequired[str] + """ + End-user ID related to the Log. + """ + + environment: typing_extensions.NotRequired[str] + """ + The name of the Environment the Log is associated to. + """ + + save: typing_extensions.NotRequired[bool] + """ + Whether the request/response payloads will be stored on Humanloop. + """ + + log_id: typing_extensions.NotRequired[str] + """ + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + """ + + id: str + """ + Unique identifier for the Log. + """ + + evaluator_logs: typing.Sequence[EvaluatorLogResponseParams] + """ + List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
+ """ + + trace_flow_id: typing_extensions.NotRequired[str] + """ + Identifier for the Flow that the Trace belongs to. + """ + + trace_id: typing_extensions.NotRequired[str] + """ + Identifier for the Trace that the Log belongs to. + """ + + trace_children: typing_extensions.NotRequired[typing.Sequence[LogResponseParams]] + """ + Logs nested under this Log in the Trace. + """ + + previous_agent_message: typing_extensions.NotRequired[ChatMessageParams] + """ + The Agent's last message, which should contain tool calls. Only populated if the Log is incomplete due to a suspended Agent turn with tool calls. This is useful for continuing the Agent call by calling /agents/continue. + """ diff --git a/src/humanloop/requests/agent_call_response_tool_choice.py b/src/humanloop/requests/agent_call_response_tool_choice.py new file mode 100644 index 00000000..6cc9f9c4 --- /dev/null +++ b/src/humanloop/requests/agent_call_response_tool_choice.py @@ -0,0 +1,8 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from .tool_choice import ToolChoiceParams + +AgentCallResponseToolChoiceParams = typing.Union[ + typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams +] diff --git a/src/humanloop/requests/agent_call_stream_response.py b/src/humanloop/requests/agent_call_stream_response.py new file mode 100644 index 00000000..082d6265 --- /dev/null +++ b/src/humanloop/requests/agent_call_stream_response.py @@ -0,0 +1,17 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions +import typing_extensions +from .agent_call_stream_response_payload import AgentCallStreamResponsePayloadParams +from ..types.event_type import EventType + + +class AgentCallStreamResponseParams(typing_extensions.TypedDict): + """ + Response model for calling Agent in streaming mode. + """ + + log_id: str + message: str + payload: typing_extensions.NotRequired[AgentCallStreamResponsePayloadParams] + type: EventType diff --git a/src/humanloop/requests/agent_call_stream_response_payload.py b/src/humanloop/requests/agent_call_stream_response_payload.py new file mode 100644 index 00000000..0e08a6f3 --- /dev/null +++ b/src/humanloop/requests/agent_call_stream_response_payload.py @@ -0,0 +1,8 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from .log_stream_response import LogStreamResponseParams +from .log_response import LogResponseParams +from .tool_call import ToolCallParams + +AgentCallStreamResponsePayloadParams = typing.Union[LogStreamResponseParams, LogResponseParams, ToolCallParams] diff --git a/src/humanloop/requests/agent_continue_response.py b/src/humanloop/requests/agent_continue_response.py new file mode 100644 index 00000000..8300667b --- /dev/null +++ b/src/humanloop/requests/agent_continue_response.py @@ -0,0 +1,202 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions +import typing_extensions +from .chat_message import ChatMessageParams +import typing +from .agent_continue_response_tool_choice import AgentContinueResponseToolChoiceParams +from .agent_response import AgentResponseParams +import datetime as dt +from ..types.log_status import LogStatus +from .evaluator_log_response import EvaluatorLogResponseParams +from .log_response import LogResponseParams + + +class AgentContinueResponseParams(typing_extensions.TypedDict): + """ + Response model for continuing an Agent call. 
+ """ + + output_message: typing_extensions.NotRequired[ChatMessageParams] + """ + The message returned by the provider. + """ + + prompt_tokens: typing_extensions.NotRequired[int] + """ + Number of tokens in the prompt used to generate the output. + """ + + reasoning_tokens: typing_extensions.NotRequired[int] + """ + Number of reasoning tokens used to generate the output. + """ + + output_tokens: typing_extensions.NotRequired[int] + """ + Number of tokens in the output generated by the model. + """ + + prompt_cost: typing_extensions.NotRequired[float] + """ + Cost in dollars associated to the tokens in the prompt. + """ + + output_cost: typing_extensions.NotRequired[float] + """ + Cost in dollars associated to the tokens in the output. + """ + + finish_reason: typing_extensions.NotRequired[str] + """ + Reason the generation finished. + """ + + messages: typing_extensions.NotRequired[typing.Sequence[ChatMessageParams]] + """ + The messages passed to the to provider chat endpoint. + """ + + tool_choice: typing_extensions.NotRequired[AgentContinueResponseToolChoiceParams] + """ + Controls how the model uses tools. The following options are supported: + - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. + - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. + - `'required'` means the model must call one or more of the provided tools. + - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. + """ + + agent: AgentResponseParams + """ + Agent that generated the Log. + """ + + start_time: typing_extensions.NotRequired[dt.datetime] + """ + When the logged event started. + """ + + end_time: typing_extensions.NotRequired[dt.datetime] + """ + When the logged event ended. + """ + + output: typing_extensions.NotRequired[str] + """ + Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + """ + + created_at: typing_extensions.NotRequired[dt.datetime] + """ + User defined timestamp for when the log was created. + """ + + error: typing_extensions.NotRequired[str] + """ + Error message if the log is an error. + """ + + provider_latency: typing_extensions.NotRequired[float] + """ + Duration of the logged event in seconds. + """ + + stdout: typing_extensions.NotRequired[str] + """ + Captured log and debug statements. + """ + + provider_request: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Raw request sent to provider. + """ + + provider_response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Raw response received the provider. + """ + + inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + The inputs passed to the prompt template. + """ + + source: typing_extensions.NotRequired[str] + """ + Identifies where the model was called from. + """ + + metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Any additional metadata to record. + """ + + log_status: typing_extensions.NotRequired[LogStatus] + """ + Status of the Agent Log. If `incomplete`, the Agent turn was suspended due to a tool call and can be continued by calling /agents/continue with responses to the Agent's last message (which should contain tool calls). 
See the `previous_agent_message` field for easy access to the Agent's last message. + """ + + source_datapoint_id: typing_extensions.NotRequired[str] + """ + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + """ + + trace_parent_id: typing_extensions.NotRequired[str] + """ + The ID of the parent Log to nest this Log under in a Trace. + """ + + batches: typing_extensions.NotRequired[typing.Sequence[str]] + """ + Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations + """ + + user: typing_extensions.NotRequired[str] + """ + End-user ID related to the Log. + """ + + environment: typing_extensions.NotRequired[str] + """ + The name of the Environment the Log is associated to. + """ + + save: typing_extensions.NotRequired[bool] + """ + Whether the request/response payloads will be stored on Humanloop. + """ + + log_id: typing_extensions.NotRequired[str] + """ + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + """ + + id: str + """ + Unique identifier for the Log. + """ + + evaluator_logs: typing.Sequence[EvaluatorLogResponseParams] + """ + List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log. + """ + + trace_flow_id: typing_extensions.NotRequired[str] + """ + Identifier for the Flow that the Trace belongs to. + """ + + trace_id: typing_extensions.NotRequired[str] + """ + Identifier for the Trace that the Log belongs to. + """ + + trace_children: typing_extensions.NotRequired[typing.Sequence[LogResponseParams]] + """ + Logs nested under this Log in the Trace. + """ + + previous_agent_message: typing_extensions.NotRequired[ChatMessageParams] + """ + The Agent's last message, which should contain tool calls. Only populated if the Log is incomplete due to a suspended Agent turn with tool calls. This is useful for continuing the Agent call by calling /agents/continue. + """ diff --git a/src/humanloop/requests/agent_continue_response_tool_choice.py b/src/humanloop/requests/agent_continue_response_tool_choice.py new file mode 100644 index 00000000..24b044cc --- /dev/null +++ b/src/humanloop/requests/agent_continue_response_tool_choice.py @@ -0,0 +1,8 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from .tool_choice import ToolChoiceParams + +AgentContinueResponseToolChoiceParams = typing.Union[ + typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams +] diff --git a/src/humanloop/requests/agent_continue_stream_response.py b/src/humanloop/requests/agent_continue_stream_response.py new file mode 100644 index 00000000..1ba31575 --- /dev/null +++ b/src/humanloop/requests/agent_continue_stream_response.py @@ -0,0 +1,17 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions +import typing_extensions +from .agent_continue_stream_response_payload import AgentContinueStreamResponsePayloadParams +from ..types.event_type import EventType + + +class AgentContinueStreamResponseParams(typing_extensions.TypedDict): + """ + Response model for continuing an Agent call in streaming mode. 
+ """ + + log_id: str + message: str + payload: typing_extensions.NotRequired[AgentContinueStreamResponsePayloadParams] + type: EventType diff --git a/src/humanloop/requests/agent_continue_stream_response_payload.py b/src/humanloop/requests/agent_continue_stream_response_payload.py new file mode 100644 index 00000000..ddd74c10 --- /dev/null +++ b/src/humanloop/requests/agent_continue_stream_response_payload.py @@ -0,0 +1,8 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from .log_stream_response import LogStreamResponseParams +from .log_response import LogResponseParams +from .tool_call import ToolCallParams + +AgentContinueStreamResponsePayloadParams = typing.Union[LogStreamResponseParams, LogResponseParams, ToolCallParams] diff --git a/src/humanloop/requests/agent_inline_tool.py b/src/humanloop/requests/agent_inline_tool.py new file mode 100644 index 00000000..31f9401a --- /dev/null +++ b/src/humanloop/requests/agent_inline_tool.py @@ -0,0 +1,13 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions +import typing +from .tool_function import ToolFunctionParams +import typing_extensions +from ..types.on_agent_call_enum import OnAgentCallEnum + + +class AgentInlineToolParams(typing_extensions.TypedDict): + type: typing.Literal["inline"] + json_schema: ToolFunctionParams + on_agent_call: typing_extensions.NotRequired[OnAgentCallEnum] diff --git a/src/humanloop/requests/agent_kernel_request.py b/src/humanloop/requests/agent_kernel_request.py new file mode 100644 index 00000000..0ca76571 --- /dev/null +++ b/src/humanloop/requests/agent_kernel_request.py @@ -0,0 +1,112 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions +import typing_extensions +from ..types.model_endpoints import ModelEndpoints +from .agent_kernel_request_template import AgentKernelRequestTemplateParams +from ..types.template_language import TemplateLanguage +from ..types.model_providers import ModelProviders +from .agent_kernel_request_stop import AgentKernelRequestStopParams +import typing +from .response_format import ResponseFormatParams +from .agent_kernel_request_reasoning_effort import AgentKernelRequestReasoningEffortParams +from .agent_kernel_request_tools_item import AgentKernelRequestToolsItemParams + + +class AgentKernelRequestParams(typing_extensions.TypedDict): + """ + Base class used by both PromptKernelRequest and AgentKernelRequest. + + Contains the consistent Prompt-related fields. + """ + + model: str + """ + The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models) + """ + + endpoint: typing_extensions.NotRequired[ModelEndpoints] + """ + The provider model endpoint used. + """ + + template: typing_extensions.NotRequired[AgentKernelRequestTemplateParams] + """ + The template contains the main structure and instructions for the model, including input variables for dynamic values. + + For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable. + For completion models, provide a prompt template as a string. + + Input variables should be specified with double curly bracket syntax: `{{input_name}}`. + """ + + template_language: typing_extensions.NotRequired[TemplateLanguage] + """ + The template language to use for rendering the template. 
+ """ + + provider: typing_extensions.NotRequired[ModelProviders] + """ + The company providing the underlying model service. + """ + + max_tokens: typing_extensions.NotRequired[int] + """ + The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt + """ + + temperature: typing_extensions.NotRequired[float] + """ + What sampling temperature to use when making a generation. Higher values means the model will be more creative. + """ + + top_p: typing_extensions.NotRequired[float] + """ + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. + """ + + stop: typing_extensions.NotRequired[AgentKernelRequestStopParams] + """ + The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence. + """ + + presence_penalty: typing_extensions.NotRequired[float] + """ + Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far. + """ + + frequency_penalty: typing_extensions.NotRequired[float] + """ + Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far. + """ + + other: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Other parameter values to be passed to the provider call. + """ + + seed: typing_extensions.NotRequired[int] + """ + If specified, model will make a best effort to sample deterministically, but it is not guaranteed. + """ + + response_format: typing_extensions.NotRequired[ResponseFormatParams] + """ + The format of the response. Only `{"type": "json_object"}` is currently supported for chat. + """ + + reasoning_effort: typing_extensions.NotRequired[AgentKernelRequestReasoningEffortParams] + """ + Guidance on how many reasoning tokens it should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect a OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. + """ + + tools: typing_extensions.NotRequired[typing.Sequence[AgentKernelRequestToolsItemParams]] + attributes: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used. + """ + + max_iterations: typing_extensions.NotRequired[int] + """ + The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called. + """ diff --git a/src/humanloop/requests/agent_kernel_request_reasoning_effort.py b/src/humanloop/requests/agent_kernel_request_reasoning_effort.py new file mode 100644 index 00000000..ea32bc11 --- /dev/null +++ b/src/humanloop/requests/agent_kernel_request_reasoning_effort.py @@ -0,0 +1,6 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing +from ..types.open_ai_reasoning_effort import OpenAiReasoningEffort + +AgentKernelRequestReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int] diff --git a/src/humanloop/requests/agent_kernel_request_stop.py b/src/humanloop/requests/agent_kernel_request_stop.py new file mode 100644 index 00000000..eae95d35 --- /dev/null +++ b/src/humanloop/requests/agent_kernel_request_stop.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +AgentKernelRequestStopParams = typing.Union[str, typing.Sequence[str]] diff --git a/src/humanloop/requests/agent_kernel_request_template.py b/src/humanloop/requests/agent_kernel_request_template.py new file mode 100644 index 00000000..7261667d --- /dev/null +++ b/src/humanloop/requests/agent_kernel_request_template.py @@ -0,0 +1,6 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from .chat_message import ChatMessageParams + +AgentKernelRequestTemplateParams = typing.Union[str, typing.Sequence[ChatMessageParams]] diff --git a/src/humanloop/requests/agent_kernel_request_tools_item.py b/src/humanloop/requests/agent_kernel_request_tools_item.py new file mode 100644 index 00000000..27b63984 --- /dev/null +++ b/src/humanloop/requests/agent_kernel_request_tools_item.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from .agent_linked_file_request import AgentLinkedFileRequestParams +from .agent_inline_tool import AgentInlineToolParams + +AgentKernelRequestToolsItemParams = typing.Union[AgentLinkedFileRequestParams, AgentInlineToolParams] diff --git a/src/humanloop/requests/agent_linked_file_request.py b/src/humanloop/requests/agent_linked_file_request.py new file mode 100644 index 00000000..18fc2274 --- /dev/null +++ b/src/humanloop/requests/agent_linked_file_request.py @@ -0,0 +1,13 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions +import typing +from .linked_file_request import LinkedFileRequestParams +import typing_extensions +from ..types.on_agent_call_enum import OnAgentCallEnum + + +class AgentLinkedFileRequestParams(typing_extensions.TypedDict): + type: typing.Literal["file"] + link: LinkedFileRequestParams + on_agent_call: typing_extensions.NotRequired[OnAgentCallEnum] diff --git a/src/humanloop/requests/agent_linked_file_response.py b/src/humanloop/requests/agent_linked_file_response.py new file mode 100644 index 00000000..8a690a77 --- /dev/null +++ b/src/humanloop/requests/agent_linked_file_response.py @@ -0,0 +1,19 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from __future__ import annotations +import typing_extensions +import typing +from .linked_file_request import LinkedFileRequestParams +import typing_extensions +from ..types.on_agent_call_enum import OnAgentCallEnum +import typing + +if typing.TYPE_CHECKING: + from .agent_linked_file_response_file import AgentLinkedFileResponseFileParams + + +class AgentLinkedFileResponseParams(typing_extensions.TypedDict): + type: typing.Literal["file"] + link: LinkedFileRequestParams + on_agent_call: typing_extensions.NotRequired[OnAgentCallEnum] + file: typing_extensions.NotRequired["AgentLinkedFileResponseFileParams"] diff --git a/src/humanloop/requests/agent_linked_file_response_file.py b/src/humanloop/requests/agent_linked_file_response_file.py new file mode 100644 index 00000000..bb328de2 --- /dev/null +++ b/src/humanloop/requests/agent_linked_file_response_file.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations +import typing +from .dataset_response import DatasetResponseParams +import typing + +if typing.TYPE_CHECKING: + from .prompt_response import PromptResponseParams + from .tool_response import ToolResponseParams + from .evaluator_response import EvaluatorResponseParams + from .flow_response import FlowResponseParams + from .agent_response import AgentResponseParams +AgentLinkedFileResponseFileParams = typing.Union[ + "PromptResponseParams", + "ToolResponseParams", + DatasetResponseParams, + "EvaluatorResponseParams", + "FlowResponseParams", + "AgentResponseParams", +] diff --git a/src/humanloop/requests/agent_log_response.py b/src/humanloop/requests/agent_log_response.py new file mode 100644 index 00000000..0cb24b8a --- /dev/null +++ b/src/humanloop/requests/agent_log_response.py @@ -0,0 +1,201 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations +import typing_extensions +import typing_extensions +from .chat_message import ChatMessageParams +import typing +from .agent_log_response_tool_choice import AgentLogResponseToolChoiceParams +from .agent_response import AgentResponseParams +import datetime as dt +from ..types.log_status import LogStatus +import typing + +if typing.TYPE_CHECKING: + from .evaluator_log_response import EvaluatorLogResponseParams + from .log_response import LogResponseParams + + +class AgentLogResponseParams(typing_extensions.TypedDict): + """ + Response model for an Agent Log. + """ + + output_message: typing_extensions.NotRequired[ChatMessageParams] + """ + The message returned by the provider. + """ + + prompt_tokens: typing_extensions.NotRequired[int] + """ + Number of tokens in the prompt used to generate the output. + """ + + reasoning_tokens: typing_extensions.NotRequired[int] + """ + Number of reasoning tokens used to generate the output. + """ + + output_tokens: typing_extensions.NotRequired[int] + """ + Number of tokens in the output generated by the model. + """ + + prompt_cost: typing_extensions.NotRequired[float] + """ + Cost in dollars associated to the tokens in the prompt. + """ + + output_cost: typing_extensions.NotRequired[float] + """ + Cost in dollars associated to the tokens in the output. + """ + + finish_reason: typing_extensions.NotRequired[str] + """ + Reason the generation finished. + """ + + messages: typing_extensions.NotRequired[typing.Sequence[ChatMessageParams]] + """ + The messages passed to the provider chat endpoint.
+ """ + + tool_choice: typing_extensions.NotRequired[AgentLogResponseToolChoiceParams] + """ + Controls how the model uses tools. The following options are supported: + - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. + - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. + - `'required'` means the model must call one or more of the provided tools. + - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. + """ + + agent: AgentResponseParams + """ + Agent that generated the Log. + """ + + start_time: typing_extensions.NotRequired[dt.datetime] + """ + When the logged event started. + """ + + end_time: typing_extensions.NotRequired[dt.datetime] + """ + When the logged event ended. + """ + + output: typing_extensions.NotRequired[str] + """ + Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + """ + + created_at: typing_extensions.NotRequired[dt.datetime] + """ + User defined timestamp for when the log was created. + """ + + error: typing_extensions.NotRequired[str] + """ + Error message if the log is an error. + """ + + provider_latency: typing_extensions.NotRequired[float] + """ + Duration of the logged event in seconds. + """ + + stdout: typing_extensions.NotRequired[str] + """ + Captured log and debug statements. + """ + + provider_request: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Raw request sent to provider. + """ + + provider_response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Raw response received the provider. + """ + + inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + The inputs passed to the prompt template. + """ + + source: typing_extensions.NotRequired[str] + """ + Identifies where the model was called from. + """ + + metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Any additional metadata to record. + """ + + log_status: typing_extensions.NotRequired[LogStatus] + """ + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + """ + + source_datapoint_id: typing_extensions.NotRequired[str] + """ + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + """ + + trace_parent_id: typing_extensions.NotRequired[str] + """ + The ID of the parent Log to nest this Log under in a Trace. + """ + + batches: typing_extensions.NotRequired[typing.Sequence[str]] + """ + Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations + """ + + user: typing_extensions.NotRequired[str] + """ + End-user ID related to the Log. + """ + + environment: typing_extensions.NotRequired[str] + """ + The name of the Environment the Log is associated to. 
+ """ + + save: typing_extensions.NotRequired[bool] + """ + Whether the request/response payloads will be stored on Humanloop. + """ + + log_id: typing_extensions.NotRequired[str] + """ + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + """ + + id: str + """ + Unique identifier for the Log. + """ + + evaluator_logs: typing.Sequence["EvaluatorLogResponseParams"] + """ + List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log. + """ + + trace_flow_id: typing_extensions.NotRequired[str] + """ + Identifier for the Flow that the Trace belongs to. + """ + + trace_id: typing_extensions.NotRequired[str] + """ + Identifier for the Trace that the Log belongs to. + """ + + trace_children: typing_extensions.NotRequired[typing.Sequence["LogResponseParams"]] + """ + Logs nested under this Log in the Trace. + """ diff --git a/src/humanloop/requests/agent_log_response_tool_choice.py b/src/humanloop/requests/agent_log_response_tool_choice.py new file mode 100644 index 00000000..e239a69c --- /dev/null +++ b/src/humanloop/requests/agent_log_response_tool_choice.py @@ -0,0 +1,8 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from .tool_choice import ToolChoiceParams + +AgentLogResponseToolChoiceParams = typing.Union[ + typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams +] diff --git a/src/humanloop/requests/agent_log_stream_response.py b/src/humanloop/requests/agent_log_stream_response.py new file mode 100644 index 00000000..710d55cf --- /dev/null +++ b/src/humanloop/requests/agent_log_stream_response.py @@ -0,0 +1,87 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions +import typing_extensions +import datetime as dt +from .chat_message import ChatMessageParams + + +class AgentLogStreamResponseParams(typing_extensions.TypedDict): + """ + Prompt specific log output shared between PromptLogRequest and PromptCallLogResponse. + """ + + output: typing_extensions.NotRequired[str] + """ + Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + """ + + created_at: typing_extensions.NotRequired[dt.datetime] + """ + User defined timestamp for when the log was created. + """ + + error: typing_extensions.NotRequired[str] + """ + Error message if the log is an error. + """ + + provider_latency: typing_extensions.NotRequired[float] + """ + Duration of the logged event in seconds. + """ + + stdout: typing_extensions.NotRequired[str] + """ + Captured log and debug statements. + """ + + output_message: typing_extensions.NotRequired[ChatMessageParams] + """ + The message returned by the provider. + """ + + prompt_tokens: typing_extensions.NotRequired[int] + """ + Number of tokens in the prompt used to generate the output. + """ + + reasoning_tokens: typing_extensions.NotRequired[int] + """ + Number of reasoning tokens used to generate the output. + """ + + output_tokens: typing_extensions.NotRequired[int] + """ + Number of tokens in the output generated by the model. + """ + + prompt_cost: typing_extensions.NotRequired[float] + """ + Cost in dollars associated to the tokens in the prompt. + """ + + output_cost: typing_extensions.NotRequired[float] + """ + Cost in dollars associated to the tokens in the output. + """ + + finish_reason: typing_extensions.NotRequired[str] + """ + Reason the generation finished. 
+ """ + + id: str + """ + ID of the log. + """ + + agent_id: str + """ + ID of the Agent the log belongs to. + """ + + version_id: str + """ + ID of the specific version of the Agent. + """ diff --git a/src/humanloop/requests/agent_response.py b/src/humanloop/requests/agent_response.py new file mode 100644 index 00000000..047904a7 --- /dev/null +++ b/src/humanloop/requests/agent_response.py @@ -0,0 +1,237 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations +import typing_extensions +import typing_extensions +from ..types.model_endpoints import ModelEndpoints +from .agent_response_template import AgentResponseTemplateParams +from ..types.template_language import TemplateLanguage +from ..types.model_providers import ModelProviders +from .agent_response_stop import AgentResponseStopParams +import typing +from .response_format import ResponseFormatParams +from .agent_response_reasoning_effort import AgentResponseReasoningEffortParams +from .environment_response import EnvironmentResponseParams +import datetime as dt +from ..types.user_response import UserResponse +from ..types.version_status import VersionStatus +from .input_response import InputResponseParams +from .evaluator_aggregate import EvaluatorAggregateParams +import typing + +if typing.TYPE_CHECKING: + from .agent_response_tools_item import AgentResponseToolsItemParams + from .monitoring_evaluator_response import MonitoringEvaluatorResponseParams + + +class AgentResponseParams(typing_extensions.TypedDict): + """ + Base type that all File Responses should inherit from. + + Attributes defined here are common to all File Responses and should be overridden + in the inheriting classes with documentation and appropriate Field definitions. + """ + + path: str + """ + Path of the Agent, including the name, which is used as a unique identifier. + """ + + id: str + """ + Unique identifier for the Agent. + """ + + directory_id: typing_extensions.NotRequired[str] + """ + ID of the directory that the file is in on Humanloop. + """ + + model: str + """ + The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models) + """ + + endpoint: typing_extensions.NotRequired[ModelEndpoints] + """ + The provider model endpoint used. + """ + + template: typing_extensions.NotRequired[AgentResponseTemplateParams] + """ + The template contains the main structure and instructions for the model, including input variables for dynamic values. + + For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable. + For completion models, provide a prompt template as a string. + + Input variables should be specified with double curly bracket syntax: `{{input_name}}`. + """ + + template_language: typing_extensions.NotRequired[TemplateLanguage] + """ + The template language to use for rendering the template. + """ + + provider: typing_extensions.NotRequired[ModelProviders] + """ + The company providing the underlying model service. + """ + + max_tokens: typing_extensions.NotRequired[int] + """ + The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt + """ + + temperature: typing_extensions.NotRequired[float] + """ + What sampling temperature to use when making a generation. Higher values means the model will be more creative. 
+ """ + + top_p: typing_extensions.NotRequired[float] + """ + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. + """ + + stop: typing_extensions.NotRequired[AgentResponseStopParams] + """ + The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence. + """ + + presence_penalty: typing_extensions.NotRequired[float] + """ + Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far. + """ + + frequency_penalty: typing_extensions.NotRequired[float] + """ + Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far. + """ + + other: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Other parameter values to be passed to the provider call. + """ + + seed: typing_extensions.NotRequired[int] + """ + If specified, model will make a best effort to sample deterministically, but it is not guaranteed. + """ + + response_format: typing_extensions.NotRequired[ResponseFormatParams] + """ + The format of the response. Only `{"type": "json_object"}` is currently supported for chat. + """ + + reasoning_effort: typing_extensions.NotRequired[AgentResponseReasoningEffortParams] + """ + Guidance on how many reasoning tokens it should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect a OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. + """ + + tools: typing.Sequence["AgentResponseToolsItemParams"] + """ + List of tools that the Agent can call. These can be linked files or inline tools. + """ + + attributes: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used. + """ + + max_iterations: typing_extensions.NotRequired[int] + """ + The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called. + """ + + version_name: typing_extensions.NotRequired[str] + """ + Unique name for the Agent version. Version names must be unique for a given Agent. + """ + + version_description: typing_extensions.NotRequired[str] + """ + Description of the version, e.g., the changes made in this version. + """ + + description: typing_extensions.NotRequired[str] + """ + Description of the Agent. + """ + + tags: typing_extensions.NotRequired[typing.Sequence[str]] + """ + List of tags associated with the file. + """ + + readme: typing_extensions.NotRequired[str] + """ + Long description of the file. + """ + + name: str + """ + Name of the Agent. + """ + + schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + The JSON schema for the Prompt. + """ + + version_id: str + """ + Unique identifier for the specific Agent Version. If no query params provided, the default deployed Agent Version is returned. + """ + + type: typing_extensions.NotRequired[typing.Literal["agent"]] + environments: typing_extensions.NotRequired[typing.Sequence[EnvironmentResponseParams]] + """ + The list of environments the Agent Version is deployed to. 
+ """ + + created_at: dt.datetime + updated_at: dt.datetime + created_by: typing_extensions.NotRequired[UserResponse] + """ + The user who created the Agent. + """ + + committed_by: typing_extensions.NotRequired[UserResponse] + """ + The user who committed the Agent Version. + """ + + committed_at: typing_extensions.NotRequired[dt.datetime] + """ + The date and time the Agent Version was committed. + """ + + status: VersionStatus + """ + The status of the Agent Version. + """ + + last_used_at: dt.datetime + version_logs_count: int + """ + The number of logs that have been generated for this Agent Version + """ + + total_logs_count: int + """ + The number of logs that have been generated across all Agent Versions + """ + + inputs: typing.Sequence[InputResponseParams] + """ + Inputs associated to the Agent. Inputs correspond to any of the variables used within the Agent template. + """ + + evaluators: typing_extensions.NotRequired[typing.Sequence["MonitoringEvaluatorResponseParams"]] + """ + Evaluators that have been attached to this Agent that are used for monitoring logs. + """ + + evaluator_aggregates: typing_extensions.NotRequired[typing.Sequence[EvaluatorAggregateParams]] + """ + Aggregation of Evaluator results for the Agent Version. + """ diff --git a/src/humanloop/requests/agent_response_reasoning_effort.py b/src/humanloop/requests/agent_response_reasoning_effort.py new file mode 100644 index 00000000..de1b969f --- /dev/null +++ b/src/humanloop/requests/agent_response_reasoning_effort.py @@ -0,0 +1,6 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ..types.open_ai_reasoning_effort import OpenAiReasoningEffort + +AgentResponseReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int] diff --git a/src/humanloop/requests/agent_response_stop.py b/src/humanloop/requests/agent_response_stop.py new file mode 100644 index 00000000..a395ee73 --- /dev/null +++ b/src/humanloop/requests/agent_response_stop.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +AgentResponseStopParams = typing.Union[str, typing.Sequence[str]] diff --git a/src/humanloop/requests/agent_response_template.py b/src/humanloop/requests/agent_response_template.py new file mode 100644 index 00000000..94be65f1 --- /dev/null +++ b/src/humanloop/requests/agent_response_template.py @@ -0,0 +1,6 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from .chat_message import ChatMessageParams + +AgentResponseTemplateParams = typing.Union[str, typing.Sequence[ChatMessageParams]] diff --git a/src/humanloop/requests/agent_response_tools_item.py b/src/humanloop/requests/agent_response_tools_item.py new file mode 100644 index 00000000..5181579b --- /dev/null +++ b/src/humanloop/requests/agent_response_tools_item.py @@ -0,0 +1,10 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from __future__ import annotations +import typing +from .agent_inline_tool import AgentInlineToolParams +import typing + +if typing.TYPE_CHECKING: + from .agent_linked_file_response import AgentLinkedFileResponseParams +AgentResponseToolsItemParams = typing.Union["AgentLinkedFileResponseParams", AgentInlineToolParams] diff --git a/src/humanloop/requests/anthropic_redacted_thinking_content.py b/src/humanloop/requests/anthropic_redacted_thinking_content.py new file mode 100644 index 00000000..3b328f7f --- /dev/null +++ b/src/humanloop/requests/anthropic_redacted_thinking_content.py @@ -0,0 +1,12 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions +import typing + + +class AnthropicRedactedThinkingContentParams(typing_extensions.TypedDict): + type: typing.Literal["redacted_thinking"] + data: str + """ + Thinking block that Anthropic redacted for safety reasons. The user is expected to pass the block back to Anthropic. + """ diff --git a/src/humanloop/requests/anthropic_thinking_content.py b/src/humanloop/requests/anthropic_thinking_content.py new file mode 100644 index 00000000..34f6f99f --- /dev/null +++ b/src/humanloop/requests/anthropic_thinking_content.py @@ -0,0 +1,17 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions +import typing + + +class AnthropicThinkingContentParams(typing_extensions.TypedDict): + type: typing.Literal["thinking"] + thinking: str + """ + Model's chain-of-thought for providing the response. + """ + + signature: str + """ + Cryptographic signature that verifies the thinking block was generated by Anthropic. + """ diff --git a/src/humanloop/requests/chat_message.py b/src/humanloop/requests/chat_message.py index cab8466d..6011653a 100644 --- a/src/humanloop/requests/chat_message.py +++ b/src/humanloop/requests/chat_message.py @@ -6,6 +6,7 @@ from ..types.chat_role import ChatRole import typing from .tool_call import ToolCallParams +from .chat_message_thinking_item import ChatMessageThinkingItemParams class ChatMessageParams(typing_extensions.TypedDict): @@ -33,3 +34,8 @@ class ChatMessageParams(typing_extensions.TypedDict): """ A list of tool calls requested by the assistant. """ + + thinking: typing_extensions.NotRequired[typing.Sequence[ChatMessageThinkingItemParams]] + """ + Model's chain-of-thought for providing the response. Present on assistant messages if the model supports it. + """ diff --git a/src/humanloop/requests/chat_message_thinking_item.py b/src/humanloop/requests/chat_message_thinking_item.py new file mode 100644 index 00000000..0691f4d8 --- /dev/null +++ b/src/humanloop/requests/chat_message_thinking_item.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from .anthropic_thinking_content import AnthropicThinkingContentParams +from .anthropic_redacted_thinking_content import AnthropicRedactedThinkingContentParams + +ChatMessageThinkingItemParams = typing.Union[AnthropicThinkingContentParams, AnthropicRedactedThinkingContentParams] diff --git a/src/humanloop/requests/create_agent_log_response.py b/src/humanloop/requests/create_agent_log_response.py new file mode 100644 index 00000000..b1715517 --- /dev/null +++ b/src/humanloop/requests/create_agent_log_response.py @@ -0,0 +1,31 @@ +# This file was auto-generated by Fern from our API Definition.
+ +import typing_extensions +import typing_extensions +from ..types.log_status import LogStatus + + +class CreateAgentLogResponseParams(typing_extensions.TypedDict): + """ + Response for an Agent Log. + """ + + id: str + """ + Unique identifier for the Log. + """ + + agent_id: str + """ + Unique identifier for the Agent. + """ + + version_id: str + """ + Unique identifier for the Agent Version. + """ + + log_status: typing_extensions.NotRequired[LogStatus] + """ + Status of the Agent Log. When an Agent Log is marked as `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs. + """ diff --git a/src/humanloop/requests/dataset_response.py b/src/humanloop/requests/dataset_response.py index 1d59ed4b..1cffd2b2 100644 --- a/src/humanloop/requests/dataset_response.py +++ b/src/humanloop/requests/dataset_response.py @@ -42,6 +42,11 @@ class DatasetResponseParams(typing_extensions.TypedDict): Description of the Dataset. """ + schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + The JSON schema for the File. + """ + readme: typing_extensions.NotRequired[str] """ Long description of the file. diff --git a/src/humanloop/requests/directory_with_parents_and_children_response_files_item.py b/src/humanloop/requests/directory_with_parents_and_children_response_files_item.py index f101bf15..db9370b9 100644 --- a/src/humanloop/requests/directory_with_parents_and_children_response_files_item.py +++ b/src/humanloop/requests/directory_with_parents_and_children_response_files_item.py @@ -6,7 +6,13 @@ from .evaluator_response import EvaluatorResponseParams from .dataset_response import DatasetResponseParams from .flow_response import FlowResponseParams +from .agent_response import AgentResponseParams DirectoryWithParentsAndChildrenResponseFilesItemParams = typing.Union[ - PromptResponseParams, ToolResponseParams, EvaluatorResponseParams, DatasetResponseParams, FlowResponseParams + PromptResponseParams, + ToolResponseParams, + EvaluatorResponseParams, + DatasetResponseParams, + FlowResponseParams, + AgentResponseParams, ] diff --git a/src/humanloop/requests/evaluator_response.py b/src/humanloop/requests/evaluator_response.py index 908eeb2d..1ff836fb 100644 --- a/src/humanloop/requests/evaluator_response.py +++ b/src/humanloop/requests/evaluator_response.py @@ -57,6 +57,11 @@ class EvaluatorResponseParams(typing_extensions.TypedDict): Description of the Evaluator. """ + schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + The JSON schema for the File. + """ + readme: typing_extensions.NotRequired[str] """ Long description of the file.
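The thinking-related additions above fit together: ChatMessageParams gains an optional `thinking` field whose items are either AnthropicThinkingContentParams or AnthropicRedactedThinkingContentParams. A minimal sketch of how the new TypedDicts compose; the values are placeholders, and it assumes ChatMessageParams keeps its existing `role`/`content` fields and is re-exported from humanloop.requests like the other Params types:

# Sketch, not part of the patch: placeholder values throughout.
from humanloop.requests import ChatMessageParams

# TypedDicts are plain dicts at runtime, so literals type-check directly.
message: ChatMessageParams = {
    "role": "assistant",
    "content": "The answer is 42.",
    "thinking": [
        {
            "type": "thinking",
            "thinking": "Step through the question before answering...",
            "signature": "sig-from-anthropic",  # placeholder signature value
        },
        {
            "type": "redacted_thinking",
            "data": "opaque-redacted-block",  # pass back to Anthropic unchanged
        },
    ],
}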
diff --git a/src/humanloop/requests/file_environment_response_file.py b/src/humanloop/requests/file_environment_response_file.py index 4ac6b0c3..04c0b51d 100644 --- a/src/humanloop/requests/file_environment_response_file.py +++ b/src/humanloop/requests/file_environment_response_file.py @@ -6,7 +6,13 @@ from .dataset_response import DatasetResponseParams from .evaluator_response import EvaluatorResponseParams from .flow_response import FlowResponseParams +from .agent_response import AgentResponseParams FileEnvironmentResponseFileParams = typing.Union[ - PromptResponseParams, ToolResponseParams, DatasetResponseParams, EvaluatorResponseParams, FlowResponseParams + PromptResponseParams, + ToolResponseParams, + DatasetResponseParams, + EvaluatorResponseParams, + FlowResponseParams, + AgentResponseParams, ] diff --git a/src/humanloop/requests/file_environment_variable_request.py b/src/humanloop/requests/file_environment_variable_request.py new file mode 100644 index 00000000..bb70bda4 --- /dev/null +++ b/src/humanloop/requests/file_environment_variable_request.py @@ -0,0 +1,15 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions + + +class FileEnvironmentVariableRequestParams(typing_extensions.TypedDict): + name: str + """ + Name of the environment variable. + """ + + value: str + """ + Value of the environment variable. + """ diff --git a/src/humanloop/requests/flow_response.py b/src/humanloop/requests/flow_response.py index 18a26d10..eebc9fd7 100644 --- a/src/humanloop/requests/flow_response.py +++ b/src/humanloop/requests/flow_response.py @@ -59,6 +59,11 @@ class FlowResponseParams(typing_extensions.TypedDict): Description of the Flow. """ + schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + The JSON schema for the File. + """ + readme: typing_extensions.NotRequired[str] """ Long description of the file. diff --git a/src/humanloop/requests/linked_file_request.py b/src/humanloop/requests/linked_file_request.py new file mode 100644 index 00000000..2bbba19c --- /dev/null +++ b/src/humanloop/requests/linked_file_request.py @@ -0,0 +1,10 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions +import typing_extensions + + +class LinkedFileRequestParams(typing_extensions.TypedDict): + file_id: str + environment_id: typing_extensions.NotRequired[str] + version_id: typing_extensions.NotRequired[str] diff --git a/src/humanloop/requests/list_agents.py b/src/humanloop/requests/list_agents.py new file mode 100644 index 00000000..4a72f1db --- /dev/null +++ b/src/humanloop/requests/list_agents.py @@ -0,0 +1,12 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions +import typing +from .agent_response import AgentResponseParams + + +class ListAgentsParams(typing_extensions.TypedDict): + records: typing.Sequence[AgentResponseParams] + """ + The list of Agents. 
+ """ diff --git a/src/humanloop/requests/log_response.py b/src/humanloop/requests/log_response.py index 15a4cff6..cb3ce212 100644 --- a/src/humanloop/requests/log_response.py +++ b/src/humanloop/requests/log_response.py @@ -9,6 +9,11 @@ from .tool_log_response import ToolLogResponseParams from .evaluator_log_response import EvaluatorLogResponseParams from .flow_log_response import FlowLogResponseParams + from .agent_log_response import AgentLogResponseParams LogResponseParams = typing.Union[ - "PromptLogResponseParams", "ToolLogResponseParams", "EvaluatorLogResponseParams", "FlowLogResponseParams" + "PromptLogResponseParams", + "ToolLogResponseParams", + "EvaluatorLogResponseParams", + "FlowLogResponseParams", + "AgentLogResponseParams", ] diff --git a/src/humanloop/requests/log_stream_response.py b/src/humanloop/requests/log_stream_response.py new file mode 100644 index 00000000..e142e7fb --- /dev/null +++ b/src/humanloop/requests/log_stream_response.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from .prompt_call_stream_response import PromptCallStreamResponseParams +from .agent_log_stream_response import AgentLogStreamResponseParams + +LogStreamResponseParams = typing.Union[PromptCallStreamResponseParams, AgentLogStreamResponseParams] diff --git a/src/humanloop/requests/paginated_data_agent_response.py b/src/humanloop/requests/paginated_data_agent_response.py new file mode 100644 index 00000000..c8d67533 --- /dev/null +++ b/src/humanloop/requests/paginated_data_agent_response.py @@ -0,0 +1,12 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions +import typing +from .agent_response import AgentResponseParams + + +class PaginatedDataAgentResponseParams(typing_extensions.TypedDict): + records: typing.Sequence[AgentResponseParams] + page: int + size: int + total: int diff --git a/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py b/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py similarity index 65% rename from src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py rename to src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py index cf8bc4bf..0e7adb64 100644 --- a/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py +++ b/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py @@ -2,16 +2,16 @@ import typing_extensions import typing -from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item import ( - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams, +from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item import ( + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams, ) -class PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseParams( +class 
PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseParams( typing_extensions.TypedDict ): records: typing.Sequence[ - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams ] page: int size: int diff --git a/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py b/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py similarity index 58% rename from src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py rename to src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py index 1ba74108..b43a5521 100644 --- a/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py +++ b/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py @@ -6,9 +6,13 @@ from .dataset_response import DatasetResponseParams from .evaluator_response import EvaluatorResponseParams from .flow_response import FlowResponseParams +from .agent_response import AgentResponseParams -PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams = ( - typing.Union[ - PromptResponseParams, ToolResponseParams, DatasetResponseParams, EvaluatorResponseParams, FlowResponseParams - ] -) +PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams = typing.Union[ + PromptResponseParams, + ToolResponseParams, + DatasetResponseParams, + EvaluatorResponseParams, + FlowResponseParams, + AgentResponseParams, +] diff --git a/src/humanloop/requests/populate_template_response.py b/src/humanloop/requests/populate_template_response.py index 190341b0..491cacd3 100644 --- a/src/humanloop/requests/populate_template_response.py +++ b/src/humanloop/requests/populate_template_response.py @@ -9,7 +9,7 @@ from .populate_template_response_stop import PopulateTemplateResponseStopParams import typing from .response_format import ResponseFormatParams -from ..types.reasoning_effort import ReasoningEffort +from .populate_template_response_reasoning_effort import PopulateTemplateResponseReasoningEffortParams from .tool_function import ToolFunctionParams from .linked_tool_response import LinkedToolResponseParams from .environment_response import EnvironmentResponseParams @@ -119,9 +119,9 @@ class PopulateTemplateResponseParams(typing_extensions.TypedDict): The format of the response. Only `{"type": "json_object"}` is currently supported for chat. """ - reasoning_effort: typing_extensions.NotRequired[ReasoningEffort] + reasoning_effort: typing_extensions.NotRequired[PopulateTemplateResponseReasoningEffortParams] """ - Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models. + Guidance on how many reasoning tokens the model should generate before creating a response to the prompt.
OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. """ tools: typing_extensions.NotRequired[typing.Sequence[ToolFunctionParams]] @@ -169,6 +169,11 @@ class PopulateTemplateResponseParams(typing_extensions.TypedDict): Name of the Prompt. """ + schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + The JSON schema for the Prompt. + """ + version_id: str """ Unique identifier for the specific Prompt Version. If no query params provided, the default deployed Prompt Version is returned. diff --git a/src/humanloop/requests/populate_template_response_reasoning_effort.py b/src/humanloop/requests/populate_template_response_reasoning_effort.py new file mode 100644 index 00000000..6b1dd46a --- /dev/null +++ b/src/humanloop/requests/populate_template_response_reasoning_effort.py @@ -0,0 +1,6 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ..types.open_ai_reasoning_effort import OpenAiReasoningEffort + +PopulateTemplateResponseReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int] diff --git a/src/humanloop/requests/prompt_kernel_request.py b/src/humanloop/requests/prompt_kernel_request.py index 61355166..1e4f56de 100644 --- a/src/humanloop/requests/prompt_kernel_request.py +++ b/src/humanloop/requests/prompt_kernel_request.py @@ -9,11 +9,17 @@ from .prompt_kernel_request_stop import PromptKernelRequestStopParams import typing from .response_format import ResponseFormatParams -from ..types.reasoning_effort import ReasoningEffort +from .prompt_kernel_request_reasoning_effort import PromptKernelRequestReasoningEffortParams from .tool_function import ToolFunctionParams class PromptKernelRequestParams(typing_extensions.TypedDict): + """ + Base class used by both PromptKernelRequest and AgentKernelRequest. + + Contains the Prompt-related fields shared by both. + """ + model: str """ The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models) @@ -89,9 +95,9 @@ class PromptKernelRequestParams(typing_extensions.TypedDict): The format of the response. Only `{"type": "json_object"}` is currently supported for chat. """ - reasoning_effort: typing_extensions.NotRequired[ReasoningEffort] + reasoning_effort: typing_extensions.NotRequired[PromptKernelRequestReasoningEffortParams] """ - Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models. + Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. """ tools: typing_extensions.NotRequired[typing.Sequence[ToolFunctionParams]] diff --git a/src/humanloop/requests/prompt_kernel_request_reasoning_effort.py b/src/humanloop/requests/prompt_kernel_request_reasoning_effort.py new file mode 100644 index 00000000..0c3d194b --- /dev/null +++ b/src/humanloop/requests/prompt_kernel_request_reasoning_effort.py @@ -0,0 +1,6 @@ +# This file was auto-generated by Fern from our API Definition.
+ +import typing +from ..types.open_ai_reasoning_effort import OpenAiReasoningEffort + +PromptKernelRequestReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int] diff --git a/src/humanloop/requests/prompt_response.py b/src/humanloop/requests/prompt_response.py index 912866c5..b6ff03df 100644 --- a/src/humanloop/requests/prompt_response.py +++ b/src/humanloop/requests/prompt_response.py @@ -10,7 +10,7 @@ from .prompt_response_stop import PromptResponseStopParams import typing from .response_format import ResponseFormatParams -from ..types.reasoning_effort import ReasoningEffort +from .prompt_response_reasoning_effort import PromptResponseReasoningEffortParams from .tool_function import ToolFunctionParams from .linked_tool_response import LinkedToolResponseParams from .environment_response import EnvironmentResponseParams @@ -122,9 +122,9 @@ class PromptResponseParams(typing_extensions.TypedDict): The format of the response. Only `{"type": "json_object"}` is currently supported for chat. """ - reasoning_effort: typing_extensions.NotRequired[ReasoningEffort] + reasoning_effort: typing_extensions.NotRequired[PromptResponseReasoningEffortParams] """ - Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models. + Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. """ tools: typing_extensions.NotRequired[typing.Sequence[ToolFunctionParams]] @@ -172,6 +172,11 @@ class PromptResponseParams(typing_extensions.TypedDict): Name of the Prompt. """ + schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + The JSON schema for the Prompt. + """ + version_id: str """ Unique identifier for the specific Prompt Version. If no query params provided, the default deployed Prompt Version is returned. diff --git a/src/humanloop/requests/prompt_response_reasoning_effort.py b/src/humanloop/requests/prompt_response_reasoning_effort.py new file mode 100644 index 00000000..4d019051 --- /dev/null +++ b/src/humanloop/requests/prompt_response_reasoning_effort.py @@ -0,0 +1,6 @@ +# This file was auto-generated by Fern from our API Definition.
+ +import typing +from ..types.open_ai_reasoning_effort import OpenAiReasoningEffort + +PromptResponseReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int] diff --git a/src/humanloop/requests/run_version_response.py b/src/humanloop/requests/run_version_response.py index 879ea25c..569d0d76 100644 --- a/src/humanloop/requests/run_version_response.py +++ b/src/humanloop/requests/run_version_response.py @@ -5,7 +5,8 @@ from .tool_response import ToolResponseParams from .evaluator_response import EvaluatorResponseParams from .flow_response import FlowResponseParams +from .agent_response import AgentResponseParams RunVersionResponseParams = typing.Union[ - PromptResponseParams, ToolResponseParams, EvaluatorResponseParams, FlowResponseParams + PromptResponseParams, ToolResponseParams, EvaluatorResponseParams, FlowResponseParams, AgentResponseParams ] diff --git a/src/humanloop/requests/tool_call_response.py b/src/humanloop/requests/tool_call_response.py new file mode 100644 index 00000000..1c92b28f --- /dev/null +++ b/src/humanloop/requests/tool_call_response.py @@ -0,0 +1,146 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions +import typing_extensions +import datetime as dt +from .tool_response import ToolResponseParams +import typing +from ..types.log_status import LogStatus +from .evaluator_log_response import EvaluatorLogResponseParams +from .log_response import LogResponseParams + + +class ToolCallResponseParams(typing_extensions.TypedDict): + """ + Response model for a Tool call. + """ + + start_time: typing_extensions.NotRequired[dt.datetime] + """ + When the logged event started. + """ + + end_time: typing_extensions.NotRequired[dt.datetime] + """ + When the logged event ended. + """ + + tool: ToolResponseParams + """ + Tool used to generate the Log. + """ + + output: typing_extensions.NotRequired[str] + """ + Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + """ + + created_at: typing_extensions.NotRequired[dt.datetime] + """ + User-defined timestamp for when the log was created. + """ + + error: typing_extensions.NotRequired[str] + """ + Error message if the log is an error. + """ + + provider_latency: typing_extensions.NotRequired[float] + """ + Duration of the logged event in seconds. + """ + + stdout: typing_extensions.NotRequired[str] + """ + Captured log and debug statements. + """ + + provider_request: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Raw request sent to the provider. + """ + + provider_response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Raw response received from the provider. + """ + + inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + The inputs passed to the prompt template. + """ + + source: typing_extensions.NotRequired[str] + """ + Identifies where the model was called from. + """ + + metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Any additional metadata to record. + """ + + log_status: typing_extensions.NotRequired[LogStatus] + """ + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible.
Updating this from specified to unspecified is undefined behavior. + """ + + source_datapoint_id: typing_extensions.NotRequired[str] + """ + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + """ + + trace_parent_id: typing_extensions.NotRequired[str] + """ + The ID of the parent Log to nest this Log under in a Trace. + """ + + batches: typing_extensions.NotRequired[typing.Sequence[str]] + """ + Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations + """ + + user: typing_extensions.NotRequired[str] + """ + End-user ID related to the Log. + """ + + environment: typing_extensions.NotRequired[str] + """ + The name of the Environment the Log is associated to. + """ + + save: typing_extensions.NotRequired[bool] + """ + Whether the request/response payloads will be stored on Humanloop. + """ + + log_id: typing_extensions.NotRequired[str] + """ + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + """ + + id: str + """ + ID of the log. + """ + + evaluator_logs: typing.Sequence[EvaluatorLogResponseParams] + """ + List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log. + """ + + trace_flow_id: typing_extensions.NotRequired[str] + """ + Identifier for the Flow that the Trace belongs to. + """ + + trace_id: typing_extensions.NotRequired[str] + """ + ID of the Trace containing the Tool Call Log. + """ + + trace_children: typing_extensions.NotRequired[typing.Sequence[LogResponseParams]] + """ + Logs nested under this Log in the Trace. + """ diff --git a/src/humanloop/requests/tool_log_response.py b/src/humanloop/requests/tool_log_response.py index bac9dbbb..1aa0daea 100644 --- a/src/humanloop/requests/tool_log_response.py +++ b/src/humanloop/requests/tool_log_response.py @@ -7,6 +7,7 @@ import typing from ..types.log_status import LogStatus from .tool_response import ToolResponseParams +from .chat_message import ChatMessageParams import typing if typing.TYPE_CHECKING: @@ -148,3 +149,8 @@ class ToolLogResponseParams(typing_extensions.TypedDict): """ Tool used to generate the Log. """ + + output_message: typing_extensions.NotRequired[ChatMessageParams] + """ + The message returned by the Tool. 
+ """ diff --git a/src/humanloop/requests/version_deployment_response_file.py b/src/humanloop/requests/version_deployment_response_file.py index 8a16af00..9659cb49 100644 --- a/src/humanloop/requests/version_deployment_response_file.py +++ b/src/humanloop/requests/version_deployment_response_file.py @@ -10,6 +10,12 @@ from .tool_response import ToolResponseParams from .evaluator_response import EvaluatorResponseParams from .flow_response import FlowResponseParams + from .agent_response import AgentResponseParams VersionDeploymentResponseFileParams = typing.Union[ - "PromptResponseParams", "ToolResponseParams", DatasetResponseParams, "EvaluatorResponseParams", "FlowResponseParams" + "PromptResponseParams", + "ToolResponseParams", + DatasetResponseParams, + "EvaluatorResponseParams", + "FlowResponseParams", + "AgentResponseParams", ] diff --git a/src/humanloop/requests/version_id_response_version.py b/src/humanloop/requests/version_id_response_version.py index 50ecf7bc..9c317679 100644 --- a/src/humanloop/requests/version_id_response_version.py +++ b/src/humanloop/requests/version_id_response_version.py @@ -10,6 +10,12 @@ from .tool_response import ToolResponseParams from .evaluator_response import EvaluatorResponseParams from .flow_response import FlowResponseParams + from .agent_response import AgentResponseParams VersionIdResponseVersionParams = typing.Union[ - "PromptResponseParams", "ToolResponseParams", DatasetResponseParams, "EvaluatorResponseParams", "FlowResponseParams" + "PromptResponseParams", + "ToolResponseParams", + DatasetResponseParams, + "EvaluatorResponseParams", + "FlowResponseParams", + "AgentResponseParams", ] diff --git a/src/humanloop/tools/client.py b/src/humanloop/tools/client.py index 16d75bd7..ea6b14a2 100644 --- a/src/humanloop/tools/client.py +++ b/src/humanloop/tools/client.py @@ -3,10 +3,11 @@ import typing from ..core.client_wrapper import SyncClientWrapper from .raw_client import RawToolsClient +from ..requests.tool_kernel_request import ToolKernelRequestParams import datetime as dt from ..types.log_status import LogStatus -from ..requests.tool_kernel_request import ToolKernelRequestParams from ..core.request_options import RequestOptions +from ..types.tool_call_response import ToolCallResponse from ..types.create_tool_log_response import CreateToolLogResponse from ..types.log_response import LogResponse from ..types.project_sort_by import ProjectSortBy @@ -29,6 +30,8 @@ from ..requests.evaluator_activation_deactivation_request_deactivate_item import ( EvaluatorActivationDeactivationRequestDeactivateItemParams, ) +from ..types.file_environment_variable_request import FileEnvironmentVariableRequest +from ..requests.file_environment_variable_request import FileEnvironmentVariableRequestParams from ..core.client_wrapper import AsyncClientWrapper from .raw_client import AsyncRawToolsClient from ..core.pagination import AsyncPager @@ -52,6 +55,133 @@ def with_raw_response(self) -> RawToolsClient: """ return self._raw_client + def call( + self, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + tool: typing.Optional[ToolKernelRequestParams] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + 
log_status: typing.Optional[LogStatus] = OMIT, + source_datapoint_id: typing.Optional[str] = OMIT, + trace_parent_id: typing.Optional[str] = OMIT, + user: typing.Optional[str] = OMIT, + tool_call_request_environment: typing.Optional[str] = OMIT, + save: typing.Optional[bool] = OMIT, + log_id: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> ToolCallResponse: + """ + Call a Tool. + + Calling a Tool with inputs runs the tool's source code and logs the result and metadata to Humanloop. + + You can use query parameters `version_id`, or `environment`, to target + an existing version of the Tool. Otherwise, the default deployed version will be chosen. + + Instead of targeting an existing version explicitly, you can instead pass in + Tool details in the request body. In this case, we will check if the details correspond + to an existing version of the Tool. If they do not, we will create a new version. This is helpful + in the case where you are storing or deriving your Tool details in code. + + Parameters + ---------- + version_id : typing.Optional[str] + A specific Version ID of the Tool to call. + + environment : typing.Optional[str] + Name of the Environment identifying a deployed version to call. + + path : typing.Optional[str] + Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Tool. + + tool : typing.Optional[ToolKernelRequestParams] + Details of your Tool. A new Tool version will be created if the provided details are new. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the prompt template. + + source : typing.Optional[str] + Identifies where the model was called from. + + metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Any additional metadata to record. + + start_time : typing.Optional[dt.datetime] + When the logged event started. + + end_time : typing.Optional[dt.datetime] + When the logged event ended. + + log_status : typing.Optional[LogStatus] + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + + source_datapoint_id : typing.Optional[str] + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + + trace_parent_id : typing.Optional[str] + The ID of the parent Log to nest this Log under in a Trace. + + user : typing.Optional[str] + End-user ID related to the Log. + + tool_call_request_environment : typing.Optional[str] + The name of the Environment the Log is associated to. + + save : typing.Optional[bool] + Whether the request/response payloads will be stored on Humanloop. + + log_id : typing.Optional[str] + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration.
+ + Returns + ------- + ToolCallResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + + client = Humanloop( + api_key="YOUR_API_KEY", + ) + client.tools.call() + """ + response = self._raw_client.call( + version_id=version_id, + environment=environment, + path=path, + id=id, + tool=tool, + inputs=inputs, + source=source, + metadata=metadata, + start_time=start_time, + end_time=end_time, + log_status=log_status, + source_datapoint_id=source_datapoint_id, + trace_parent_id=trace_parent_id, + user=user, + tool_call_request_environment=tool_call_request_environment, + save=save, + log_id=log_id, + request_options=request_options, + ) + return response.data + def log( self, *, @@ -59,6 +189,7 @@ def log( environment: typing.Optional[str] = None, path: typing.Optional[str] = OMIT, id: typing.Optional[str] = OMIT, + tool: typing.Optional[ToolKernelRequestParams] = OMIT, start_time: typing.Optional[dt.datetime] = OMIT, end_time: typing.Optional[dt.datetime] = OMIT, output: typing.Optional[str] = OMIT, @@ -78,7 +209,6 @@ def log( tool_log_request_environment: typing.Optional[str] = OMIT, save: typing.Optional[bool] = OMIT, log_id: typing.Optional[str] = OMIT, - tool: typing.Optional[ToolKernelRequestParams] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> CreateToolLogResponse: """ @@ -106,6 +236,9 @@ def log( id : typing.Optional[str] ID for an existing Tool. + tool : typing.Optional[ToolKernelRequestParams] + Details of your Tool. A new Tool version will be created if the provided details are new. + start_time : typing.Optional[dt.datetime] When the logged event started. @@ -163,9 +296,6 @@ def log( log_id : typing.Optional[str] This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - tool : typing.Optional[ToolKernelRequestParams] - Details of your Tool. A new Tool version will be created if the provided details are new. - request_options : typing.Optional[RequestOptions] Request-specific configuration. @@ -206,6 +336,7 @@ def log( environment=environment, path=path, id=id, + tool=tool, start_time=start_time, end_time=end_time, output=output, @@ -225,7 +356,6 @@ def log( tool_log_request_environment=tool_log_request_environment, save=save, log_id=log_id, - tool=tool, request_options=request_options, ) return response.data @@ -966,6 +1096,112 @@ def update_monitoring( ) return response.data + def get_environment_variables( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[FileEnvironmentVariableRequest]: + """ + Parameters + ---------- + id : str + Unique identifier for File. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[FileEnvironmentVariableRequest] + Successful Response + + Examples + -------- + from humanloop import Humanloop + + client = Humanloop( + api_key="YOUR_API_KEY", + ) + client.tools.get_environment_variables( + id="id", + ) + """ + response = self._raw_client.get_environment_variables(id, request_options=request_options) + return response.data + + def add_environment_variable( + self, + id: str, + *, + request: typing.Sequence[FileEnvironmentVariableRequestParams], + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.List[FileEnvironmentVariableRequest]: + """ + Add an environment variable to a Tool. + + Parameters + ---------- + id : str + Unique identifier for Tool. 
+ + request : typing.Sequence[FileEnvironmentVariableRequestParams] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[FileEnvironmentVariableRequest] + Successful Response + + Examples + -------- + from humanloop import Humanloop + + client = Humanloop( + api_key="YOUR_API_KEY", + ) + client.tools.add_environment_variable( + id="id", + request=[{"name": "name", "value": "value"}], + ) + """ + response = self._raw_client.add_environment_variable(id, request=request, request_options=request_options) + return response.data + + def delete_environment_variable( + self, id: str, name: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[FileEnvironmentVariableRequest]: + """ + Parameters + ---------- + id : str + Unique identifier for File. + + name : str + Name of the Environment Variable to delete. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[FileEnvironmentVariableRequest] + Successful Response + + Examples + -------- + from humanloop import Humanloop + + client = Humanloop( + api_key="YOUR_API_KEY", + ) + client.tools.delete_environment_variable( + id="id", + name="name", + ) + """ + response = self._raw_client.delete_environment_variable(id, name, request_options=request_options) + return response.data + class AsyncToolsClient: def __init__(self, *, client_wrapper: AsyncClientWrapper): @@ -982,6 +1218,141 @@ def with_raw_response(self) -> AsyncRawToolsClient: """ return self._raw_client + async def call( + self, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + tool: typing.Optional[ToolKernelRequestParams] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + source_datapoint_id: typing.Optional[str] = OMIT, + trace_parent_id: typing.Optional[str] = OMIT, + user: typing.Optional[str] = OMIT, + tool_call_request_environment: typing.Optional[str] = OMIT, + save: typing.Optional[bool] = OMIT, + log_id: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> ToolCallResponse: + """ + Call a Tool. + + Calling a Tool with inputs runs the tool's source code and logs the result and metadata to Humanloop. + + You can use query parameters `version_id`, or `environment`, to target + an existing version of the Tool. Otherwise, the default deployed version will be chosen. + + Instead of targeting an existing version explicitly, you can instead pass in + Tool details in the request body. In this case, we will check if the details correspond + to an existing version of the Tool. If they do not, we will create a new version. This is helpful + in the case where you are storing or deriving your Tool details in code. + + Parameters + ---------- + version_id : typing.Optional[str] + A specific Version ID of the Tool to call. + + environment : typing.Optional[str] + Name of the Environment identifying a deployed version to call. + + path : typing.Optional[str] + Path of the Tool, including the name. 
This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Tool. + + tool : typing.Optional[ToolKernelRequestParams] + Details of your Tool. A new Tool version will be created if the provided details are new. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the prompt template. + + source : typing.Optional[str] + Identifies where the model was called from. + + metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Any additional metadata to record. + + start_time : typing.Optional[dt.datetime] + When the logged event started. + + end_time : typing.Optional[dt.datetime] + When the logged event ended. + + log_status : typing.Optional[LogStatus] + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + + source_datapoint_id : typing.Optional[str] + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + + trace_parent_id : typing.Optional[str] + The ID of the parent Log to nest this Log under in a Trace. + + user : typing.Optional[str] + End-user ID related to the Log. + + tool_call_request_environment : typing.Optional[str] + The name of the Environment the Log is associated to. + + save : typing.Optional[bool] + Whether the request/response payloads will be stored on Humanloop. + + log_id : typing.Optional[str] + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration.
+ + Returns + ------- + ToolCallResponse + Successful Response + + Examples + -------- + import asyncio + + from humanloop import AsyncHumanloop + + client = AsyncHumanloop( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.tools.call() + + + asyncio.run(main()) + """ + response = await self._raw_client.call( + version_id=version_id, + environment=environment, + path=path, + id=id, + tool=tool, + inputs=inputs, + source=source, + metadata=metadata, + start_time=start_time, + end_time=end_time, + log_status=log_status, + source_datapoint_id=source_datapoint_id, + trace_parent_id=trace_parent_id, + user=user, + tool_call_request_environment=tool_call_request_environment, + save=save, + log_id=log_id, + request_options=request_options, + ) + return response.data + async def log( self, *, @@ -989,6 +1360,7 @@ async def log( environment: typing.Optional[str] = None, path: typing.Optional[str] = OMIT, id: typing.Optional[str] = OMIT, + tool: typing.Optional[ToolKernelRequestParams] = OMIT, start_time: typing.Optional[dt.datetime] = OMIT, end_time: typing.Optional[dt.datetime] = OMIT, output: typing.Optional[str] = OMIT, @@ -1008,7 +1380,6 @@ async def log( tool_log_request_environment: typing.Optional[str] = OMIT, save: typing.Optional[bool] = OMIT, log_id: typing.Optional[str] = OMIT, - tool: typing.Optional[ToolKernelRequestParams] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> CreateToolLogResponse: """ @@ -1036,6 +1407,9 @@ async def log( id : typing.Optional[str] ID for an existing Tool. + tool : typing.Optional[ToolKernelRequestParams] + Details of your Tool. A new Tool version will be created if the provided details are new. + start_time : typing.Optional[dt.datetime] When the logged event started. @@ -1093,9 +1467,6 @@ async def log( log_id : typing.Optional[str] This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - tool : typing.Optional[ToolKernelRequestParams] - Details of your Tool. A new Tool version will be created if the provided details are new. - request_options : typing.Optional[RequestOptions] Request-specific configuration. @@ -1144,6 +1515,7 @@ async def main() -> None: environment=environment, path=path, id=id, + tool=tool, start_time=start_time, end_time=end_time, output=output, @@ -1163,7 +1535,6 @@ async def main() -> None: tool_log_request_environment=tool_log_request_environment, save=save, log_id=log_id, - tool=tool, request_options=request_options, ) return response.data @@ -2010,3 +2381,133 @@ async def main() -> None: id, activate=activate, deactivate=deactivate, request_options=request_options ) return response.data + + async def get_environment_variables( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[FileEnvironmentVariableRequest]: + """ + Parameters + ---------- + id : str + Unique identifier for File. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[FileEnvironmentVariableRequest] + Successful Response + + Examples + -------- + import asyncio + + from humanloop import AsyncHumanloop + + client = AsyncHumanloop( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.tools.get_environment_variables( + id="id", + ) + + + asyncio.run(main()) + """ + response = await self._raw_client.get_environment_variables(id, request_options=request_options) + return response.data + + async def add_environment_variable( + self, + id: str, + *, + request: typing.Sequence[FileEnvironmentVariableRequestParams], + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.List[FileEnvironmentVariableRequest]: + """ + Add an environment variable to a Tool. + + Parameters + ---------- + id : str + Unique identifier for Tool. + + request : typing.Sequence[FileEnvironmentVariableRequestParams] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[FileEnvironmentVariableRequest] + Successful Response + + Examples + -------- + import asyncio + + from humanloop import AsyncHumanloop + + client = AsyncHumanloop( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.tools.add_environment_variable( + id="id", + request=[{"name": "name", "value": "value"}], + ) + + + asyncio.run(main()) + """ + response = await self._raw_client.add_environment_variable(id, request=request, request_options=request_options) + return response.data + + async def delete_environment_variable( + self, id: str, name: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[FileEnvironmentVariableRequest]: + """ + Parameters + ---------- + id : str + Unique identifier for File. + + name : str + Name of the Environment Variable to delete. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[FileEnvironmentVariableRequest] + Successful Response + + Examples + -------- + import asyncio + + from humanloop import AsyncHumanloop + + client = AsyncHumanloop( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.tools.delete_environment_variable( + id="id", + name="name", + ) + + + asyncio.run(main()) + """ + response = await self._raw_client.delete_environment_variable(id, name, request_options=request_options) + return response.data diff --git a/src/humanloop/tools/raw_client.py b/src/humanloop/tools/raw_client.py index 4a1f29e9..b412b771 100644 --- a/src/humanloop/tools/raw_client.py +++ b/src/humanloop/tools/raw_client.py @@ -2,18 +2,19 @@ import typing from ..core.client_wrapper import SyncClientWrapper +from ..requests.tool_kernel_request import ToolKernelRequestParams import datetime as dt from ..types.log_status import LogStatus -from ..requests.tool_kernel_request import ToolKernelRequestParams from ..core.request_options import RequestOptions from ..core.http_response import HttpResponse -from ..types.create_tool_log_response import CreateToolLogResponse +from ..types.tool_call_response import ToolCallResponse from ..core.serialization import convert_and_respect_annotation_metadata from ..core.unchecked_base_model import construct_type from ..errors.unprocessable_entity_error import UnprocessableEntityError from ..types.http_validation_error import HttpValidationError from json.decoder import JSONDecodeError from ..core.api_error import ApiError +from ..types.create_tool_log_response import CreateToolLogResponse from ..types.log_response import LogResponse from ..core.jsonable_encoder import jsonable_encoder from ..requests.tool_function import ToolFunctionParams @@ -27,6 +28,8 @@ from ..requests.evaluator_activation_deactivation_request_deactivate_item import ( EvaluatorActivationDeactivationRequestDeactivateItemParams, ) +from ..types.file_environment_variable_request import FileEnvironmentVariableRequest +from ..requests.file_environment_variable_request import FileEnvironmentVariableRequestParams from ..core.client_wrapper import AsyncClientWrapper from ..core.http_response import AsyncHttpResponse @@ -38,6 +41,159 @@ class RawToolsClient: def __init__(self, *, client_wrapper: SyncClientWrapper): self._client_wrapper = client_wrapper + def call( + self, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + tool: typing.Optional[ToolKernelRequestParams] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + source_datapoint_id: typing.Optional[str] = OMIT, + trace_parent_id: typing.Optional[str] = OMIT, + user: typing.Optional[str] = OMIT, + tool_call_request_environment: typing.Optional[str] = OMIT, + save: typing.Optional[bool] = OMIT, + log_id: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[ToolCallResponse]: + """ + Call a Tool. + + Calling a Tool with inputs runs the tool's source code and logs the result and metadata to Humanloop. 
+ + You can use query parameters `version_id`, or `environment`, to target + an existing version of the Tool. Otherwise, the default deployed version will be chosen. + + Instead of targeting an existing version explicitly, you can instead pass in + Tool details in the request body. In this case, we will check if the details correspond + to an existing version of the Tool. If they do not, we will create a new version. This is helpful + in the case where you are storing or deriving your Tool details in code. + + Parameters + ---------- + version_id : typing.Optional[str] + A specific Version ID of the Tool to call. + + environment : typing.Optional[str] + Name of the Environment identifying a deployed version to call. + + path : typing.Optional[str] + Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Tool. + + tool : typing.Optional[ToolKernelRequestParams] + Details of your Tool. A new Tool version will be created if the provided details are new. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the prompt template. + + source : typing.Optional[str] + Identifies where the model was called from. + + metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Any additional metadata to record. + + start_time : typing.Optional[dt.datetime] + When the logged event started. + + end_time : typing.Optional[dt.datetime] + When the logged event ended. + + log_status : typing.Optional[LogStatus] + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + + source_datapoint_id : typing.Optional[str] + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + + trace_parent_id : typing.Optional[str] + The ID of the parent Log to nest this Log under in a Trace. + + user : typing.Optional[str] + End-user ID related to the Log. + + tool_call_request_environment : typing.Optional[str] + The name of the Environment the Log is associated to. + + save : typing.Optional[bool] + Whether the request/response payloads will be stored on Humanloop. + + log_id : typing.Optional[str] + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration.
+ + Returns + ------- + HttpResponse[ToolCallResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + "tools/call", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + }, + json={ + "path": path, + "id": id, + "tool": convert_and_respect_annotation_metadata( + object_=tool, annotation=ToolKernelRequestParams, direction="write" + ), + "inputs": inputs, + "source": source, + "metadata": metadata, + "start_time": start_time, + "end_time": end_time, + "log_status": log_status, + "source_datapoint_id": source_datapoint_id, + "trace_parent_id": trace_parent_id, + "user": user, + "environment": tool_call_request_environment, + "save": save, + "log_id": log_id, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + ToolCallResponse, + construct_type( + type_=ToolCallResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + def log( self, *, @@ -45,6 +201,7 @@ def log( environment: typing.Optional[str] = None, path: typing.Optional[str] = OMIT, id: typing.Optional[str] = OMIT, + tool: typing.Optional[ToolKernelRequestParams] = OMIT, start_time: typing.Optional[dt.datetime] = OMIT, end_time: typing.Optional[dt.datetime] = OMIT, output: typing.Optional[str] = OMIT, @@ -64,7 +221,6 @@ def log( tool_log_request_environment: typing.Optional[str] = OMIT, save: typing.Optional[bool] = OMIT, log_id: typing.Optional[str] = OMIT, - tool: typing.Optional[ToolKernelRequestParams] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> HttpResponse[CreateToolLogResponse]: """ @@ -92,6 +248,9 @@ def log( id : typing.Optional[str] ID for an existing Tool. + tool : typing.Optional[ToolKernelRequestParams] + Details of your Tool. A new Tool version will be created if the provided details are new. + start_time : typing.Optional[dt.datetime] When the logged event started. @@ -149,9 +308,6 @@ def log( log_id : typing.Optional[str] This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - tool : typing.Optional[ToolKernelRequestParams] - Details of your Tool. A new Tool version will be created if the provided details are new. - request_options : typing.Optional[RequestOptions] Request-specific configuration. 
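The surrounding `log` hunks only reorder the `tool` kernel parameter and its serialization; the endpoint's behavior is unchanged. As a reference point, a hedged sketch of a log call using only parameters documented in this patch (path, inputs, output, log_status); all values are placeholders:

# Sketch, not part of the patch: placeholder values throughout.
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Log a finished Tool execution. log_status="complete" lets monitoring
# Evaluators run right away; omitting it means observability picks the Log
# up as soon as possible, per the docstrings above.
log = client.tools.log(
    path="folder/name",
    inputs={"query": "weather in London"},
    output='{"temperature_c": 18}',
    log_status="complete",
)
print(log.id)  # assumes CreateToolLogResponse exposes the new Log's ID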
@@ -170,6 +326,9 @@ def log( json={ "path": path, "id": id, + "tool": convert_and_respect_annotation_metadata( + object_=tool, annotation=ToolKernelRequestParams, direction="write" + ), "start_time": start_time, "end_time": end_time, "output": output, @@ -189,9 +348,6 @@ def log( "environment": tool_log_request_environment, "save": save, "log_id": log_id, - "tool": convert_and_respect_annotation_metadata( - object_=tool, annotation=ToolKernelRequestParams, direction="write" - ), }, headers={ "content-type": "application/json", @@ -1038,75 +1194,387 @@ def update_monitoring( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) + def get_environment_variables( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[typing.List[FileEnvironmentVariableRequest]]: + """ + Parameters + ---------- + id : str + Unique identifier for File. -class AsyncRawToolsClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper + request_options : typing.Optional[RequestOptions] + Request-specific configuration. - async def log( + Returns + ------- + HttpResponse[typing.List[FileEnvironmentVariableRequest]] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}/environment-variables", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + typing.List[FileEnvironmentVariableRequest], + construct_type( + type_=typing.List[FileEnvironmentVariableRequest], # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def add_environment_variable( self, + id: str, *, - version_id: typing.Optional[str] = None, - environment: typing.Optional[str] = None, - path: typing.Optional[str] = OMIT, - id: typing.Optional[str] = OMIT, - start_time: typing.Optional[dt.datetime] = OMIT, - end_time: typing.Optional[dt.datetime] = OMIT, - output: typing.Optional[str] = OMIT, - created_at: typing.Optional[dt.datetime] = OMIT, - error: typing.Optional[str] = OMIT, - provider_latency: typing.Optional[float] = OMIT, - stdout: typing.Optional[str] = OMIT, - provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - source: typing.Optional[str] = OMIT, - metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - log_status: typing.Optional[LogStatus] = OMIT, - source_datapoint_id: typing.Optional[str] = OMIT, - trace_parent_id: typing.Optional[str] = OMIT, - user: typing.Optional[str] = OMIT, - tool_log_request_environment: typing.Optional[str] = OMIT, - save: typing.Optional[bool] = OMIT, - log_id: typing.Optional[str] = OMIT, - tool: typing.Optional[ToolKernelRequestParams] = OMIT, + request: 
typing.Sequence[FileEnvironmentVariableRequestParams], request_options: typing.Optional[RequestOptions] = None, - ) -> AsyncHttpResponse[CreateToolLogResponse]: + ) -> HttpResponse[typing.List[FileEnvironmentVariableRequest]]: """ - Log to a Tool. - - You can use query parameters `version_id`, or `environment`, to target - an existing version of the Tool. Otherwise the default deployed version will be chosen. - - Instead of targeting an existing version explicitly, you can instead pass in - Tool details in the request body. In this case, we will check if the details correspond - to an existing version of the Tool, if not we will create a new version. This is helpful - in the case where you are storing or deriving your Tool details in code. + Add an environment variable to a Tool. Parameters ---------- - version_id : typing.Optional[str] - A specific Version ID of the Tool to log to. - - environment : typing.Optional[str] - Name of the Environment identifying a deployed version to log to. - - path : typing.Optional[str] - Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`. - - id : typing.Optional[str] - ID for an existing Tool. - - start_time : typing.Optional[dt.datetime] - When the logged event started. + id : str + Unique identifier for Tool. - end_time : typing.Optional[dt.datetime] - When the logged event ended. + request : typing.Sequence[FileEnvironmentVariableRequestParams] - output : typing.Optional[str] - Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + request_options : typing.Optional[RequestOptions] + Request-specific configuration. - created_at : typing.Optional[dt.datetime] + Returns + ------- + HttpResponse[typing.List[FileEnvironmentVariableRequest]] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}/environment-variables", + method="POST", + json=convert_and_respect_annotation_metadata( + object_=request, annotation=typing.Sequence[FileEnvironmentVariableRequestParams], direction="write" + ), + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + typing.List[FileEnvironmentVariableRequest], + construct_type( + type_=typing.List[FileEnvironmentVariableRequest], # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def delete_environment_variable( + self, id: str, name: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[typing.List[FileEnvironmentVariableRequest]]: + """ + Parameters + ---------- + id : str + Unique identifier for File. + + name : str + Name of the Environment Variable to delete. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+
+        Returns
+        -------
+        HttpResponse[typing.List[FileEnvironmentVariableRequest]]
+            Successful Response
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            f"tools/{jsonable_encoder(id)}/environment-variables/{jsonable_encoder(name)}",
+            method="DELETE",
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                _data = typing.cast(
+                    typing.List[FileEnvironmentVariableRequest],
+                    construct_type(
+                        type_=typing.List[FileEnvironmentVariableRequest],  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+                return HttpResponse(response=_response, data=_data)
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncRawToolsClient:
+    def __init__(self, *, client_wrapper: AsyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    async def call(
+        self,
+        *,
+        version_id: typing.Optional[str] = None,
+        environment: typing.Optional[str] = None,
+        path: typing.Optional[str] = OMIT,
+        id: typing.Optional[str] = OMIT,
+        tool: typing.Optional[ToolKernelRequestParams] = OMIT,
+        inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        source: typing.Optional[str] = OMIT,
+        metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        start_time: typing.Optional[dt.datetime] = OMIT,
+        end_time: typing.Optional[dt.datetime] = OMIT,
+        log_status: typing.Optional[LogStatus] = OMIT,
+        source_datapoint_id: typing.Optional[str] = OMIT,
+        trace_parent_id: typing.Optional[str] = OMIT,
+        user: typing.Optional[str] = OMIT,
+        tool_call_request_environment: typing.Optional[str] = OMIT,
+        save: typing.Optional[bool] = OMIT,
+        log_id: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> AsyncHttpResponse[ToolCallResponse]:
+        """
+        Call a Tool.
+
+        Calling a Tool with inputs runs the tool's source code and logs the result and metadata to Humanloop.
+
+        You can use query parameters `version_id`, or `environment`, to target
+        an existing version of the Tool. Otherwise, the default deployed version will be chosen.
+
+        Instead of targeting an existing version explicitly, you can instead pass in
+        Tool details in the request body. In this case, we will check if the details correspond
+        to an existing version of the Tool. If they do not, we will create a new version. This is helpful
+        in the case where you are storing or deriving your Tool details in code.
+
+        Parameters
+        ----------
+        version_id : typing.Optional[str]
+            A specific Version ID of the Tool to call.
+
+        environment : typing.Optional[str]
+            Name of the Environment identifying a deployed version to call.
+
+        path : typing.Optional[str]
+            Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Tool.
+
+        tool : typing.Optional[ToolKernelRequestParams]
+            Details of your Tool. A new Tool version will be created if the provided details are new.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the prompt template.
+ + source : typing.Optional[str] + Identifies where the model was called from. + + metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Any additional metadata to record. + + start_time : typing.Optional[dt.datetime] + When the logged event started. + + end_time : typing.Optional[dt.datetime] + When the logged event ended. + + log_status : typing.Optional[LogStatus] + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + + source_datapoint_id : typing.Optional[str] + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + + trace_parent_id : typing.Optional[str] + The ID of the parent Log to nest this Log under in a Trace. + + user : typing.Optional[str] + End-user ID related to the Log. + + tool_call_request_environment : typing.Optional[str] + The name of the Environment the Log is associated to. + + save : typing.Optional[bool] + Whether the request/response payloads will be stored on Humanloop. + + log_id : typing.Optional[str] + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[ToolCallResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + "tools/call", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + }, + json={ + "path": path, + "id": id, + "tool": convert_and_respect_annotation_metadata( + object_=tool, annotation=ToolKernelRequestParams, direction="write" + ), + "inputs": inputs, + "source": source, + "metadata": metadata, + "start_time": start_time, + "end_time": end_time, + "log_status": log_status, + "source_datapoint_id": source_datapoint_id, + "trace_parent_id": trace_parent_id, + "user": user, + "environment": tool_call_request_environment, + "save": save, + "log_id": log_id, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + ToolCallResponse, + construct_type( + type_=ToolCallResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def log( + self, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + tool: typing.Optional[ToolKernelRequestParams] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + output: typing.Optional[str] = OMIT, + 
created_at: typing.Optional[dt.datetime] = OMIT,
+        error: typing.Optional[str] = OMIT,
+        provider_latency: typing.Optional[float] = OMIT,
+        stdout: typing.Optional[str] = OMIT,
+        provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        source: typing.Optional[str] = OMIT,
+        metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        log_status: typing.Optional[LogStatus] = OMIT,
+        source_datapoint_id: typing.Optional[str] = OMIT,
+        trace_parent_id: typing.Optional[str] = OMIT,
+        user: typing.Optional[str] = OMIT,
+        tool_log_request_environment: typing.Optional[str] = OMIT,
+        save: typing.Optional[bool] = OMIT,
+        log_id: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> AsyncHttpResponse[CreateToolLogResponse]:
+        """
+        Log to a Tool.
+
+        You can use query parameters `version_id`, or `environment`, to target
+        an existing version of the Tool. Otherwise, the default deployed version will be chosen.
+
+        Instead of targeting an existing version explicitly, you can instead pass in
+        Tool details in the request body. In this case, we will check if the details correspond
+        to an existing version of the Tool; if not, we will create a new version. This is helpful
+        in the case where you are storing or deriving your Tool details in code.
+
+        Parameters
+        ----------
+        version_id : typing.Optional[str]
+            A specific Version ID of the Tool to log to.
+
+        environment : typing.Optional[str]
+            Name of the Environment identifying a deployed version to log to.
+
+        path : typing.Optional[str]
+            Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Tool.
+
+        tool : typing.Optional[ToolKernelRequestParams]
+            Details of your Tool. A new Tool version will be created if the provided details are new.
+
+        start_time : typing.Optional[dt.datetime]
+            When the logged event started.
+
+        end_time : typing.Optional[dt.datetime]
+            When the logged event ended.
+
+        output : typing.Optional[str]
+            Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+        created_at : typing.Optional[dt.datetime]
             User defined timestamp for when the log was created.
 
         error : typing.Optional[str]
@@ -1154,9 +1622,6 @@ async def log(
         log_id : typing.Optional[str]
             This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
 
-        tool : typing.Optional[ToolKernelRequestParams]
-            Details of your Tool. A new Tool version will be created if the provided details are new.
-
         request_options : typing.Optional[RequestOptions]
             Request-specific configuration.
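For illustration, the reordered async `log` signature reads like this at the call site (a sketch under assumptions, not part of the patch: `AsyncHumanloop` is the SDK's async entry point, the public async `ToolsClient.log` mirrors this raw signature, `tool` accepts a `ToolKernelRequestParams`-shaped dict, and all values below are made up):

    import asyncio
    import datetime as dt

    from humanloop import AsyncHumanloop

    client = AsyncHumanloop(api_key="YOUR_API_KEY")

    async def main() -> None:
        # `tool` now sits with `path`/`id` near the top of the signature.
        # Passing inline Tool details creates a new Tool version if they
        # don't match an existing one (see the docstring above).
        log = await client.tools.log(
            path="math/calculator",  # illustrative path, not from the patch
            tool={"function": {"name": "calculator", "description": "Evaluates arithmetic."}},
            inputs={"expression": "1 + 2"},
            output="3",
            start_time=dt.datetime.now(dt.timezone.utc),
            end_time=dt.datetime.now(dt.timezone.utc),
        )
        print(log.id)

    asyncio.run(main())
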
@@ -1175,6 +1640,9 @@ async def log( json={ "path": path, "id": id, + "tool": convert_and_respect_annotation_metadata( + object_=tool, annotation=ToolKernelRequestParams, direction="write" + ), "start_time": start_time, "end_time": end_time, "output": output, @@ -1194,9 +1662,6 @@ async def log( "environment": tool_log_request_environment, "save": save, "log_id": log_id, - "tool": convert_and_respect_annotation_metadata( - object_=tool, annotation=ToolKernelRequestParams, direction="write" - ), }, headers={ "content-type": "application/json", @@ -2044,3 +2509,159 @@ async def update_monitoring( except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_environment_variables( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[typing.List[FileEnvironmentVariableRequest]]: + """ + Parameters + ---------- + id : str + Unique identifier for File. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[typing.List[FileEnvironmentVariableRequest]] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}/environment-variables", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + typing.List[FileEnvironmentVariableRequest], + construct_type( + type_=typing.List[FileEnvironmentVariableRequest], # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def add_environment_variable( + self, + id: str, + *, + request: typing.Sequence[FileEnvironmentVariableRequestParams], + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[typing.List[FileEnvironmentVariableRequest]]: + """ + Add an environment variable to a Tool. + + Parameters + ---------- + id : str + Unique identifier for Tool. + + request : typing.Sequence[FileEnvironmentVariableRequestParams] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[typing.List[FileEnvironmentVariableRequest]] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}/environment-variables", + method="POST", + json=convert_and_respect_annotation_metadata( + object_=request, annotation=typing.Sequence[FileEnvironmentVariableRequestParams], direction="write" + ), + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + typing.List[FileEnvironmentVariableRequest], + construct_type( + type_=typing.List[FileEnvironmentVariableRequest], # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def delete_environment_variable( + self, id: str, name: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[typing.List[FileEnvironmentVariableRequest]]: + """ + Parameters + ---------- + id : str + Unique identifier for File. + + name : str + Name of the Environment Variable to delete. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[typing.List[FileEnvironmentVariableRequest]] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}/environment-variables/{jsonable_encoder(name)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + typing.List[FileEnvironmentVariableRequest], + construct_type( + type_=typing.List[FileEnvironmentVariableRequest], # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/humanloop/types/__init__.py b/src/humanloop/types/__init__.py index 156f4e9a..8130325d 100644 --- a/src/humanloop/types/__init__.py +++ b/src/humanloop/types/__init__.py @@ -1,15 +1,44 @@ # This file was auto-generated by Fern from our API Definition. 
+from .agent_call_response import AgentCallResponse +from .agent_call_response_tool_choice import AgentCallResponseToolChoice +from .agent_call_stream_response import AgentCallStreamResponse +from .agent_call_stream_response_payload import AgentCallStreamResponsePayload from .agent_config_response import AgentConfigResponse +from .agent_continue_response import AgentContinueResponse +from .agent_continue_response_tool_choice import AgentContinueResponseToolChoice +from .agent_continue_stream_response import AgentContinueStreamResponse +from .agent_continue_stream_response_payload import AgentContinueStreamResponsePayload +from .agent_inline_tool import AgentInlineTool +from .agent_kernel_request import AgentKernelRequest +from .agent_kernel_request_reasoning_effort import AgentKernelRequestReasoningEffort +from .agent_kernel_request_stop import AgentKernelRequestStop +from .agent_kernel_request_template import AgentKernelRequestTemplate +from .agent_kernel_request_tools_item import AgentKernelRequestToolsItem +from .agent_linked_file_request import AgentLinkedFileRequest +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_linked_file_response_file import AgentLinkedFileResponseFile +from .agent_log_response import AgentLogResponse +from .agent_log_response_tool_choice import AgentLogResponseToolChoice +from .agent_log_stream_response import AgentLogStreamResponse +from .agent_response import AgentResponse +from .agent_response_reasoning_effort import AgentResponseReasoningEffort +from .agent_response_stop import AgentResponseStop +from .agent_response_template import AgentResponseTemplate +from .agent_response_tools_item import AgentResponseToolsItem +from .anthropic_redacted_thinking_content import AnthropicRedactedThinkingContent +from .anthropic_thinking_content import AnthropicThinkingContent from .base_models_user_response import BaseModelsUserResponse from .boolean_evaluator_stats_response import BooleanEvaluatorStatsResponse from .chat_message import ChatMessage from .chat_message_content import ChatMessageContent from .chat_message_content_item import ChatMessageContentItem +from .chat_message_thinking_item import ChatMessageThinkingItem from .chat_role import ChatRole from .chat_tool_type import ChatToolType from .code_evaluator_request import CodeEvaluatorRequest from .config_tool_response import ConfigToolResponse +from .create_agent_log_response import CreateAgentLogResponse from .create_datapoint_request import CreateDatapointRequest from .create_datapoint_request_target_value import CreateDatapointRequestTargetValue from .create_evaluator_log_response import CreateEvaluatorLogResponse @@ -56,10 +85,12 @@ from .evaluator_return_type_enum import EvaluatorReturnTypeEnum from .evaluator_version_id import EvaluatorVersionId from .evaluators_request import EvaluatorsRequest +from .event_type import EventType from .external_evaluator_request import ExternalEvaluatorRequest from .feedback_type import FeedbackType from .file_environment_response import FileEnvironmentResponse from .file_environment_response_file import FileEnvironmentResponseFile +from .file_environment_variable_request import FileEnvironmentVariableRequest from .file_id import FileId from .file_path import FilePath from .file_request import FileRequest @@ -77,7 +108,9 @@ from .image_url import ImageUrl from .image_url_detail import ImageUrlDetail from .input_response import InputResponse +from .linked_file_request import LinkedFileRequest from .linked_tool_response import LinkedToolResponse 
+from .list_agents import ListAgents from .list_datasets import ListDatasets from .list_evaluators import ListEvaluators from .list_flows import ListFlows @@ -86,6 +119,7 @@ from .llm_evaluator_request import LlmEvaluatorRequest from .log_response import LogResponse from .log_status import LogStatus +from .log_stream_response import LogStreamResponse from .model_endpoints import ModelEndpoints from .model_providers import ModelProviders from .monitoring_evaluator_environment_request import MonitoringEvaluatorEnvironmentRequest @@ -94,18 +128,21 @@ from .monitoring_evaluator_version_request import MonitoringEvaluatorVersionRequest from .numeric_evaluator_stats_response import NumericEvaluatorStatsResponse from .observability_status import ObservabilityStatus +from .on_agent_call_enum import OnAgentCallEnum +from .open_ai_reasoning_effort import OpenAiReasoningEffort from .overall_stats import OverallStats +from .paginated_data_agent_response import PaginatedDataAgentResponse from .paginated_data_evaluation_log_response import PaginatedDataEvaluationLogResponse from .paginated_data_evaluator_response import PaginatedDataEvaluatorResponse from .paginated_data_flow_response import PaginatedDataFlowResponse from .paginated_data_log_response import PaginatedDataLogResponse from .paginated_data_prompt_response import PaginatedDataPromptResponse from .paginated_data_tool_response import PaginatedDataToolResponse -from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response import ( - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse, +from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response import ( + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse, ) -from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item import ( - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem, +from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item import ( + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem, ) from .paginated_datapoint_response import PaginatedDatapointResponse from .paginated_dataset_response import PaginatedDatasetResponse @@ -115,6 +152,7 @@ from .platform_access_enum import PlatformAccessEnum from .populate_template_response import PopulateTemplateResponse from .populate_template_response_populated_template import PopulateTemplateResponsePopulatedTemplate +from .populate_template_response_reasoning_effort import PopulateTemplateResponseReasoningEffort from .populate_template_response_stop import PopulateTemplateResponseStop from .populate_template_response_template import PopulateTemplateResponseTemplate from .project_sort_by import ProjectSortBy @@ -123,15 +161,16 @@ from .prompt_call_response_tool_choice import PromptCallResponseToolChoice from .prompt_call_stream_response import PromptCallStreamResponse from .prompt_kernel_request import PromptKernelRequest +from .prompt_kernel_request_reasoning_effort import PromptKernelRequestReasoningEffort from .prompt_kernel_request_stop import PromptKernelRequestStop from .prompt_kernel_request_template import PromptKernelRequestTemplate from .prompt_log_response import PromptLogResponse from 
.prompt_log_response_tool_choice import PromptLogResponseToolChoice from .prompt_response import PromptResponse +from .prompt_response_reasoning_effort import PromptResponseReasoningEffort from .prompt_response_stop import PromptResponseStop from .prompt_response_template import PromptResponseTemplate from .provider_api_keys import ProviderApiKeys -from .reasoning_effort import ReasoningEffort from .response_format import ResponseFormat from .response_format_type import ResponseFormatType from .run_stats_response import RunStatsResponse @@ -144,6 +183,7 @@ from .text_evaluator_stats_response import TextEvaluatorStatsResponse from .time_unit import TimeUnit from .tool_call import ToolCall +from .tool_call_response import ToolCallResponse from .tool_choice import ToolChoice from .tool_function import ToolFunction from .tool_kernel_request import ToolKernelRequest @@ -167,16 +207,45 @@ from .version_status import VersionStatus __all__ = [ + "AgentCallResponse", + "AgentCallResponseToolChoice", + "AgentCallStreamResponse", + "AgentCallStreamResponsePayload", "AgentConfigResponse", + "AgentContinueResponse", + "AgentContinueResponseToolChoice", + "AgentContinueStreamResponse", + "AgentContinueStreamResponsePayload", + "AgentInlineTool", + "AgentKernelRequest", + "AgentKernelRequestReasoningEffort", + "AgentKernelRequestStop", + "AgentKernelRequestTemplate", + "AgentKernelRequestToolsItem", + "AgentLinkedFileRequest", + "AgentLinkedFileResponse", + "AgentLinkedFileResponseFile", + "AgentLogResponse", + "AgentLogResponseToolChoice", + "AgentLogStreamResponse", + "AgentResponse", + "AgentResponseReasoningEffort", + "AgentResponseStop", + "AgentResponseTemplate", + "AgentResponseToolsItem", + "AnthropicRedactedThinkingContent", + "AnthropicThinkingContent", "BaseModelsUserResponse", "BooleanEvaluatorStatsResponse", "ChatMessage", "ChatMessageContent", "ChatMessageContentItem", + "ChatMessageThinkingItem", "ChatRole", "ChatToolType", "CodeEvaluatorRequest", "ConfigToolResponse", + "CreateAgentLogResponse", "CreateDatapointRequest", "CreateDatapointRequestTargetValue", "CreateEvaluatorLogResponse", @@ -221,10 +290,12 @@ "EvaluatorReturnTypeEnum", "EvaluatorVersionId", "EvaluatorsRequest", + "EventType", "ExternalEvaluatorRequest", "FeedbackType", "FileEnvironmentResponse", "FileEnvironmentResponseFile", + "FileEnvironmentVariableRequest", "FileId", "FilePath", "FileRequest", @@ -242,7 +313,9 @@ "ImageUrl", "ImageUrlDetail", "InputResponse", + "LinkedFileRequest", "LinkedToolResponse", + "ListAgents", "ListDatasets", "ListEvaluators", "ListFlows", @@ -251,6 +324,7 @@ "LlmEvaluatorRequest", "LogResponse", "LogStatus", + "LogStreamResponse", "ModelEndpoints", "ModelProviders", "MonitoringEvaluatorEnvironmentRequest", @@ -259,15 +333,18 @@ "MonitoringEvaluatorVersionRequest", "NumericEvaluatorStatsResponse", "ObservabilityStatus", + "OnAgentCallEnum", + "OpenAiReasoningEffort", "OverallStats", + "PaginatedDataAgentResponse", "PaginatedDataEvaluationLogResponse", "PaginatedDataEvaluatorResponse", "PaginatedDataFlowResponse", "PaginatedDataLogResponse", "PaginatedDataPromptResponse", "PaginatedDataToolResponse", - "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse", - "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem", + "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse", + 
"PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem", "PaginatedDatapointResponse", "PaginatedDatasetResponse", "PaginatedEvaluationResponse", @@ -276,6 +353,7 @@ "PlatformAccessEnum", "PopulateTemplateResponse", "PopulateTemplateResponsePopulatedTemplate", + "PopulateTemplateResponseReasoningEffort", "PopulateTemplateResponseStop", "PopulateTemplateResponseTemplate", "ProjectSortBy", @@ -284,15 +362,16 @@ "PromptCallResponseToolChoice", "PromptCallStreamResponse", "PromptKernelRequest", + "PromptKernelRequestReasoningEffort", "PromptKernelRequestStop", "PromptKernelRequestTemplate", "PromptLogResponse", "PromptLogResponseToolChoice", "PromptResponse", + "PromptResponseReasoningEffort", "PromptResponseStop", "PromptResponseTemplate", "ProviderApiKeys", - "ReasoningEffort", "ResponseFormat", "ResponseFormatType", "RunStatsResponse", @@ -305,6 +384,7 @@ "TextEvaluatorStatsResponse", "TimeUnit", "ToolCall", + "ToolCallResponse", "ToolChoice", "ToolFunction", "ToolKernelRequest", diff --git a/src/humanloop/types/agent_call_response.py b/src/humanloop/types/agent_call_response.py new file mode 100644 index 00000000..ba3bbfec --- /dev/null +++ b/src/humanloop/types/agent_call_response.py @@ -0,0 +1,224 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_response import AgentResponse +from .evaluator_response import EvaluatorResponse +from .flow_response import FlowResponse +from .monitoring_evaluator_response import MonitoringEvaluatorResponse +from .prompt_response import PromptResponse +from .tool_response import ToolResponse +from .version_deployment_response import VersionDeploymentResponse +from .version_id_response import VersionIdResponse +from .agent_log_response import AgentLogResponse +from .evaluator_log_response import EvaluatorLogResponse +from .flow_log_response import FlowLogResponse +from .prompt_log_response import PromptLogResponse +from .tool_log_response import ToolLogResponse +import typing +from .chat_message import ChatMessage +import pydantic +from .agent_call_response_tool_choice import AgentCallResponseToolChoice +import datetime as dt +from .log_status import LogStatus +from .log_response import LogResponse +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class AgentCallResponse(UncheckedBaseModel): + """ + Response model for a Agent call. + """ + + output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None) + """ + The message returned by the provider. + """ + + prompt_tokens: typing.Optional[int] = pydantic.Field(default=None) + """ + Number of tokens in the prompt used to generate the output. + """ + + reasoning_tokens: typing.Optional[int] = pydantic.Field(default=None) + """ + Number of reasoning tokens used to generate the output. + """ + + output_tokens: typing.Optional[int] = pydantic.Field(default=None) + """ + Number of tokens in the output generated by the model. + """ + + prompt_cost: typing.Optional[float] = pydantic.Field(default=None) + """ + Cost in dollars associated to the tokens in the prompt. + """ + + output_cost: typing.Optional[float] = pydantic.Field(default=None) + """ + Cost in dollars associated to the tokens in the output. + """ + + finish_reason: typing.Optional[str] = pydantic.Field(default=None) + """ + Reason the generation finished. 
+ """ + + messages: typing.Optional[typing.List[ChatMessage]] = pydantic.Field(default=None) + """ + The messages passed to the to provider chat endpoint. + """ + + tool_choice: typing.Optional[AgentCallResponseToolChoice] = pydantic.Field(default=None) + """ + Controls how the model uses tools. The following options are supported: + - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. + - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. + - `'required'` means the model must call one or more of the provided tools. + - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. + """ + + agent: AgentResponse = pydantic.Field() + """ + Agent that generated the Log. + """ + + start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None) + """ + When the logged event started. + """ + + end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None) + """ + When the logged event ended. + """ + + output: typing.Optional[str] = pydantic.Field(default=None) + """ + Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + """ + + created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None) + """ + User defined timestamp for when the log was created. + """ + + error: typing.Optional[str] = pydantic.Field(default=None) + """ + Error message if the log is an error. + """ + + provider_latency: typing.Optional[float] = pydantic.Field(default=None) + """ + Duration of the logged event in seconds. + """ + + stdout: typing.Optional[str] = pydantic.Field(default=None) + """ + Captured log and debug statements. + """ + + provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Raw request sent to provider. + """ + + provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Raw response received the provider. + """ + + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + The inputs passed to the prompt template. + """ + + source: typing.Optional[str] = pydantic.Field(default=None) + """ + Identifies where the model was called from. + """ + + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Any additional metadata to record. + """ + + log_status: typing.Optional[LogStatus] = pydantic.Field(default=None) + """ + Status of the Agent Log. If `incomplete`, the Agent turn was suspended due to a tool call and can be continued by calling /agents/continue with responses to the Agent's last message (which should contain tool calls). See the `previous_agent_message` field for easy access to the Agent's last message. + """ + + source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None) + """ + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + """ + + trace_parent_id: typing.Optional[str] = pydantic.Field(default=None) + """ + The ID of the parent Log to nest this Log under in a Trace. 
+ """ + + batches: typing.Optional[typing.List[str]] = pydantic.Field(default=None) + """ + Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations + """ + + user: typing.Optional[str] = pydantic.Field(default=None) + """ + End-user ID related to the Log. + """ + + environment: typing.Optional[str] = pydantic.Field(default=None) + """ + The name of the Environment the Log is associated to. + """ + + save: typing.Optional[bool] = pydantic.Field(default=None) + """ + Whether the request/response payloads will be stored on Humanloop. + """ + + log_id: typing.Optional[str] = pydantic.Field(default=None) + """ + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + """ + + id: str = pydantic.Field() + """ + Unique identifier for the Log. + """ + + evaluator_logs: typing.List[EvaluatorLogResponse] = pydantic.Field() + """ + List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log. + """ + + trace_flow_id: typing.Optional[str] = pydantic.Field(default=None) + """ + Identifier for the Flow that the Trace belongs to. + """ + + trace_id: typing.Optional[str] = pydantic.Field(default=None) + """ + Identifier for the Trace that the Log belongs to. + """ + + trace_children: typing.Optional[typing.List[LogResponse]] = pydantic.Field(default=None) + """ + Logs nested under this Log in the Trace. + """ + + previous_agent_message: typing.Optional[ChatMessage] = pydantic.Field(default=None) + """ + The Agent's last message, which should contain tool calls. Only populated if the Log is incomplete due to a suspended Agent turn with tool calls. This is useful for continuing the Agent call by calling /agents/continue. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/agent_call_response_tool_choice.py b/src/humanloop/types/agent_call_response_tool_choice.py new file mode 100644 index 00000000..95eca73e --- /dev/null +++ b/src/humanloop/types/agent_call_response_tool_choice.py @@ -0,0 +1,8 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from .tool_choice import ToolChoice + +AgentCallResponseToolChoice = typing.Union[ + typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice +] diff --git a/src/humanloop/types/agent_call_stream_response.py b/src/humanloop/types/agent_call_stream_response.py new file mode 100644 index 00000000..8e77671a --- /dev/null +++ b/src/humanloop/types/agent_call_stream_response.py @@ -0,0 +1,42 @@ +# This file was auto-generated by Fern from our API Definition. 
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_log_response import AgentLogResponse
+from .agent_response import AgentResponse
+from .evaluator_log_response import EvaluatorLogResponse
+from .evaluator_response import EvaluatorResponse
+from .flow_log_response import FlowLogResponse
+from .flow_response import FlowResponse
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse
+from .prompt_log_response import PromptLogResponse
+from .prompt_response import PromptResponse
+from .tool_log_response import ToolLogResponse
+from .tool_response import ToolResponse
+from .version_deployment_response import VersionDeploymentResponse
+from .version_id_response import VersionIdResponse
+import typing
+from .agent_call_stream_response_payload import AgentCallStreamResponsePayload
+from .event_type import EventType
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class AgentCallStreamResponse(UncheckedBaseModel):
+    """
+    Response model for calling an Agent in streaming mode.
+    """
+
+    log_id: str
+    message: str
+    payload: typing.Optional[AgentCallStreamResponsePayload] = None
+    type: EventType
+
+    if IS_PYDANTIC_V2:
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
+    else:
+
+        class Config:
+            frozen = True
+            smart_union = True
+            extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/agent_call_stream_response_payload.py b/src/humanloop/types/agent_call_stream_response_payload.py
new file mode 100644
index 00000000..85422047
--- /dev/null
+++ b/src/humanloop/types/agent_call_stream_response_payload.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .log_stream_response import LogStreamResponse
+from .log_response import LogResponse
+from .tool_call import ToolCall
+
+AgentCallStreamResponsePayload = typing.Union[LogStreamResponse, LogResponse, ToolCall]
diff --git a/src/humanloop/types/agent_continue_response.py b/src/humanloop/types/agent_continue_response.py
new file mode 100644
index 00000000..0bbd7858
--- /dev/null
+++ b/src/humanloop/types/agent_continue_response.py
@@ -0,0 +1,224 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
+from .evaluator_response import EvaluatorResponse
+from .flow_response import FlowResponse
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse
+from .prompt_response import PromptResponse
+from .tool_response import ToolResponse
+from .version_deployment_response import VersionDeploymentResponse
+from .version_id_response import VersionIdResponse
+from .agent_log_response import AgentLogResponse
+from .evaluator_log_response import EvaluatorLogResponse
+from .flow_log_response import FlowLogResponse
+from .prompt_log_response import PromptLogResponse
+from .tool_log_response import ToolLogResponse
+import typing
+from .chat_message import ChatMessage
+import pydantic
+from .agent_continue_response_tool_choice import AgentContinueResponseToolChoice
+import datetime as dt
+from .log_status import LogStatus
+from .log_response import LogResponse
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class AgentContinueResponse(UncheckedBaseModel):
+    """
+    Response model for continuing an Agent call.
+    """
+
+    output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
+    """
+    The message returned by the provider.
+    """
+
+    prompt_tokens: typing.Optional[int] = pydantic.Field(default=None)
+    """
+    Number of tokens in the prompt used to generate the output.
+    """
+
+    reasoning_tokens: typing.Optional[int] = pydantic.Field(default=None)
+    """
+    Number of reasoning tokens used to generate the output.
+    """
+
+    output_tokens: typing.Optional[int] = pydantic.Field(default=None)
+    """
+    Number of tokens in the output generated by the model.
+    """
+
+    prompt_cost: typing.Optional[float] = pydantic.Field(default=None)
+    """
+    Cost in dollars associated to the tokens in the prompt.
+    """
+
+    output_cost: typing.Optional[float] = pydantic.Field(default=None)
+    """
+    Cost in dollars associated to the tokens in the output.
+    """
+
+    finish_reason: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Reason the generation finished.
+    """
+
+    messages: typing.Optional[typing.List[ChatMessage]] = pydantic.Field(default=None)
+    """
+    The messages passed to the provider chat endpoint.
+    """
+
+    tool_choice: typing.Optional[AgentContinueResponseToolChoice] = pydantic.Field(default=None)
+    """
+    Controls how the model uses tools. The following options are supported:
+    - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+    - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+    - `'required'` means the model must call one or more of the provided tools.
+    - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+    """
+
+    agent: AgentResponse = pydantic.Field()
+    """
+    Agent that generated the Log.
+    """
+
+    start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+    """
+    When the logged event started.
+    """
+
+    end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+    """
+    When the logged event ended.
+    """
+
+    output: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+    """
+
+    created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+    """
+    User defined timestamp for when the log was created.
+    """
+
+    error: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Error message if the log is an error.
+    """
+
+    provider_latency: typing.Optional[float] = pydantic.Field(default=None)
+    """
+    Duration of the logged event in seconds.
+    """
+
+    stdout: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Captured log and debug statements.
+    """
+
+    provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+    """
+    Raw request sent to the provider.
+    """
+
+    provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+    """
+    Raw response received from the provider.
+    """
+
+    inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+    """
+    The inputs passed to the prompt template.
+    """
+
+    source: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Identifies where the model was called from.
+    """
+
+    metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+    """
+    Any additional metadata to record.
+    """
+
+    log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
+    """
+    Status of the Agent Log. If `incomplete`, the Agent turn was suspended due to a tool call and can be continued by calling /agents/continue with responses to the Agent's last message (which should contain tool calls). See the `previous_agent_message` field for easy access to the Agent's last message.
+    """
+
+    source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+    """
+
+    trace_parent_id: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    The ID of the parent Log to nest this Log under in a Trace.
+    """
+
+    batches: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
+    """
+    Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations
+    """
+
+    user: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    End-user ID related to the Log.
+    """
+
+    environment: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    The name of the Environment the Log is associated to.
+    """
+
+    save: typing.Optional[bool] = pydantic.Field(default=None)
+    """
+    Whether the request/response payloads will be stored on Humanloop.
+    """
+
+    log_id: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+    """
+
+    id: str = pydantic.Field()
+    """
+    Unique identifier for the Log.
+    """
+
+    evaluator_logs: typing.List[EvaluatorLogResponse] = pydantic.Field()
+    """
+    List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
+    """
+
+    trace_flow_id: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Identifier for the Flow that the Trace belongs to.
+    """
+
+    trace_id: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Identifier for the Trace that the Log belongs to.
+ """ + + trace_children: typing.Optional[typing.List[LogResponse]] = pydantic.Field(default=None) + """ + Logs nested under this Log in the Trace. + """ + + previous_agent_message: typing.Optional[ChatMessage] = pydantic.Field(default=None) + """ + The Agent's last message, which should contain tool calls. Only populated if the Log is incomplete due to a suspended Agent turn with tool calls. This is useful for continuing the Agent call by calling /agents/continue. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/agent_continue_response_tool_choice.py b/src/humanloop/types/agent_continue_response_tool_choice.py new file mode 100644 index 00000000..20f3fb75 --- /dev/null +++ b/src/humanloop/types/agent_continue_response_tool_choice.py @@ -0,0 +1,8 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from .tool_choice import ToolChoice + +AgentContinueResponseToolChoice = typing.Union[ + typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice +] diff --git a/src/humanloop/types/agent_continue_stream_response.py b/src/humanloop/types/agent_continue_stream_response.py new file mode 100644 index 00000000..872d6588 --- /dev/null +++ b/src/humanloop/types/agent_continue_stream_response.py @@ -0,0 +1,42 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_log_response import AgentLogResponse +from .agent_response import AgentResponse +from .evaluator_log_response import EvaluatorLogResponse +from .evaluator_response import EvaluatorResponse +from .flow_log_response import FlowLogResponse +from .flow_response import FlowResponse +from .monitoring_evaluator_response import MonitoringEvaluatorResponse +from .prompt_log_response import PromptLogResponse +from .prompt_response import PromptResponse +from .tool_log_response import ToolLogResponse +from .tool_response import ToolResponse +from .version_deployment_response import VersionDeploymentResponse +from .version_id_response import VersionIdResponse +import typing +from .agent_continue_stream_response_payload import AgentContinueStreamResponsePayload +from .event_type import EventType +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class AgentContinueStreamResponse(UncheckedBaseModel): + """ + Response model for continuing an Agent call in streaming mode. + """ + + log_id: str + message: str + payload: typing.Optional[AgentContinueStreamResponsePayload] = None + type: EventType + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/agent_continue_stream_response_payload.py b/src/humanloop/types/agent_continue_stream_response_payload.py new file mode 100644 index 00000000..0e5f8a58 --- /dev/null +++ b/src/humanloop/types/agent_continue_stream_response_payload.py @@ -0,0 +1,8 @@ +# This file was auto-generated by Fern from our API Definition. 
+
+import typing
+from .log_stream_response import LogStreamResponse
+from .log_response import LogResponse
+from .tool_call import ToolCall
+
+AgentContinueStreamResponsePayload = typing.Union[LogStreamResponse, LogResponse, ToolCall]
diff --git a/src/humanloop/types/agent_inline_tool.py b/src/humanloop/types/agent_inline_tool.py
new file mode 100644
index 00000000..dc618c35
--- /dev/null
+++ b/src/humanloop/types/agent_inline_tool.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .tool_function import ToolFunction
+from .on_agent_call_enum import OnAgentCallEnum
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class AgentInlineTool(UncheckedBaseModel):
+    type: typing.Literal["inline"] = "inline"
+    json_schema: ToolFunction
+    on_agent_call: typing.Optional[OnAgentCallEnum] = None
+
+    if IS_PYDANTIC_V2:
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
+    else:
+
+        class Config:
+            frozen = True
+            smart_union = True
+            extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/agent_kernel_request.py b/src/humanloop/types/agent_kernel_request.py
new file mode 100644
index 00000000..6503b104
--- /dev/null
+++ b/src/humanloop/types/agent_kernel_request.py
@@ -0,0 +1,122 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import pydantic
+import typing
+from .model_endpoints import ModelEndpoints
+from .agent_kernel_request_template import AgentKernelRequestTemplate
+from .template_language import TemplateLanguage
+from .model_providers import ModelProviders
+from .agent_kernel_request_stop import AgentKernelRequestStop
+from .response_format import ResponseFormat
+from .agent_kernel_request_reasoning_effort import AgentKernelRequestReasoningEffort
+from .agent_kernel_request_tools_item import AgentKernelRequestToolsItem
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class AgentKernelRequest(UncheckedBaseModel):
+    """
+    Request model for an Agent version.
+
+    Contains the Prompt-related fields shared with PromptKernelRequest, plus Agent-specific fields such as `tools` and `max_iterations`.
+    """
+
+    model: str = pydantic.Field()
+    """
+    The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+    """
+
+    endpoint: typing.Optional[ModelEndpoints] = pydantic.Field(default=None)
+    """
+    The provider model endpoint used.
+    """
+
+    template: typing.Optional[AgentKernelRequestTemplate] = pydantic.Field(default=None)
+    """
+    The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+    For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+    For completion models, provide a prompt template as a string.
+
+    Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+    """
+
+    template_language: typing.Optional[TemplateLanguage] = pydantic.Field(default=None)
+    """
+    The template language to use for rendering the template.
+    """
+
+    provider: typing.Optional[ModelProviders] = pydantic.Field(default=None)
+    """
+    The company providing the underlying model service.
+    """
+
+    max_tokens: typing.Optional[int] = pydantic.Field(default=None)
+    """
+    The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt.
+    """
+
+    temperature: typing.Optional[float] = pydantic.Field(default=None)
+    """
+    What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+    """
+
+    top_p: typing.Optional[float] = pydantic.Field(default=None)
+    """
+    An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+    """
+
+    stop: typing.Optional[AgentKernelRequestStop] = pydantic.Field(default=None)
+    """
+    The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+    """
+
+    presence_penalty: typing.Optional[float] = pydantic.Field(default=None)
+    """
+    Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+    """
+
+    frequency_penalty: typing.Optional[float] = pydantic.Field(default=None)
+    """
+    Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+    """
+
+    other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+    """
+    Other parameter values to be passed to the provider call.
+    """
+
+    seed: typing.Optional[int] = pydantic.Field(default=None)
+    """
+    If specified, the model will make a best effort to sample deterministically, but it is not guaranteed.
+    """
+
+    response_format: typing.Optional[ResponseFormat] = pydantic.Field(default=None)
+    """
+    The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+    """
+
+    reasoning_effort: typing.Optional[AgentKernelRequestReasoningEffort] = pydantic.Field(default=None)
+    """
+    Guidance on how many reasoning tokens it should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
+    """
+
+    tools: typing.Optional[typing.List[AgentKernelRequestToolsItem]] = None
+    attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+    """
+    Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used.
+    """
+
+    max_iterations: typing.Optional[int] = pydantic.Field(default=None)
+    """
+    The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
+    """
+
+    if IS_PYDANTIC_V2:
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
+    else:
+
+        class Config:
+            frozen = True
+            smart_union = True
+            extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/agent_kernel_request_reasoning_effort.py b/src/humanloop/types/agent_kernel_request_reasoning_effort.py
new file mode 100644
index 00000000..a8e8e98b
--- /dev/null
+++ b/src/humanloop/types/agent_kernel_request_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+ +import typing +from .open_ai_reasoning_effort import OpenAiReasoningEffort + +AgentKernelRequestReasoningEffort = typing.Union[OpenAiReasoningEffort, int] diff --git a/src/humanloop/types/agent_kernel_request_stop.py b/src/humanloop/types/agent_kernel_request_stop.py new file mode 100644 index 00000000..e38c12e2 --- /dev/null +++ b/src/humanloop/types/agent_kernel_request_stop.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +AgentKernelRequestStop = typing.Union[str, typing.List[str]] diff --git a/src/humanloop/types/agent_kernel_request_template.py b/src/humanloop/types/agent_kernel_request_template.py new file mode 100644 index 00000000..31a351f2 --- /dev/null +++ b/src/humanloop/types/agent_kernel_request_template.py @@ -0,0 +1,6 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from .chat_message import ChatMessage + +AgentKernelRequestTemplate = typing.Union[str, typing.List[ChatMessage]] diff --git a/src/humanloop/types/agent_kernel_request_tools_item.py b/src/humanloop/types/agent_kernel_request_tools_item.py new file mode 100644 index 00000000..82c2fecf --- /dev/null +++ b/src/humanloop/types/agent_kernel_request_tools_item.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from .agent_linked_file_request import AgentLinkedFileRequest +from .agent_inline_tool import AgentInlineTool + +AgentKernelRequestToolsItem = typing.Union[AgentLinkedFileRequest, AgentInlineTool] diff --git a/src/humanloop/types/agent_linked_file_request.py b/src/humanloop/types/agent_linked_file_request.py new file mode 100644 index 00000000..9efd4b6a --- /dev/null +++ b/src/humanloop/types/agent_linked_file_request.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from .linked_file_request import LinkedFileRequest +from .on_agent_call_enum import OnAgentCallEnum +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class AgentLinkedFileRequest(UncheckedBaseModel): + type: typing.Literal["file"] = "file" + link: LinkedFileRequest + on_agent_call: typing.Optional[OnAgentCallEnum] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/agent_linked_file_response.py b/src/humanloop/types/agent_linked_file_response.py new file mode 100644 index 00000000..d85d682e --- /dev/null +++ b/src/humanloop/types/agent_linked_file_response.py @@ -0,0 +1,39 @@ +# This file was auto-generated by Fern from our API Definition. 
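The AgentKernelRequestToolsItem union above means an Agent version can mix links to existing Files with one-off inline schemas. A hedged sketch of building such a tools list; the file ID is made up, and the ToolFunction fields shown are an assumption about its schema rather than something this patch defines:

from humanloop.types import AgentInlineTool, AgentLinkedFileRequest, LinkedFileRequest, ToolFunction

tools = [
    # Link a Tool file that already exists on Humanloop ("tl_1234" is illustrative).
    AgentLinkedFileRequest(link=LinkedFileRequest(file_id="tl_1234"), on_agent_call="continue"),
    # Define a tool inline and stop the Agent turn when the model calls it.
    AgentInlineTool(
        json_schema=ToolFunction(name="get_weather", parameters={"type": "object", "properties": {}}),
        on_agent_call="stop",
    ),
]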
+ +from __future__ import annotations +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from .linked_file_request import LinkedFileRequest +from .on_agent_call_enum import OnAgentCallEnum +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic +from ..core.pydantic_utilities import update_forward_refs + + +class AgentLinkedFileResponse(UncheckedBaseModel): + type: typing.Literal["file"] = "file" + link: LinkedFileRequest + on_agent_call: typing.Optional[OnAgentCallEnum] = None + file: typing.Optional["AgentLinkedFileResponseFile"] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .agent_response import AgentResponse # noqa: E402 +from .evaluator_response import EvaluatorResponse # noqa: E402 +from .flow_response import FlowResponse # noqa: E402 +from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402 +from .prompt_response import PromptResponse # noqa: E402 +from .tool_response import ToolResponse # noqa: E402 +from .version_deployment_response import VersionDeploymentResponse # noqa: E402 +from .version_id_response import VersionIdResponse # noqa: E402 +from .agent_linked_file_response_file import AgentLinkedFileResponseFile # noqa: E402 + +update_forward_refs(AgentLinkedFileResponse) diff --git a/src/humanloop/types/agent_linked_file_response_file.py b/src/humanloop/types/agent_linked_file_response_file.py new file mode 100644 index 00000000..42d38fe4 --- /dev/null +++ b/src/humanloop/types/agent_linked_file_response_file.py @@ -0,0 +1,16 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations +import typing +from .dataset_response import DatasetResponse +import typing + +if typing.TYPE_CHECKING: + from .prompt_response import PromptResponse + from .tool_response import ToolResponse + from .evaluator_response import EvaluatorResponse + from .flow_response import FlowResponse + from .agent_response import AgentResponse +AgentLinkedFileResponseFile = typing.Union[ + "PromptResponse", "ToolResponse", DatasetResponse, "EvaluatorResponse", "FlowResponse", "AgentResponse" +] diff --git a/src/humanloop/types/agent_log_response.py b/src/humanloop/types/agent_log_response.py new file mode 100644 index 00000000..f5b5e8e8 --- /dev/null +++ b/src/humanloop/types/agent_log_response.py @@ -0,0 +1,224 @@ +# This file was auto-generated by Fern from our API Definition. 
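Because the resolved file on a linked-file response is a union over the concrete File response models, callers are expected to narrow it before using subtype-specific fields. A minimal sketch, assuming these models are re-exported from humanloop.types:

from humanloop.types import AgentLinkedFileResponse, PromptResponse

def describe_link(link: AgentLinkedFileResponse) -> str:
    # `file` is Optional: it may be unresolved on a bare request echo.
    if link.file is None:
        return f"unresolved link to {link.link.file_id}"
    if isinstance(link.file, PromptResponse):
        return f"linked Prompt at {link.file.path}"
    # All File responses share `path`, so the fallback stays generic.
    return f"linked {type(link.file).__name__} at {link.file.path}"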
+
+from __future__ import annotations
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
+from .evaluator_response import EvaluatorResponse
+from .flow_response import FlowResponse
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse
+from .prompt_response import PromptResponse
+from .tool_response import ToolResponse
+from .version_deployment_response import VersionDeploymentResponse
+from .version_id_response import VersionIdResponse
+import typing
+from .chat_message import ChatMessage
+import pydantic
+from .agent_log_response_tool_choice import AgentLogResponseToolChoice
+import datetime as dt
+from .log_status import LogStatus
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.pydantic_utilities import update_forward_refs
+
+
+class AgentLogResponse(UncheckedBaseModel):
+ """
+ General response for a Log
+ """
+
+ output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
+ """
+ The message returned by the provider.
+ """
+
+ prompt_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of tokens in the prompt used to generate the output.
+ """
+
+ reasoning_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of reasoning tokens used to generate the output.
+ """
+
+ output_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of tokens in the output generated by the model.
+ """
+
+ prompt_cost: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Cost in dollars associated with the tokens in the prompt.
+ """
+
+ output_cost: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Cost in dollars associated with the tokens in the output.
+ """
+
+ finish_reason: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Reason the generation finished.
+ """
+
+ messages: typing.Optional[typing.List[ChatMessage]] = pydantic.Field(default=None)
+ """
+ The messages passed to the provider chat endpoint.
+ """
+
+ tool_choice: typing.Optional[AgentLogResponseToolChoice] = pydantic.Field(default=None)
+ """
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+ """
+
+ agent: AgentResponse = pydantic.Field()
+ """
+ Agent that generated the Log.
+ """
+
+ start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ When the logged event started.
+ """
+
+ end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ When the logged event ended.
+ """
+
+ output: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ User-defined timestamp for when the log was created.
+ """ + + error: typing.Optional[str] = pydantic.Field(default=None) + """ + Error message if the log is an error. + """ + + provider_latency: typing.Optional[float] = pydantic.Field(default=None) + """ + Duration of the logged event in seconds. + """ + + stdout: typing.Optional[str] = pydantic.Field(default=None) + """ + Captured log and debug statements. + """ + + provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Raw request sent to provider. + """ + + provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Raw response received the provider. + """ + + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + The inputs passed to the prompt template. + """ + + source: typing.Optional[str] = pydantic.Field(default=None) + """ + Identifies where the model was called from. + """ + + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Any additional metadata to record. + """ + + log_status: typing.Optional[LogStatus] = pydantic.Field(default=None) + """ + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + """ + + source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None) + """ + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + """ + + trace_parent_id: typing.Optional[str] = pydantic.Field(default=None) + """ + The ID of the parent Log to nest this Log under in a Trace. + """ + + batches: typing.Optional[typing.List[str]] = pydantic.Field(default=None) + """ + Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations + """ + + user: typing.Optional[str] = pydantic.Field(default=None) + """ + End-user ID related to the Log. + """ + + environment: typing.Optional[str] = pydantic.Field(default=None) + """ + The name of the Environment the Log is associated to. + """ + + save: typing.Optional[bool] = pydantic.Field(default=None) + """ + Whether the request/response payloads will be stored on Humanloop. + """ + + log_id: typing.Optional[str] = pydantic.Field(default=None) + """ + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + """ + + id: str = pydantic.Field() + """ + Unique identifier for the Log. + """ + + evaluator_logs: typing.List["EvaluatorLogResponse"] = pydantic.Field() + """ + List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log. + """ + + trace_flow_id: typing.Optional[str] = pydantic.Field(default=None) + """ + Identifier for the Flow that the Trace belongs to. + """ + + trace_id: typing.Optional[str] = pydantic.Field(default=None) + """ + Identifier for the Trace that the Log belongs to. + """ + + trace_children: typing.Optional[typing.List["LogResponse"]] = pydantic.Field(default=None) + """ + Logs nested under this Log in the Trace. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .evaluator_log_response import EvaluatorLogResponse # noqa: E402 +from .flow_log_response import FlowLogResponse # noqa: E402 +from .prompt_log_response import PromptLogResponse # noqa: E402 +from .tool_log_response import ToolLogResponse # noqa: E402 +from .log_response import LogResponse # noqa: E402 + +update_forward_refs(AgentLogResponse) diff --git a/src/humanloop/types/agent_log_response_tool_choice.py b/src/humanloop/types/agent_log_response_tool_choice.py new file mode 100644 index 00000000..5cb07628 --- /dev/null +++ b/src/humanloop/types/agent_log_response_tool_choice.py @@ -0,0 +1,8 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from .tool_choice import ToolChoice + +AgentLogResponseToolChoice = typing.Union[ + typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice +] diff --git a/src/humanloop/types/agent_log_stream_response.py b/src/humanloop/types/agent_log_stream_response.py new file mode 100644 index 00000000..91547189 --- /dev/null +++ b/src/humanloop/types/agent_log_stream_response.py @@ -0,0 +1,98 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +import pydantic +import datetime as dt +from .chat_message import ChatMessage +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class AgentLogStreamResponse(UncheckedBaseModel): + """ + Prompt specific log output shared between PromptLogRequest and PromptCallLogResponse. + """ + + output: typing.Optional[str] = pydantic.Field(default=None) + """ + Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + """ + + created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None) + """ + User defined timestamp for when the log was created. + """ + + error: typing.Optional[str] = pydantic.Field(default=None) + """ + Error message if the log is an error. + """ + + provider_latency: typing.Optional[float] = pydantic.Field(default=None) + """ + Duration of the logged event in seconds. + """ + + stdout: typing.Optional[str] = pydantic.Field(default=None) + """ + Captured log and debug statements. + """ + + output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None) + """ + The message returned by the provider. + """ + + prompt_tokens: typing.Optional[int] = pydantic.Field(default=None) + """ + Number of tokens in the prompt used to generate the output. + """ + + reasoning_tokens: typing.Optional[int] = pydantic.Field(default=None) + """ + Number of reasoning tokens used to generate the output. + """ + + output_tokens: typing.Optional[int] = pydantic.Field(default=None) + """ + Number of tokens in the output generated by the model. + """ + + prompt_cost: typing.Optional[float] = pydantic.Field(default=None) + """ + Cost in dollars associated to the tokens in the prompt. + """ + + output_cost: typing.Optional[float] = pydantic.Field(default=None) + """ + Cost in dollars associated to the tokens in the output. + """ + + finish_reason: typing.Optional[str] = pydantic.Field(default=None) + """ + Reason the generation finished. 
+ """ + + id: str = pydantic.Field() + """ + ID of the log. + """ + + agent_id: str = pydantic.Field() + """ + ID of the Agent the log belongs to. + """ + + version_id: str = pydantic.Field() + """ + ID of the specific version of the Agent. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/agent_response.py b/src/humanloop/types/agent_response.py new file mode 100644 index 00000000..e58aaeba --- /dev/null +++ b/src/humanloop/types/agent_response.py @@ -0,0 +1,260 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations +from ..core.unchecked_base_model import UncheckedBaseModel +import pydantic +import typing +from .model_endpoints import ModelEndpoints +from .agent_response_template import AgentResponseTemplate +from .template_language import TemplateLanguage +from .model_providers import ModelProviders +from .agent_response_stop import AgentResponseStop +from .response_format import ResponseFormat +from .agent_response_reasoning_effort import AgentResponseReasoningEffort +import typing_extensions +from ..core.serialization import FieldMetadata +from .environment_response import EnvironmentResponse +import datetime as dt +from .user_response import UserResponse +from .version_status import VersionStatus +from .input_response import InputResponse +from .evaluator_aggregate import EvaluatorAggregate +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import update_forward_refs + + +class AgentResponse(UncheckedBaseModel): + """ + Base type that all File Responses should inherit from. + + Attributes defined here are common to all File Responses and should be overridden + in the inheriting classes with documentation and appropriate Field definitions. + """ + + path: str = pydantic.Field() + """ + Path of the Agent, including the name, which is used as a unique identifier. + """ + + id: str = pydantic.Field() + """ + Unique identifier for the Agent. + """ + + directory_id: typing.Optional[str] = pydantic.Field(default=None) + """ + ID of the directory that the file is in on Humanloop. + """ + + model: str = pydantic.Field() + """ + The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models) + """ + + endpoint: typing.Optional[ModelEndpoints] = pydantic.Field(default=None) + """ + The provider model endpoint used. + """ + + template: typing.Optional[AgentResponseTemplate] = pydantic.Field(default=None) + """ + The template contains the main structure and instructions for the model, including input variables for dynamic values. + + For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable. + For completion models, provide a prompt template as a string. + + Input variables should be specified with double curly bracket syntax: `{{input_name}}`. + """ + + template_language: typing.Optional[TemplateLanguage] = pydantic.Field(default=None) + """ + The template language to use for rendering the template. + """ + + provider: typing.Optional[ModelProviders] = pydantic.Field(default=None) + """ + The company providing the underlying model service. 
+ """ + + max_tokens: typing.Optional[int] = pydantic.Field(default=None) + """ + The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt + """ + + temperature: typing.Optional[float] = pydantic.Field(default=None) + """ + What sampling temperature to use when making a generation. Higher values means the model will be more creative. + """ + + top_p: typing.Optional[float] = pydantic.Field(default=None) + """ + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. + """ + + stop: typing.Optional[AgentResponseStop] = pydantic.Field(default=None) + """ + The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence. + """ + + presence_penalty: typing.Optional[float] = pydantic.Field(default=None) + """ + Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far. + """ + + frequency_penalty: typing.Optional[float] = pydantic.Field(default=None) + """ + Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far. + """ + + other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Other parameter values to be passed to the provider call. + """ + + seed: typing.Optional[int] = pydantic.Field(default=None) + """ + If specified, model will make a best effort to sample deterministically, but it is not guaranteed. + """ + + response_format: typing.Optional[ResponseFormat] = pydantic.Field(default=None) + """ + The format of the response. Only `{"type": "json_object"}` is currently supported for chat. + """ + + reasoning_effort: typing.Optional[AgentResponseReasoningEffort] = pydantic.Field(default=None) + """ + Guidance on how many reasoning tokens it should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect a OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. + """ + + tools: typing.List["AgentResponseToolsItem"] = pydantic.Field() + """ + List of tools that the Agent can call. These can be linked files or inline tools. + """ + + attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used. + """ + + max_iterations: typing.Optional[int] = pydantic.Field(default=None) + """ + The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called. + """ + + version_name: typing.Optional[str] = pydantic.Field(default=None) + """ + Unique name for the Agent version. Version names must be unique for a given Agent. + """ + + version_description: typing.Optional[str] = pydantic.Field(default=None) + """ + Description of the version, e.g., the changes made in this version. + """ + + description: typing.Optional[str] = pydantic.Field(default=None) + """ + Description of the Agent. + """ + + tags: typing.Optional[typing.List[str]] = pydantic.Field(default=None) + """ + List of tags associated with the file. + """ + + readme: typing.Optional[str] = pydantic.Field(default=None) + """ + Long description of the file. 
+ """ + + name: str = pydantic.Field() + """ + Name of the Agent. + """ + + schema_: typing_extensions.Annotated[ + typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema") + ] = pydantic.Field(default=None) + """ + The JSON schema for the Prompt. + """ + + version_id: str = pydantic.Field() + """ + Unique identifier for the specific Agent Version. If no query params provided, the default deployed Agent Version is returned. + """ + + type: typing.Optional[typing.Literal["agent"]] = None + environments: typing.Optional[typing.List[EnvironmentResponse]] = pydantic.Field(default=None) + """ + The list of environments the Agent Version is deployed to. + """ + + created_at: dt.datetime + updated_at: dt.datetime + created_by: typing.Optional[UserResponse] = pydantic.Field(default=None) + """ + The user who created the Agent. + """ + + committed_by: typing.Optional[UserResponse] = pydantic.Field(default=None) + """ + The user who committed the Agent Version. + """ + + committed_at: typing.Optional[dt.datetime] = pydantic.Field(default=None) + """ + The date and time the Agent Version was committed. + """ + + status: VersionStatus = pydantic.Field() + """ + The status of the Agent Version. + """ + + last_used_at: dt.datetime + version_logs_count: int = pydantic.Field() + """ + The number of logs that have been generated for this Agent Version + """ + + total_logs_count: int = pydantic.Field() + """ + The number of logs that have been generated across all Agent Versions + """ + + inputs: typing.List[InputResponse] = pydantic.Field() + """ + Inputs associated to the Agent. Inputs correspond to any of the variables used within the Agent template. + """ + + evaluators: typing.Optional[typing.List["MonitoringEvaluatorResponse"]] = pydantic.Field(default=None) + """ + Evaluators that have been attached to this Agent that are used for monitoring logs. + """ + + evaluator_aggregates: typing.Optional[typing.List[EvaluatorAggregate]] = pydantic.Field(default=None) + """ + Aggregation of Evaluator results for the Agent Version. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402 +from .evaluator_response import EvaluatorResponse # noqa: E402 +from .flow_response import FlowResponse # noqa: E402 +from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402 +from .prompt_response import PromptResponse # noqa: E402 +from .tool_response import ToolResponse # noqa: E402 +from .version_deployment_response import VersionDeploymentResponse # noqa: E402 +from .version_id_response import VersionIdResponse # noqa: E402 +from .agent_response_tools_item import AgentResponseToolsItem # noqa: E402 + +update_forward_refs(AgentResponse) diff --git a/src/humanloop/types/agent_response_reasoning_effort.py b/src/humanloop/types/agent_response_reasoning_effort.py new file mode 100644 index 00000000..59254f38 --- /dev/null +++ b/src/humanloop/types/agent_response_reasoning_effort.py @@ -0,0 +1,6 @@ +# This file was auto-generated by Fern from our API Definition. 
+
+import typing
+from .open_ai_reasoning_effort import OpenAiReasoningEffort
+
+AgentResponseReasoningEffort = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/types/agent_response_stop.py b/src/humanloop/types/agent_response_stop.py
new file mode 100644
index 00000000..5c3b6a48
--- /dev/null
+++ b/src/humanloop/types/agent_response_stop.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentResponseStop = typing.Union[str, typing.List[str]]
diff --git a/src/humanloop/types/agent_response_template.py b/src/humanloop/types/agent_response_template.py
new file mode 100644
index 00000000..4c084dc8
--- /dev/null
+++ b/src/humanloop/types/agent_response_template.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .chat_message import ChatMessage
+
+AgentResponseTemplate = typing.Union[str, typing.List[ChatMessage]]
diff --git a/src/humanloop/types/agent_response_tools_item.py b/src/humanloop/types/agent_response_tools_item.py
new file mode 100644
index 00000000..8095608f
--- /dev/null
+++ b/src/humanloop/types/agent_response_tools_item.py
@@ -0,0 +1,10 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+import typing
+from .agent_inline_tool import AgentInlineTool
+import typing
+
+if typing.TYPE_CHECKING:
+ from .agent_linked_file_response import AgentLinkedFileResponse
+AgentResponseToolsItem = typing.Union["AgentLinkedFileResponse", AgentInlineTool]
diff --git a/src/humanloop/types/anthropic_redacted_thinking_content.py b/src/humanloop/types/anthropic_redacted_thinking_content.py
new file mode 100644
index 00000000..ebac897b
--- /dev/null
+++ b/src/humanloop/types/anthropic_redacted_thinking_content.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class AnthropicRedactedThinkingContent(UncheckedBaseModel):
+ type: typing.Literal["redacted_thinking"] = "redacted_thinking"
+ data: str = pydantic.Field()
+ """
+ Thinking block that Anthropic redacted for safety reasons. The user is expected to pass the block back to Anthropic.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/anthropic_thinking_content.py b/src/humanloop/types/anthropic_thinking_content.py
new file mode 100644
index 00000000..bf7fc808
--- /dev/null
+++ b/src/humanloop/types/anthropic_thinking_content.py
@@ -0,0 +1,28 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class AnthropicThinkingContent(UncheckedBaseModel):
+ type: typing.Literal["thinking"] = "thinking"
+ thinking: str = pydantic.Field()
+ """
+ Model's chain-of-thought for providing the response.
+ """
+
+ signature: str = pydantic.Field()
+ """
+ Cryptographic signature that verifies the thinking block was generated by Anthropic.
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/chat_message.py b/src/humanloop/types/chat_message.py index c09f2768..c72bc90d 100644 --- a/src/humanloop/types/chat_message.py +++ b/src/humanloop/types/chat_message.py @@ -6,6 +6,7 @@ import pydantic from .chat_role import ChatRole from .tool_call import ToolCall +from .chat_message_thinking_item import ChatMessageThinkingItem from ..core.pydantic_utilities import IS_PYDANTIC_V2 @@ -35,6 +36,11 @@ class ChatMessage(UncheckedBaseModel): A list of tool calls requested by the assistant. """ + thinking: typing.Optional[typing.List[ChatMessageThinkingItem]] = pydantic.Field(default=None) + """ + Model's chain-of-thought for providing the response. Present on assistant messages if model supports it. + """ + if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: diff --git a/src/humanloop/types/chat_message_thinking_item.py b/src/humanloop/types/chat_message_thinking_item.py new file mode 100644 index 00000000..0a507724 --- /dev/null +++ b/src/humanloop/types/chat_message_thinking_item.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from .anthropic_thinking_content import AnthropicThinkingContent +from .anthropic_redacted_thinking_content import AnthropicRedactedThinkingContent + +ChatMessageThinkingItem = typing.Union[AnthropicThinkingContent, AnthropicRedactedThinkingContent] diff --git a/src/humanloop/types/create_agent_log_response.py b/src/humanloop/types/create_agent_log_response.py new file mode 100644 index 00000000..9dc66629 --- /dev/null +++ b/src/humanloop/types/create_agent_log_response.py @@ -0,0 +1,42 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import pydantic +import typing +from .log_status import LogStatus +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class CreateAgentLogResponse(UncheckedBaseModel): + """ + Response for an Agent Log. + """ + + id: str = pydantic.Field() + """ + Unique identifier for the Log. + """ + + agent_id: str = pydantic.Field() + """ + Unique identifier for the Agent. + """ + + version_id: str = pydantic.Field() + """ + Unique identifier for the Agent Version. + """ + + log_status: typing.Optional[LogStatus] = pydantic.Field(default=None) + """ + Status of the Agent Log. When a Agent Log is marked as `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/dataset_response.py b/src/humanloop/types/dataset_response.py index af79f597..2c614521 100644 --- a/src/humanloop/types/dataset_response.py +++ b/src/humanloop/types/dataset_response.py @@ -3,6 +3,8 @@ from ..core.unchecked_base_model import UncheckedBaseModel import pydantic import typing +import typing_extensions +from ..core.serialization import FieldMetadata from .environment_response import EnvironmentResponse import datetime as dt from .user_response import UserResponse @@ -43,6 +45,13 @@ class DatasetResponse(UncheckedBaseModel): Description of the Dataset. """ + schema_: typing_extensions.Annotated[ + typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema") + ] = pydantic.Field(default=None) + """ + The JSON schema for the File. + """ + readme: typing.Optional[str] = pydantic.Field(default=None) """ Long description of the file. diff --git a/src/humanloop/types/directory_with_parents_and_children_response.py b/src/humanloop/types/directory_with_parents_and_children_response.py index 5828a678..51f879b8 100644 --- a/src/humanloop/types/directory_with_parents_and_children_response.py +++ b/src/humanloop/types/directory_with_parents_and_children_response.py @@ -1,6 +1,8 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_response import AgentResponse from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse from .monitoring_evaluator_response import MonitoringEvaluatorResponse diff --git a/src/humanloop/types/directory_with_parents_and_children_response_files_item.py b/src/humanloop/types/directory_with_parents_and_children_response_files_item.py index 0bfeebf7..9d0d5fc4 100644 --- a/src/humanloop/types/directory_with_parents_and_children_response_files_item.py +++ b/src/humanloop/types/directory_with_parents_and_children_response_files_item.py @@ -6,7 +6,8 @@ from .evaluator_response import EvaluatorResponse from .dataset_response import DatasetResponse from .flow_response import FlowResponse +from .agent_response import AgentResponse DirectoryWithParentsAndChildrenResponseFilesItem = typing.Union[ - PromptResponse, ToolResponse, EvaluatorResponse, DatasetResponse, FlowResponse + PromptResponse, ToolResponse, EvaluatorResponse, DatasetResponse, FlowResponse, AgentResponse ] diff --git a/src/humanloop/types/evaluatee_response.py b/src/humanloop/types/evaluatee_response.py index 9ba9fe4d..4332aa12 100644 --- a/src/humanloop/types/evaluatee_response.py +++ b/src/humanloop/types/evaluatee_response.py @@ -1,6 +1,8 @@ # This file was auto-generated by Fern from our API Definition. 
from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_response import AgentResponse from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse from .monitoring_evaluator_response import MonitoringEvaluatorResponse diff --git a/src/humanloop/types/evaluation_evaluator_response.py b/src/humanloop/types/evaluation_evaluator_response.py index 413081c6..0c7de27e 100644 --- a/src/humanloop/types/evaluation_evaluator_response.py +++ b/src/humanloop/types/evaluation_evaluator_response.py @@ -1,6 +1,8 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_response import AgentResponse from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse from .monitoring_evaluator_response import MonitoringEvaluatorResponse diff --git a/src/humanloop/types/evaluation_log_response.py b/src/humanloop/types/evaluation_log_response.py index 6c931db0..84d117e2 100644 --- a/src/humanloop/types/evaluation_log_response.py +++ b/src/humanloop/types/evaluation_log_response.py @@ -1,6 +1,9 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_log_response import AgentLogResponse +from .agent_response import AgentResponse from .evaluator_log_response import EvaluatorLogResponse from .evaluator_response import EvaluatorResponse from .flow_log_response import FlowLogResponse diff --git a/src/humanloop/types/evaluation_response.py b/src/humanloop/types/evaluation_response.py index f113fff5..bcda94a4 100644 --- a/src/humanloop/types/evaluation_response.py +++ b/src/humanloop/types/evaluation_response.py @@ -1,6 +1,8 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_response import AgentResponse from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse from .monitoring_evaluator_response import MonitoringEvaluatorResponse diff --git a/src/humanloop/types/evaluation_run_response.py b/src/humanloop/types/evaluation_run_response.py index 1203ce2c..74d59e4c 100644 --- a/src/humanloop/types/evaluation_run_response.py +++ b/src/humanloop/types/evaluation_run_response.py @@ -1,6 +1,8 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_response import AgentResponse from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse from .monitoring_evaluator_response import MonitoringEvaluatorResponse diff --git a/src/humanloop/types/evaluation_runs_response.py b/src/humanloop/types/evaluation_runs_response.py index d91e1ee9..e09b2a73 100644 --- a/src/humanloop/types/evaluation_runs_response.py +++ b/src/humanloop/types/evaluation_runs_response.py @@ -1,6 +1,8 @@ # This file was auto-generated by Fern from our API Definition. 
from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_response import AgentResponse from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse from .monitoring_evaluator_response import MonitoringEvaluatorResponse diff --git a/src/humanloop/types/evaluator_log_response.py b/src/humanloop/types/evaluator_log_response.py index e457d580..71ca76c0 100644 --- a/src/humanloop/types/evaluator_log_response.py +++ b/src/humanloop/types/evaluator_log_response.py @@ -2,6 +2,8 @@ from __future__ import annotations from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_response import AgentResponse from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse from .monitoring_evaluator_response import MonitoringEvaluatorResponse @@ -189,6 +191,7 @@ class Config: extra = pydantic.Extra.allow +from .agent_log_response import AgentLogResponse # noqa: E402 from .flow_log_response import FlowLogResponse # noqa: E402 from .prompt_log_response import PromptLogResponse # noqa: E402 from .tool_log_response import ToolLogResponse # noqa: E402 diff --git a/src/humanloop/types/evaluator_response.py b/src/humanloop/types/evaluator_response.py index 175f456d..712ca698 100644 --- a/src/humanloop/types/evaluator_response.py +++ b/src/humanloop/types/evaluator_response.py @@ -5,6 +5,8 @@ import pydantic import typing from .evaluator_response_spec import EvaluatorResponseSpec +import typing_extensions +from ..core.serialization import FieldMetadata from .environment_response import EnvironmentResponse import datetime as dt from .user_response import UserResponse @@ -55,6 +57,13 @@ class EvaluatorResponse(UncheckedBaseModel): Description of the Evaluator. """ + schema_: typing_extensions.Annotated[ + typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema") + ] = pydantic.Field(default=None) + """ + The JSON schema for the File. + """ + readme: typing.Optional[str] = pydantic.Field(default=None) """ Long description of the file. @@ -124,6 +133,8 @@ class Config: extra = pydantic.Extra.allow +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402 +from .agent_response import AgentResponse # noqa: E402 from .flow_response import FlowResponse # noqa: E402 from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402 from .prompt_response import PromptResponse # noqa: E402 diff --git a/src/humanloop/types/event_type.py b/src/humanloop/types/event_type.py new file mode 100644 index 00000000..128eed92 --- /dev/null +++ b/src/humanloop/types/event_type.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +EventType = typing.Union[ + typing.Literal[ + "agent_turn_start", + "agent_turn_suspend", + "agent_turn_continue", + "agent_turn_end", + "agent_start", + "agent_update", + "agent_end", + "tool_start", + "tool_update", + "tool_end", + "error", + "agent_generation_error", + ], + typing.Any, +] diff --git a/src/humanloop/types/file_environment_response.py b/src/humanloop/types/file_environment_response.py index 70ed322f..7f34b7b3 100644 --- a/src/humanloop/types/file_environment_response.py +++ b/src/humanloop/types/file_environment_response.py @@ -1,6 +1,8 @@ # This file was auto-generated by Fern from our API Definition. 
from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_response import AgentResponse from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse from .monitoring_evaluator_response import MonitoringEvaluatorResponse diff --git a/src/humanloop/types/file_environment_response_file.py b/src/humanloop/types/file_environment_response_file.py index 2a105c9d..0254c2b8 100644 --- a/src/humanloop/types/file_environment_response_file.py +++ b/src/humanloop/types/file_environment_response_file.py @@ -6,7 +6,8 @@ from .dataset_response import DatasetResponse from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse +from .agent_response import AgentResponse FileEnvironmentResponseFile = typing.Union[ - PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse + PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse, AgentResponse ] diff --git a/src/humanloop/types/file_environment_variable_request.py b/src/humanloop/types/file_environment_variable_request.py new file mode 100644 index 00000000..8108245b --- /dev/null +++ b/src/humanloop/types/file_environment_variable_request.py @@ -0,0 +1,27 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class FileEnvironmentVariableRequest(UncheckedBaseModel): + name: str = pydantic.Field() + """ + Name of the environment variable. + """ + + value: str = pydantic.Field() + """ + Value of the environment variable. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/file_type.py b/src/humanloop/types/file_type.py index 7a870b84..f235825b 100644 --- a/src/humanloop/types/file_type.py +++ b/src/humanloop/types/file_type.py @@ -2,4 +2,4 @@ import typing -FileType = typing.Union[typing.Literal["prompt", "tool", "dataset", "evaluator", "flow"], typing.Any] +FileType = typing.Union[typing.Literal["prompt", "tool", "dataset", "evaluator", "flow", "agent"], typing.Any] diff --git a/src/humanloop/types/files_tool_type.py b/src/humanloop/types/files_tool_type.py index c32b9755..753d9ba2 100644 --- a/src/humanloop/types/files_tool_type.py +++ b/src/humanloop/types/files_tool_type.py @@ -3,5 +3,5 @@ import typing FilesToolType = typing.Union[ - typing.Literal["pinecone_search", "google", "mock", "snippet", "json_schema", "get_api_call"], typing.Any + typing.Literal["pinecone_search", "google", "mock", "snippet", "json_schema", "get_api_call", "python"], typing.Any ] diff --git a/src/humanloop/types/flow_log_response.py b/src/humanloop/types/flow_log_response.py index ba1e1cf6..58a87fac 100644 --- a/src/humanloop/types/flow_log_response.py +++ b/src/humanloop/types/flow_log_response.py @@ -2,6 +2,8 @@ from __future__ import annotations from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_response import AgentResponse from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse from .monitoring_evaluator_response import MonitoringEvaluatorResponse @@ -173,6 +175,7 @@ class Config: 
extra = pydantic.Extra.allow +from .agent_log_response import AgentLogResponse # noqa: E402 from .evaluator_log_response import EvaluatorLogResponse # noqa: E402 from .prompt_log_response import PromptLogResponse # noqa: E402 from .tool_log_response import ToolLogResponse # noqa: E402 diff --git a/src/humanloop/types/flow_response.py b/src/humanloop/types/flow_response.py index 4017b3b7..7768778e 100644 --- a/src/humanloop/types/flow_response.py +++ b/src/humanloop/types/flow_response.py @@ -4,6 +4,8 @@ from ..core.unchecked_base_model import UncheckedBaseModel import pydantic import typing +import typing_extensions +from ..core.serialization import FieldMetadata from .environment_response import EnvironmentResponse import datetime as dt from .user_response import UserResponse @@ -57,6 +59,13 @@ class FlowResponse(UncheckedBaseModel): Description of the Flow. """ + schema_: typing_extensions.Annotated[ + typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema") + ] = pydantic.Field(default=None) + """ + The JSON schema for the File. + """ + readme: typing.Optional[str] = pydantic.Field(default=None) """ Long description of the file. @@ -111,6 +120,8 @@ class Config: extra = pydantic.Extra.allow +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402 +from .agent_response import AgentResponse # noqa: E402 from .evaluator_response import EvaluatorResponse # noqa: E402 from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402 from .prompt_response import PromptResponse # noqa: E402 diff --git a/src/humanloop/types/linked_file_request.py b/src/humanloop/types/linked_file_request.py new file mode 100644 index 00000000..ee45ffdf --- /dev/null +++ b/src/humanloop/types/linked_file_request.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class LinkedFileRequest(UncheckedBaseModel): + file_id: str + environment_id: typing.Optional[str] = None + version_id: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/list_agents.py b/src/humanloop/types/list_agents.py new file mode 100644 index 00000000..36481f41 --- /dev/null +++ b/src/humanloop/types/list_agents.py @@ -0,0 +1,31 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_response import AgentResponse +from .evaluator_response import EvaluatorResponse +from .flow_response import FlowResponse +from .monitoring_evaluator_response import MonitoringEvaluatorResponse +from .prompt_response import PromptResponse +from .tool_response import ToolResponse +from .version_deployment_response import VersionDeploymentResponse +from .version_id_response import VersionIdResponse +import typing +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class ListAgents(UncheckedBaseModel): + records: typing.List[AgentResponse] = pydantic.Field() + """ + The list of Agents. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/list_evaluators.py b/src/humanloop/types/list_evaluators.py index 61edbec5..7b736e14 100644 --- a/src/humanloop/types/list_evaluators.py +++ b/src/humanloop/types/list_evaluators.py @@ -1,6 +1,8 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_response import AgentResponse from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse from .monitoring_evaluator_response import MonitoringEvaluatorResponse diff --git a/src/humanloop/types/list_flows.py b/src/humanloop/types/list_flows.py index 686dab26..41ec4008 100644 --- a/src/humanloop/types/list_flows.py +++ b/src/humanloop/types/list_flows.py @@ -1,6 +1,8 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_response import AgentResponse from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse from .monitoring_evaluator_response import MonitoringEvaluatorResponse diff --git a/src/humanloop/types/list_prompts.py b/src/humanloop/types/list_prompts.py index 94cda05e..f773d3f9 100644 --- a/src/humanloop/types/list_prompts.py +++ b/src/humanloop/types/list_prompts.py @@ -1,6 +1,8 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_response import AgentResponse from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse from .monitoring_evaluator_response import MonitoringEvaluatorResponse diff --git a/src/humanloop/types/list_tools.py b/src/humanloop/types/list_tools.py index 4080a6a1..84ddc89c 100644 --- a/src/humanloop/types/list_tools.py +++ b/src/humanloop/types/list_tools.py @@ -1,6 +1,8 @@ # This file was auto-generated by Fern from our API Definition. 
from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_response import AgentResponse from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse from .monitoring_evaluator_response import MonitoringEvaluatorResponse diff --git a/src/humanloop/types/log_response.py b/src/humanloop/types/log_response.py index 0ba81dd3..cd7a0a26 100644 --- a/src/humanloop/types/log_response.py +++ b/src/humanloop/types/log_response.py @@ -9,4 +9,7 @@ from .tool_log_response import ToolLogResponse from .evaluator_log_response import EvaluatorLogResponse from .flow_log_response import FlowLogResponse -LogResponse = typing.Union["PromptLogResponse", "ToolLogResponse", "EvaluatorLogResponse", "FlowLogResponse"] + from .agent_log_response import AgentLogResponse +LogResponse = typing.Union[ + "PromptLogResponse", "ToolLogResponse", "EvaluatorLogResponse", "FlowLogResponse", "AgentLogResponse" +] diff --git a/src/humanloop/types/log_stream_response.py b/src/humanloop/types/log_stream_response.py new file mode 100644 index 00000000..69ffacf4 --- /dev/null +++ b/src/humanloop/types/log_stream_response.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from .prompt_call_stream_response import PromptCallStreamResponse +from .agent_log_stream_response import AgentLogStreamResponse + +LogStreamResponse = typing.Union[PromptCallStreamResponse, AgentLogStreamResponse] diff --git a/src/humanloop/types/model_providers.py b/src/humanloop/types/model_providers.py index 8473d2ae..3f2c99fb 100644 --- a/src/humanloop/types/model_providers.py +++ b/src/humanloop/types/model_providers.py @@ -4,7 +4,7 @@ ModelProviders = typing.Union[ typing.Literal[ - "openai", "openai_azure", "mock", "anthropic", "bedrock", "cohere", "replicate", "google", "groq", "deepseek" + "anthropic", "bedrock", "cohere", "deepseek", "google", "groq", "mock", "openai", "openai_azure", "replicate" ], typing.Any, ] diff --git a/src/humanloop/types/monitoring_evaluator_response.py b/src/humanloop/types/monitoring_evaluator_response.py index e70dc4fb..1809af57 100644 --- a/src/humanloop/types/monitoring_evaluator_response.py +++ b/src/humanloop/types/monitoring_evaluator_response.py @@ -39,6 +39,8 @@ class Config: extra = pydantic.Extra.allow +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402 +from .agent_response import AgentResponse # noqa: E402 from .evaluator_response import EvaluatorResponse # noqa: E402 from .flow_response import FlowResponse # noqa: E402 from .prompt_response import PromptResponse # noqa: E402 diff --git a/src/humanloop/types/on_agent_call_enum.py b/src/humanloop/types/on_agent_call_enum.py new file mode 100644 index 00000000..3730256e --- /dev/null +++ b/src/humanloop/types/on_agent_call_enum.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +OnAgentCallEnum = typing.Union[typing.Literal["stop", "continue"], typing.Any] diff --git a/src/humanloop/types/open_ai_reasoning_effort.py b/src/humanloop/types/open_ai_reasoning_effort.py new file mode 100644 index 00000000..d8c48547 --- /dev/null +++ b/src/humanloop/types/open_ai_reasoning_effort.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
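Since LogStreamResponse above is a union of Prompt and Agent stream chunks, stream consumers can branch on the concrete type. A minimal sketch, assuming an iterable of already-deserialized chunks and that both members expose the shared output field:

import typing

from humanloop.types import AgentLogStreamResponse, LogStreamResponse

def collect_output(chunks: typing.Iterable[LogStreamResponse]) -> str:
    parts = []
    for chunk in chunks:
        if isinstance(chunk, AgentLogStreamResponse):
            # Agent chunks additionally carry the Agent and version IDs.
            print(f"agent={chunk.agent_id} version={chunk.version_id}")
        if chunk.output:
            parts.append(chunk.output)
    return "".join(parts)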
+ +import typing + +OpenAiReasoningEffort = typing.Union[typing.Literal["high", "medium", "low"], typing.Any] diff --git a/src/humanloop/types/paginated_data_agent_response.py b/src/humanloop/types/paginated_data_agent_response.py new file mode 100644 index 00000000..0febbadd --- /dev/null +++ b/src/humanloop/types/paginated_data_agent_response.py @@ -0,0 +1,31 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_response import AgentResponse +from .evaluator_response import EvaluatorResponse +from .flow_response import FlowResponse +from .monitoring_evaluator_response import MonitoringEvaluatorResponse +from .prompt_response import PromptResponse +from .tool_response import ToolResponse +from .version_deployment_response import VersionDeploymentResponse +from .version_id_response import VersionIdResponse +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class PaginatedDataAgentResponse(UncheckedBaseModel): + records: typing.List[AgentResponse] + page: int + size: int + total: int + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/paginated_data_evaluation_log_response.py b/src/humanloop/types/paginated_data_evaluation_log_response.py index 9e3c568e..c508f8a6 100644 --- a/src/humanloop/types/paginated_data_evaluation_log_response.py +++ b/src/humanloop/types/paginated_data_evaluation_log_response.py @@ -1,6 +1,9 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_log_response import AgentLogResponse +from .agent_response import AgentResponse from .evaluator_log_response import EvaluatorLogResponse from .evaluator_response import EvaluatorResponse from .flow_log_response import FlowLogResponse diff --git a/src/humanloop/types/paginated_data_evaluator_response.py b/src/humanloop/types/paginated_data_evaluator_response.py index 275f0528..2e82c736 100644 --- a/src/humanloop/types/paginated_data_evaluator_response.py +++ b/src/humanloop/types/paginated_data_evaluator_response.py @@ -1,6 +1,8 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_response import AgentResponse from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse from .monitoring_evaluator_response import MonitoringEvaluatorResponse diff --git a/src/humanloop/types/paginated_data_flow_response.py b/src/humanloop/types/paginated_data_flow_response.py index 990d58be..6cfcf9ae 100644 --- a/src/humanloop/types/paginated_data_flow_response.py +++ b/src/humanloop/types/paginated_data_flow_response.py @@ -1,6 +1,8 @@ # This file was auto-generated by Fern from our API Definition. 
from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_response import AgentResponse from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse from .monitoring_evaluator_response import MonitoringEvaluatorResponse diff --git a/src/humanloop/types/paginated_data_log_response.py b/src/humanloop/types/paginated_data_log_response.py index 57bae587..f41ca9ba 100644 --- a/src/humanloop/types/paginated_data_log_response.py +++ b/src/humanloop/types/paginated_data_log_response.py @@ -1,6 +1,9 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_log_response import AgentLogResponse +from .agent_response import AgentResponse from .evaluator_log_response import EvaluatorLogResponse from .evaluator_response import EvaluatorResponse from .flow_log_response import FlowLogResponse diff --git a/src/humanloop/types/paginated_data_prompt_response.py b/src/humanloop/types/paginated_data_prompt_response.py index ff71e584..d9e1d914 100644 --- a/src/humanloop/types/paginated_data_prompt_response.py +++ b/src/humanloop/types/paginated_data_prompt_response.py @@ -1,6 +1,8 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_response import AgentResponse from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse from .monitoring_evaluator_response import MonitoringEvaluatorResponse diff --git a/src/humanloop/types/paginated_data_tool_response.py b/src/humanloop/types/paginated_data_tool_response.py index 0e52b361..e2962e87 100644 --- a/src/humanloop/types/paginated_data_tool_response.py +++ b/src/humanloop/types/paginated_data_tool_response.py @@ -1,6 +1,8 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_response import AgentResponse from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse from .monitoring_evaluator_response import MonitoringEvaluatorResponse diff --git a/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py b/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py similarity index 76% rename from src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py rename to src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py index bd7082b3..87d5b603 100644 --- a/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py +++ b/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py @@ -1,6 +1,8 @@ # This file was auto-generated by Fern from our API Definition. 
from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_response import AgentResponse from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse from .monitoring_evaluator_response import MonitoringEvaluatorResponse @@ -9,16 +11,18 @@ from .version_deployment_response import VersionDeploymentResponse from .version_id_response import VersionIdResponse import typing -from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item import ( - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem, +from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item import ( + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem, ) from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic -class PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse(UncheckedBaseModel): +class PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse( + UncheckedBaseModel +): records: typing.List[ - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem ] page: int size: int diff --git a/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py b/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py similarity index 63% rename from src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py rename to src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py index 65c4f324..a1b4f056 100644 --- a/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py +++ b/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py @@ -6,7 +6,8 @@ from .dataset_response import DatasetResponse from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse +from .agent_response import AgentResponse -PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem = typing.Union[ - PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse -] +PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem = ( + typing.Union[PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse, AgentResponse] +) diff --git a/src/humanloop/types/paginated_evaluation_response.py b/src/humanloop/types/paginated_evaluation_response.py index 78e177e8..16232e0b 100644 --- a/src/humanloop/types/paginated_evaluation_response.py +++ b/src/humanloop/types/paginated_evaluation_response.py @@ -1,6 +1,8 @@ # This file was auto-generated by Fern from our API Definition. 
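The renamed paginated container above now carries AgentResponse as a sixth member of its records union. A rough sketch of narrowing those records, assuming AgentResponse exposes an `id` field like the other file responses (illustrative only, not part of the patch):

    import typing

    from humanloop.types.agent_response import AgentResponse
    from humanloop.types.paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response import (
        PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse,
    )


    def agent_record_ids(
        page: PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse,
    ) -> typing.List[str]:
        # Keep only the AgentResponse members of the six-way records union.
        return [record.id for record in page.records if isinstance(record, AgentResponse)]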
from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_response import AgentResponse from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse from .monitoring_evaluator_response import MonitoringEvaluatorResponse diff --git a/src/humanloop/types/populate_template_response.py b/src/humanloop/types/populate_template_response.py index d587d175..efcd1d0c 100644 --- a/src/humanloop/types/populate_template_response.py +++ b/src/humanloop/types/populate_template_response.py @@ -1,6 +1,8 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_response import AgentResponse from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse from .monitoring_evaluator_response import MonitoringEvaluatorResponse @@ -16,9 +18,11 @@ from .model_providers import ModelProviders from .populate_template_response_stop import PopulateTemplateResponseStop from .response_format import ResponseFormat -from .reasoning_effort import ReasoningEffort +from .populate_template_response_reasoning_effort import PopulateTemplateResponseReasoningEffort from .tool_function import ToolFunction from .linked_tool_response import LinkedToolResponse +import typing_extensions +from ..core.serialization import FieldMetadata from .environment_response import EnvironmentResponse import datetime as dt from .user_response import UserResponse @@ -126,9 +130,9 @@ class PopulateTemplateResponse(UncheckedBaseModel): The format of the response. Only `{"type": "json_object"}` is currently supported for chat. """ - reasoning_effort: typing.Optional[ReasoningEffort] = pydantic.Field(default=None) + reasoning_effort: typing.Optional[PopulateTemplateResponseReasoningEffort] = pydantic.Field(default=None) """ - Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models. + Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. """ tools: typing.Optional[typing.List[ToolFunction]] = pydantic.Field(default=None) @@ -176,6 +180,13 @@ class PopulateTemplateResponse(UncheckedBaseModel): Name of the Prompt. """ + schema_: typing_extensions.Annotated[ + typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema") + ] = pydantic.Field(default=None) + """ + The JSON schema for the Prompt. + """ + version_id: str = pydantic.Field() """ Unique identifier for the specific Prompt Version. If no query params provided, the default deployed Prompt Version is returned. diff --git a/src/humanloop/types/populate_template_response_reasoning_effort.py b/src/humanloop/types/populate_template_response_reasoning_effort.py new file mode 100644 index 00000000..8dd9f7f6 --- /dev/null +++ b/src/humanloop/types/populate_template_response_reasoning_effort.py @@ -0,0 +1,6 @@ +# This file was auto-generated by Fern from our API Definition.
+ +import typing +from .open_ai_reasoning_effort import OpenAiReasoningEffort + +PopulateTemplateResponseReasoningEffort = typing.Union[OpenAiReasoningEffort, int] diff --git a/src/humanloop/types/prompt_call_response.py b/src/humanloop/types/prompt_call_response.py index 4e1ae69c..ec74437f 100644 --- a/src/humanloop/types/prompt_call_response.py +++ b/src/humanloop/types/prompt_call_response.py @@ -1,6 +1,8 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_response import AgentResponse from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse from .monitoring_evaluator_response import MonitoringEvaluatorResponse diff --git a/src/humanloop/types/prompt_kernel_request.py b/src/humanloop/types/prompt_kernel_request.py index 6461bb19..80ba5ed5 100644 --- a/src/humanloop/types/prompt_kernel_request.py +++ b/src/humanloop/types/prompt_kernel_request.py @@ -9,12 +9,18 @@ from .model_providers import ModelProviders from .prompt_kernel_request_stop import PromptKernelRequestStop from .response_format import ResponseFormat -from .reasoning_effort import ReasoningEffort +from .prompt_kernel_request_reasoning_effort import PromptKernelRequestReasoningEffort from .tool_function import ToolFunction from ..core.pydantic_utilities import IS_PYDANTIC_V2 class PromptKernelRequest(UncheckedBaseModel): + """ + Base class used by both PromptKernelRequest and AgentKernelRequest. + + Contains the consistent Prompt-related fields. + """ + model: str = pydantic.Field() """ The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models) @@ -90,9 +96,9 @@ class PromptKernelRequest(UncheckedBaseModel): The format of the response. Only `{"type": "json_object"}` is currently supported for chat. """ - reasoning_effort: typing.Optional[ReasoningEffort] = pydantic.Field(default=None) + reasoning_effort: typing.Optional[PromptKernelRequestReasoningEffort] = pydantic.Field(default=None) """ - Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models. + Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. """ tools: typing.Optional[typing.List[ToolFunction]] = pydantic.Field(default=None) diff --git a/src/humanloop/types/prompt_kernel_request_reasoning_effort.py b/src/humanloop/types/prompt_kernel_request_reasoning_effort.py new file mode 100644 index 00000000..dda61bb4 --- /dev/null +++ b/src/humanloop/types/prompt_kernel_request_reasoning_effort.py @@ -0,0 +1,6 @@ +# This file was auto-generated by Fern from our API Definition.
+ +import typing +from .open_ai_reasoning_effort import OpenAiReasoningEffort + +PromptKernelRequestReasoningEffort = typing.Union[OpenAiReasoningEffort, int] diff --git a/src/humanloop/types/prompt_log_response.py b/src/humanloop/types/prompt_log_response.py index 2a1bad11..a9e26318 100644 --- a/src/humanloop/types/prompt_log_response.py +++ b/src/humanloop/types/prompt_log_response.py @@ -2,6 +2,8 @@ from __future__ import annotations from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_response import AgentResponse from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse from .monitoring_evaluator_response import MonitoringEvaluatorResponse @@ -213,6 +215,7 @@ class Config: extra = pydantic.Extra.allow +from .agent_log_response import AgentLogResponse # noqa: E402 from .evaluator_log_response import EvaluatorLogResponse # noqa: E402 from .flow_log_response import FlowLogResponse # noqa: E402 from .tool_log_response import ToolLogResponse # noqa: E402 diff --git a/src/humanloop/types/prompt_response.py b/src/humanloop/types/prompt_response.py index 07f4755d..5d6ff870 100644 --- a/src/humanloop/types/prompt_response.py +++ b/src/humanloop/types/prompt_response.py @@ -10,9 +10,11 @@ from .model_providers import ModelProviders from .prompt_response_stop import PromptResponseStop from .response_format import ResponseFormat -from .reasoning_effort import ReasoningEffort +from .prompt_response_reasoning_effort import PromptResponseReasoningEffort from .tool_function import ToolFunction from .linked_tool_response import LinkedToolResponse +import typing_extensions +from ..core.serialization import FieldMetadata from .environment_response import EnvironmentResponse import datetime as dt from .user_response import UserResponse @@ -120,9 +122,9 @@ class PromptResponse(UncheckedBaseModel): The format of the response. Only `{"type": "json_object"}` is currently supported for chat. """ - reasoning_effort: typing.Optional[ReasoningEffort] = pydantic.Field(default=None) + reasoning_effort: typing.Optional[PromptResponseReasoningEffort] = pydantic.Field(default=None) """ - Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models. + Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget. """ tools: typing.Optional[typing.List[ToolFunction]] = pydantic.Field(default=None) @@ -170,6 +172,13 @@ class PromptResponse(UncheckedBaseModel): Name of the Prompt. """ + schema_: typing_extensions.Annotated[ + typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema") + ] = pydantic.Field(default=None) + """ + The JSON schema for the Prompt. + """ + version_id: str = pydantic.Field() """ Unique identifier for the specific Prompt Version. If no query params provided, the default deployed Prompt Version is returned.
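With the per-model reasoning_effort unions above (PromptKernelRequestReasoningEffort and its siblings are Union[OpenAiReasoningEffort, int]), the field accepts either the OpenAI "high"/"medium"/"low" literal or an Anthropic integer token budget. A hedged sketch, with illustrative model names, assuming only `model` is required as in the diff above and all other fields keep their defaults (not part of the patch):

    from humanloop.types.prompt_kernel_request import PromptKernelRequest

    # OpenAI reasoning models take the OpenAiReasoningEffort literal...
    openai_kernel = PromptKernelRequest(model="o3-mini", reasoning_effort="medium")

    # ...while Anthropic reasoning models take an integer maximum token budget.
    anthropic_kernel = PromptKernelRequest(model="claude-3-7-sonnet", reasoning_effort=16000)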
@@ -224,6 +233,8 @@ class Config: extra = pydantic.Extra.allow +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402 +from .agent_response import AgentResponse # noqa: E402 from .evaluator_response import EvaluatorResponse # noqa: E402 from .flow_response import FlowResponse # noqa: E402 from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402 diff --git a/src/humanloop/types/prompt_response_reasoning_effort.py b/src/humanloop/types/prompt_response_reasoning_effort.py new file mode 100644 index 00000000..e136637f --- /dev/null +++ b/src/humanloop/types/prompt_response_reasoning_effort.py @@ -0,0 +1,6 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from .open_ai_reasoning_effort import OpenAiReasoningEffort + +PromptResponseReasoningEffort = typing.Union[OpenAiReasoningEffort, int] diff --git a/src/humanloop/types/reasoning_effort.py b/src/humanloop/types/reasoning_effort.py deleted file mode 100644 index da0a0354..00000000 --- a/src/humanloop/types/reasoning_effort.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -ReasoningEffort = typing.Union[typing.Literal["high", "medium", "low"], typing.Any] diff --git a/src/humanloop/types/run_version_response.py b/src/humanloop/types/run_version_response.py index d94b1178..770dc487 100644 --- a/src/humanloop/types/run_version_response.py +++ b/src/humanloop/types/run_version_response.py @@ -5,5 +5,6 @@ from .tool_response import ToolResponse from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse +from .agent_response import AgentResponse -RunVersionResponse = typing.Union[PromptResponse, ToolResponse, EvaluatorResponse, FlowResponse] +RunVersionResponse = typing.Union[PromptResponse, ToolResponse, EvaluatorResponse, FlowResponse, AgentResponse] diff --git a/src/humanloop/types/tool_call_response.py b/src/humanloop/types/tool_call_response.py new file mode 100644 index 00000000..55bf2712 --- /dev/null +++ b/src/humanloop/types/tool_call_response.py @@ -0,0 +1,168 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_response import AgentResponse +from .evaluator_response import EvaluatorResponse +from .flow_response import FlowResponse +from .monitoring_evaluator_response import MonitoringEvaluatorResponse +from .prompt_response import PromptResponse +from .tool_response import ToolResponse +from .version_deployment_response import VersionDeploymentResponse +from .version_id_response import VersionIdResponse +from .agent_log_response import AgentLogResponse +from .evaluator_log_response import EvaluatorLogResponse +from .flow_log_response import FlowLogResponse +from .prompt_log_response import PromptLogResponse +from .tool_log_response import ToolLogResponse +import typing +import datetime as dt +import pydantic +from .log_status import LogStatus +from .log_response import LogResponse +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class ToolCallResponse(UncheckedBaseModel): + """ + Response model for a Tool call. + """ + + start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None) + """ + When the logged event started. + """ + + end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None) + """ + When the logged event ended. 
+ """ + + tool: ToolResponse = pydantic.Field() + """ + Tool used to generate the Log. + """ + + output: typing.Optional[str] = pydantic.Field(default=None) + """ + Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + """ + + created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None) + """ + User defined timestamp for when the log was created. + """ + + error: typing.Optional[str] = pydantic.Field(default=None) + """ + Error message if the log is an error. + """ + + provider_latency: typing.Optional[float] = pydantic.Field(default=None) + """ + Duration of the logged event in seconds. + """ + + stdout: typing.Optional[str] = pydantic.Field(default=None) + """ + Captured log and debug statements. + """ + + provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Raw request sent to provider. + """ + + provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Raw response received the provider. + """ + + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + The inputs passed to the prompt template. + """ + + source: typing.Optional[str] = pydantic.Field(default=None) + """ + Identifies where the model was called from. + """ + + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Any additional metadata to record. + """ + + log_status: typing.Optional[LogStatus] = pydantic.Field(default=None) + """ + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + """ + + source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None) + """ + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + """ + + trace_parent_id: typing.Optional[str] = pydantic.Field(default=None) + """ + The ID of the parent Log to nest this Log under in a Trace. + """ + + batches: typing.Optional[typing.List[str]] = pydantic.Field(default=None) + """ + Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations + """ + + user: typing.Optional[str] = pydantic.Field(default=None) + """ + End-user ID related to the Log. + """ + + environment: typing.Optional[str] = pydantic.Field(default=None) + """ + The name of the Environment the Log is associated to. + """ + + save: typing.Optional[bool] = pydantic.Field(default=None) + """ + Whether the request/response payloads will be stored on Humanloop. + """ + + log_id: typing.Optional[str] = pydantic.Field(default=None) + """ + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + """ + + id: str = pydantic.Field() + """ + ID of the log. + """ + + evaluator_logs: typing.List[EvaluatorLogResponse] = pydantic.Field() + """ + List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log. 
+ """ + + trace_flow_id: typing.Optional[str] = pydantic.Field(default=None) + """ + Identifier for the Flow that the Trace belongs to. + """ + + trace_id: typing.Optional[str] = pydantic.Field(default=None) + """ + ID of the Trace containing the Tool Call Log. + """ + + trace_children: typing.Optional[typing.List[LogResponse]] = pydantic.Field(default=None) + """ + Logs nested under this Log in the Trace. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/tool_log_response.py b/src/humanloop/types/tool_log_response.py index 1b6081c3..251223af 100644 --- a/src/humanloop/types/tool_log_response.py +++ b/src/humanloop/types/tool_log_response.py @@ -2,6 +2,8 @@ from __future__ import annotations from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_response import AgentResponse from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse from .monitoring_evaluator_response import MonitoringEvaluatorResponse @@ -13,6 +15,7 @@ import datetime as dt import pydantic from .log_status import LogStatus +from .chat_message import ChatMessage from ..core.pydantic_utilities import IS_PYDANTIC_V2 from ..core.pydantic_utilities import update_forward_refs @@ -152,6 +155,11 @@ class ToolLogResponse(UncheckedBaseModel): Tool used to generate the Log. """ + output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None) + """ + The message returned by the Tool. + """ + if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 else: @@ -162,6 +170,7 @@ class Config: extra = pydantic.Extra.allow +from .agent_log_response import AgentLogResponse # noqa: E402 from .evaluator_log_response import EvaluatorLogResponse # noqa: E402 from .flow_log_response import FlowLogResponse # noqa: E402 from .prompt_log_response import PromptLogResponse # noqa: E402 diff --git a/src/humanloop/types/tool_response.py b/src/humanloop/types/tool_response.py index 0b835918..70537215 100644 --- a/src/humanloop/types/tool_response.py +++ b/src/humanloop/types/tool_response.py @@ -152,6 +152,8 @@ class Config: extra = pydantic.Extra.allow +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402 +from .agent_response import AgentResponse # noqa: E402 from .evaluator_response import EvaluatorResponse # noqa: E402 from .flow_response import FlowResponse # noqa: E402 from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402 diff --git a/src/humanloop/types/version_deployment_response.py b/src/humanloop/types/version_deployment_response.py index e2e82d9f..0db57d69 100644 --- a/src/humanloop/types/version_deployment_response.py +++ b/src/humanloop/types/version_deployment_response.py @@ -36,6 +36,8 @@ class Config: extra = pydantic.Extra.allow +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402 +from .agent_response import AgentResponse # noqa: E402 from .evaluator_response import EvaluatorResponse # noqa: E402 from .flow_response import FlowResponse # noqa: E402 from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402 diff --git a/src/humanloop/types/version_deployment_response_file.py 
b/src/humanloop/types/version_deployment_response_file.py index e0f73573..4fadcff0 100644 --- a/src/humanloop/types/version_deployment_response_file.py +++ b/src/humanloop/types/version_deployment_response_file.py @@ -10,6 +10,7 @@ from .tool_response import ToolResponse from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse + from .agent_response import AgentResponse VersionDeploymentResponseFile = typing.Union[ - "PromptResponse", "ToolResponse", DatasetResponse, "EvaluatorResponse", "FlowResponse" + "PromptResponse", "ToolResponse", DatasetResponse, "EvaluatorResponse", "FlowResponse", "AgentResponse" ] diff --git a/src/humanloop/types/version_id_response.py b/src/humanloop/types/version_id_response.py index 877851a9..e3f5dc27 100644 --- a/src/humanloop/types/version_id_response.py +++ b/src/humanloop/types/version_id_response.py @@ -30,6 +30,8 @@ class Config: extra = pydantic.Extra.allow +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402 +from .agent_response import AgentResponse # noqa: E402 from .evaluator_response import EvaluatorResponse # noqa: E402 from .flow_response import FlowResponse # noqa: E402 from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402 diff --git a/src/humanloop/types/version_id_response_version.py b/src/humanloop/types/version_id_response_version.py index 2f56346c..b1cbd45d 100644 --- a/src/humanloop/types/version_id_response_version.py +++ b/src/humanloop/types/version_id_response_version.py @@ -10,6 +10,7 @@ from .tool_response import ToolResponse from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse + from .agent_response import AgentResponse VersionIdResponseVersion = typing.Union[ - "PromptResponse", "ToolResponse", DatasetResponse, "EvaluatorResponse", "FlowResponse" + "PromptResponse", "ToolResponse", DatasetResponse, "EvaluatorResponse", "FlowResponse", "AgentResponse" ] From 6c5f8388404560d294307480ba1ae3627182f533 Mon Sep 17 00:00:00 2001 From: Ale Pouroullis Date: Thu, 24 Apr 2025 14:59:42 +0100 Subject: [PATCH 02/10] set up initial test for sync operation --- poetry.lock | 126 +++++++++++++++++++++++++++------ pyproject.toml | 5 +- src/humanloop/client.py | 3 + src/humanloop/sync/__init__.py | 0 tests/conftest.py | 5 +- tests/sync/test_sync.py | 95 +++++++++++++++++++++++++ 6 files changed, 212 insertions(+), 22 deletions(-) create mode 100644 src/humanloop/sync/__init__.py create mode 100644 tests/sync/test_sync.py diff --git a/poetry.lock b/poetry.lock index 056dd550..ad333475 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.5 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.1.2 and should not be changed by hand. 
[[package]] name = "annotated-types" @@ -6,6 +6,7 @@ version = "0.7.0" description = "Reusable constraint types to use with typing.Annotated" optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, @@ -17,6 +18,7 @@ version = "0.50.0" description = "The official Python library for the anthropic API" optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "anthropic-0.50.0-py3-none-any.whl", hash = "sha256:defbd79327ca2fa61fd7b9eb2f1627dfb1f69c25d49288c52e167ddb84574f80"}, {file = "anthropic-0.50.0.tar.gz", hash = "sha256:42175ec04ce4ff2fa37cd436710206aadff546ee99d70d974699f59b49adc66f"}, @@ -41,6 +43,7 @@ version = "4.9.0" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false python-versions = ">=3.9" +groups = ["main", "dev"] files = [ {file = "anyio-4.9.0-py3-none-any.whl", hash = "sha256:9f76d541cad6e36af7beb62e978876f3b41e3e04f2c1fbf0884604c0a9c4d93c"}, {file = "anyio-4.9.0.tar.gz", hash = "sha256:673c0c244e15788651a4ff38710fea9675823028a6f08a5eda409e0c9840a028"}, @@ -54,7 +57,7 @@ typing_extensions = {version = ">=4.5", markers = "python_version < \"3.13\""} [package.extras] doc = ["Sphinx (>=8.2,<9.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx_rtd_theme"] -test = ["anyio[trio]", "blockbuster (>=1.5.23)", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21)"] +test = ["anyio[trio]", "blockbuster (>=1.5.23)", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "trustme", "truststore (>=0.9.1) ; python_version >= \"3.10\"", "uvloop (>=0.21) ; platform_python_implementation == \"CPython\" and platform_system != \"Windows\" and python_version < \"3.14\""] trio = ["trio (>=0.26.1)"] [[package]] @@ -63,18 +66,19 @@ version = "25.3.0" description = "Classes Without Boilerplate" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "attrs-25.3.0-py3-none-any.whl", hash = "sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3"}, {file = "attrs-25.3.0.tar.gz", hash = "sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b"}, ] [package.extras] -benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"] -cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] -dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +benchmark = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +cov = ["cloudpickle ; platform_python_implementation == \"CPython\"", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1) 
; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +dev = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier"] -tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] -tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"] +tests = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +tests-mypy = ["mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\""] [[package]] name = "certifi" @@ -82,6 +86,7 @@ version = "2025.1.31" description = "Python package for providing Mozilla's CA Bundle." optional = false python-versions = ">=3.6" +groups = ["main", "dev"] files = [ {file = "certifi-2025.1.31-py3-none-any.whl", hash = "sha256:ca78db4565a652026a4db2bcdf68f2fb589ea80d0be70e03929ed730746b84fe"}, {file = "certifi-2025.1.31.tar.gz", hash = "sha256:3d5da6925056f6f18f119200434a4780a94263f10d1c21d032a6f6b2baa20651"}, @@ -93,6 +98,7 @@ version = "3.4.1" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." optional = false python-versions = ">=3.7" +groups = ["main", "dev"] files = [ {file = "charset_normalizer-3.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de"}, {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176"}, @@ -194,6 +200,7 @@ version = "5.15.0" description = "" optional = false python-versions = "<4.0,>=3.9" +groups = ["dev"] files = [ {file = "cohere-5.15.0-py3-none-any.whl", hash = "sha256:22ff867c2a6f2fc2b585360c6072f584f11f275ef6d9242bac24e0fa2df1dfb5"}, {file = "cohere-5.15.0.tar.gz", hash = "sha256:e802d4718ddb0bb655654382ebbce002756a3800faac30296cde7f1bdc6ff2cc"}, @@ -216,10 +223,12 @@ version = "0.4.6" description = "Cross-platform colored terminal text." 
optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["main", "dev"] files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] +markers = {main = "platform_system == \"Windows\"", dev = "platform_system == \"Windows\" or sys_platform == \"win32\""} [[package]] name = "deepdiff" @@ -227,6 +236,7 @@ version = "8.4.2" description = "Deep Difference and Search of any Python object/data. Recreate objects by adding adding deltas to each other." optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "deepdiff-8.4.2-py3-none-any.whl", hash = "sha256:7e39e5b26f3747c54f9d0e8b9b29daab670c3100166b77cc0185d5793121b099"}, {file = "deepdiff-8.4.2.tar.gz", hash = "sha256:5c741c0867ebc7fcb83950ad5ed958369c17f424e14dee32a11c56073f4ee92a"}, @@ -245,6 +255,7 @@ version = "1.2.18" description = "Python @deprecated decorator to deprecate old python classes, functions or methods." optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7" +groups = ["main"] files = [ {file = "Deprecated-1.2.18-py2.py3-none-any.whl", hash = "sha256:bd5011788200372a32418f888e326a09ff80d0214bd961147cfed01b5c018eec"}, {file = "deprecated-1.2.18.tar.gz", hash = "sha256:422b6f6d859da6f2ef57857761bfb392480502a64c3028ca9bbe86085d72115d"}, @@ -254,7 +265,7 @@ files = [ wrapt = ">=1.10,<2" [package.extras] -dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "setuptools", "tox"] +dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "setuptools ; python_version >= \"3.12\"", "tox"] [[package]] name = "distro" @@ -262,6 +273,7 @@ version = "1.9.0" description = "Distro - an OS platform information API" optional = false python-versions = ">=3.6" +groups = ["main", "dev"] files = [ {file = "distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2"}, {file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"}, @@ -273,6 +285,8 @@ version = "1.2.2" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" +groups = ["main", "dev"] +markers = "python_version < \"3.11\"" files = [ {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, @@ -287,6 +301,7 @@ version = "1.10.0" description = "Fast read/write of AVRO files" optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "fastavro-1.10.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1a9fe0672d2caf0fe54e3be659b13de3cad25a267f2073d6f4b9f8862acc31eb"}, {file = "fastavro-1.10.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:86dd0410770e0c99363788f0584523709d85e57bb457372ec5c285a482c17fe6"}, @@ -333,6 +348,7 @@ version = "3.18.0" description = "A platform independent file lock." 
optional = false python-versions = ">=3.9" +groups = ["main", "dev"] files = [ {file = "filelock-3.18.0-py3-none-any.whl", hash = "sha256:c401f4f8377c4464e6db25fff06205fd89bdd83b65eb0488ed1b160f780e21de"}, {file = "filelock-3.18.0.tar.gz", hash = "sha256:adbc88eabb99d2fec8c9c1b229b171f18afa655400173ddc653d5d01501fb9f2"}, @@ -341,7 +357,7 @@ files = [ [package.extras] docs = ["furo (>=2024.8.6)", "sphinx (>=8.1.3)", "sphinx-autodoc-typehints (>=3)"] testing = ["covdefaults (>=2.3)", "coverage (>=7.6.10)", "diff-cover (>=9.2.1)", "pytest (>=8.3.4)", "pytest-asyncio (>=0.25.2)", "pytest-cov (>=6)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.28.1)"] -typing = ["typing-extensions (>=4.12.2)"] +typing = ["typing-extensions (>=4.12.2) ; python_version < \"3.11\""] [[package]] name = "fsspec" @@ -349,6 +365,7 @@ version = "2025.3.2" description = "File-system specification" optional = false python-versions = ">=3.9" +groups = ["main", "dev"] files = [ {file = "fsspec-2025.3.2-py3-none-any.whl", hash = "sha256:2daf8dc3d1dfa65b6aa37748d112773a7a08416f6c70d96b264c96476ecaf711"}, {file = "fsspec-2025.3.2.tar.gz", hash = "sha256:e52c77ef398680bbd6a98c0e628fbc469491282981209907bbc8aea76a04fdc6"}, @@ -388,6 +405,7 @@ version = "0.23.0" description = "The official Python library for the groq API" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "groq-0.23.0-py3-none-any.whl", hash = "sha256:039817a6b75d70f129f0591f8c79d3f7655dcf728b709fe5f08cfeadb1d9cc19"}, {file = "groq-0.23.0.tar.gz", hash = "sha256:426e1d89df5791b34fa3f2eb827aec38490b9b2de5a44bbba6161cf5282ea5c9"}, @@ -407,6 +425,7 @@ version = "0.14.0" description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" optional = false python-versions = ">=3.7" +groups = ["main", "dev"] files = [ {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, @@ -418,6 +437,7 @@ version = "1.0.8" description = "A minimal low-level HTTP client." optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "httpcore-1.0.8-py3-none-any.whl", hash = "sha256:5254cf149bcb5f75e9d1b2b9f729ea4a4b883d1ad7379fc632b727cec23674be"}, {file = "httpcore-1.0.8.tar.gz", hash = "sha256:86e94505ed24ea06514883fd44d2bc02d90e77e7979c8eb71b90f41d364a1bad"}, @@ -439,6 +459,7 @@ version = "0.28.1" description = "The next generation HTTP client." optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"}, {file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"}, @@ -451,7 +472,7 @@ httpcore = "==1.*" idna = "*" [package.extras] -brotli = ["brotli", "brotlicffi"] +brotli = ["brotli ; platform_python_implementation == \"CPython\"", "brotlicffi ; platform_python_implementation != \"CPython\""] cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] @@ -463,6 +484,7 @@ version = "0.4.0" description = "Consume Server-Sent Event (SSE) messages with HTTPX." 
optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "httpx-sse-0.4.0.tar.gz", hash = "sha256:1e81a3a3070ce322add1d3529ed42eb5f70817f45ed6ec915ab753f961139721"}, {file = "httpx_sse-0.4.0-py3-none-any.whl", hash = "sha256:f329af6eae57eaa2bdfd962b42524764af68075ea87370a2de920af5341e318f"}, @@ -474,6 +496,7 @@ version = "0.30.2" description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" optional = false python-versions = ">=3.8.0" +groups = ["main", "dev"] files = [ {file = "huggingface_hub-0.30.2-py3-none-any.whl", hash = "sha256:68ff05969927058cfa41df4f2155d4bb48f5f54f719dd0390103eefa9b191e28"}, {file = "huggingface_hub-0.30.2.tar.gz", hash = "sha256:9a7897c5b6fd9dad3168a794a8998d6378210f5b9688d0dfc180b1a228dc2466"}, @@ -509,6 +532,7 @@ version = "3.10" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.6" +groups = ["main", "dev"] files = [ {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, @@ -523,6 +547,7 @@ version = "8.6.1" description = "Read metadata from Python packages" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "importlib_metadata-8.6.1-py3-none-any.whl", hash = "sha256:02a89390c1e15fdfdc0d7c6b25cb3e62650d0494005c97d6f148bf5b9787525e"}, {file = "importlib_metadata-8.6.1.tar.gz", hash = "sha256:310b41d755445d74569f993ccfc22838295d9fe005425094fad953d7f15c8580"}, @@ -532,12 +557,12 @@ files = [ zipp = ">=3.20" [package.extras] -check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] enabler = ["pytest-enabler (>=2.2)"] perf = ["ipython"] -test = ["flufl.flake8", "importlib_resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"] +test = ["flufl.flake8", "importlib_resources (>=1.3) ; python_version < \"3.9\"", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"] type = ["pytest-mypy"] [[package]] @@ -546,6 +571,7 @@ version = "2.1.0" description = "brain-dead simple config-ini parsing" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760"}, {file = "iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7"}, @@ -557,6 +583,7 @@ version = "0.9.0" description = "Fast iterable JSON parser." 
optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "jiter-0.9.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:816ec9b60fdfd1fec87da1d7ed46c66c44ffec37ab2ef7de5b147b2fce3fd5ad"}, {file = "jiter-0.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9b1d3086f8a3ee0194ecf2008cf81286a5c3e540d977fa038ff23576c023c0ea"}, @@ -642,6 +669,7 @@ version = "4.23.0" description = "An implementation of JSON Schema validation for Python" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "jsonschema-4.23.0-py3-none-any.whl", hash = "sha256:fbadb6f8b144a8f8cf9f0b89ba94501d143e50411a1278633f56a7acf7fd5566"}, {file = "jsonschema-4.23.0.tar.gz", hash = "sha256:d71497fef26351a33265337fa77ffeb82423f3ea21283cd9467bb03999266bc4"}, @@ -663,6 +691,7 @@ version = "2025.4.1" description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry" optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "jsonschema_specifications-2025.4.1-py3-none-any.whl", hash = "sha256:4653bffbd6584f7de83a67e0d620ef16900b390ddc7939d56684d6c81e33f1af"}, {file = "jsonschema_specifications-2025.4.1.tar.gz", hash = "sha256:630159c9f4dbea161a6a2205c3011cc4f18ff381b189fff48bb39b9bf26ae608"}, @@ -677,6 +706,7 @@ version = "5.1.0" description = "Python extension for MurmurHash (MurmurHash3), a set of fast and robust hash functions." optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "mmh3-5.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:eaf4ac5c6ee18ca9232238364d7f2a213278ae5ca97897cafaa123fcc7bb8bec"}, {file = "mmh3-5.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:48f9aa8ccb9ad1d577a16104834ac44ff640d8de8c0caed09a2300df7ce8460a"}, @@ -775,6 +805,7 @@ version = "1.0.1" description = "Optional static typing for Python" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "mypy-1.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:71a808334d3f41ef011faa5a5cd8153606df5fc0b56de5b2e89566c8093a0c9a"}, {file = "mypy-1.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:920169f0184215eef19294fa86ea49ffd4635dedfdea2b57e45cb4ee85d5ccaf"}, @@ -821,6 +852,7 @@ version = "1.1.0" description = "Type system extensions for programs checked with the mypy type checker." 
optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505"}, {file = "mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558"}, @@ -832,6 +864,7 @@ version = "1.26.4" description = "Fundamental package for array computing in Python" optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "numpy-1.26.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0"}, {file = "numpy-1.26.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a"}, @@ -877,6 +910,7 @@ version = "1.76.0" description = "The official Python library for the openai API" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "openai-1.76.0-py3-none-any.whl", hash = "sha256:a712b50e78cf78e6d7b2a8f69c4978243517c2c36999756673e07a14ce37dc0a"}, {file = "openai-1.76.0.tar.gz", hash = "sha256:fd2bfaf4608f48102d6b74f9e11c5ecaa058b60dad9c36e409c12477dfd91fb2"}, @@ -903,6 +937,7 @@ version = "1.32.1" description = "OpenTelemetry Python API" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "opentelemetry_api-1.32.1-py3-none-any.whl", hash = "sha256:bbd19f14ab9f15f0e85e43e6a958aa4cb1f36870ee62b7fd205783a112012724"}, {file = "opentelemetry_api-1.32.1.tar.gz", hash = "sha256:a5be71591694a4d9195caf6776b055aa702e964d961051a0715d05f8632c32fb"}, @@ -918,6 +953,7 @@ version = "0.53b1" description = "Instrumentation Tools & Auto Instrumentation for OpenTelemetry Python" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "opentelemetry_instrumentation-0.53b1-py3-none-any.whl", hash = "sha256:c07850cecfbc51e8b357f56d5886ae5ccaa828635b220d0f5e78f941ea9a83ca"}, {file = "opentelemetry_instrumentation-0.53b1.tar.gz", hash = "sha256:0e69ca2c75727e8a300de671c4a2ec0e86e63a8e906beaa5d6c9f5228e8687e5"}, @@ -935,6 +971,7 @@ version = "0.39.2" description = "OpenTelemetry Anthropic instrumentation" optional = false python-versions = "<4,>=3.9" +groups = ["main"] files = [ {file = "opentelemetry_instrumentation_anthropic-0.39.2-py3-none-any.whl", hash = "sha256:e1bfed6f4e140e0e35d19d44281c968970004467ccc1f40a07233618f798809c"}, {file = "opentelemetry_instrumentation_anthropic-0.39.2.tar.gz", hash = "sha256:a0dab35b4bc8561623b8f503220846a6b5ad07cd7d3277eeaf5e865d57c6e266"}, @@ -952,6 +989,7 @@ version = "0.39.2" description = "OpenTelemetry Bedrock instrumentation" optional = false python-versions = "<4,>=3.9" +groups = ["main"] files = [ {file = "opentelemetry_instrumentation_bedrock-0.39.2-py3-none-any.whl", hash = "sha256:dca1b2c5d0c74f41254c6de39fed51167357469159f9453cd9815143a213a1c8"}, {file = "opentelemetry_instrumentation_bedrock-0.39.2.tar.gz", hash = "sha256:ffe79fa8302dde69c5df86e602288ab48d31bdf3dffe6846cbe6a75cc0bb6385"}, @@ -971,6 +1009,7 @@ version = "0.39.2" description = "OpenTelemetry Cohere instrumentation" optional = false python-versions = "<4,>=3.9" +groups = ["main"] files = [ {file = "opentelemetry_instrumentation_cohere-0.39.2-py3-none-any.whl", hash = "sha256:a71e289231c3ddbe67dd32c0ed8df8b55367ab594410f2cff82f27784268cba5"}, {file = "opentelemetry_instrumentation_cohere-0.39.2.tar.gz", hash = "sha256:7a7e441d2c8c862e8ba84170bcaef81c5d5e63b42243b7dcc887541a71c90e15"}, @@ -988,6 +1027,7 @@ version = "0.39.2" 
description = "OpenTelemetry Groq instrumentation" optional = false python-versions = "<4,>=3.9" +groups = ["main"] files = [ {file = "opentelemetry_instrumentation_groq-0.39.2-py3-none-any.whl", hash = "sha256:0a19571ef86ce46b18e3c5402d321b620c8d5257bc968e8d7073c8937a376970"}, {file = "opentelemetry_instrumentation_groq-0.39.2.tar.gz", hash = "sha256:b28a2220f24d8fbea12dc4452ef5812e7ba67c6824b4e62278c3b3ada2248acc"}, @@ -1005,6 +1045,7 @@ version = "0.39.2" description = "OpenTelemetry OpenAI instrumentation" optional = false python-versions = "<4,>=3.9" +groups = ["main"] files = [ {file = "opentelemetry_instrumentation_openai-0.39.2-py3-none-any.whl", hash = "sha256:a9016e577a8c11cdfc6d79ebb84ed5f6dcacb59d709d250e40b3d08f9d4c25a2"}, {file = "opentelemetry_instrumentation_openai-0.39.2.tar.gz", hash = "sha256:25cf133fa3b623f123d953c9d637e6529a1790cd2898bf4d6a50c5bffe260821"}, @@ -1023,6 +1064,7 @@ version = "0.39.2" description = "OpenTelemetry Replicate instrumentation" optional = false python-versions = "<4,>=3.9" +groups = ["main"] files = [ {file = "opentelemetry_instrumentation_replicate-0.39.2-py3-none-any.whl", hash = "sha256:778ec5a2bf7767b7377ece0dec66dc2d02f1ea8ca3f8037c96c7b6695c56b8db"}, {file = "opentelemetry_instrumentation_replicate-0.39.2.tar.gz", hash = "sha256:6b9ddbf89d844ffc3725925af04fbee3a0f7a6d19d6050fb9c72bb8dd2eca7eb"}, @@ -1040,6 +1082,7 @@ version = "1.32.1" description = "OpenTelemetry Python Proto" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "opentelemetry_proto-1.32.1-py3-none-any.whl", hash = "sha256:fe56df31033ab0c40af7525f8bf4c487313377bbcfdf94184b701a8ccebc800e"}, {file = "opentelemetry_proto-1.32.1.tar.gz", hash = "sha256:bc6385ccf87768f029371535312071a2d09e6c9ebf119ac17dbc825a6a56ba53"}, @@ -1054,6 +1097,7 @@ version = "1.32.1" description = "OpenTelemetry Python SDK" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "opentelemetry_sdk-1.32.1-py3-none-any.whl", hash = "sha256:bba37b70a08038613247bc42beee5a81b0ddca422c7d7f1b097b32bf1c7e2f17"}, {file = "opentelemetry_sdk-1.32.1.tar.gz", hash = "sha256:8ef373d490961848f525255a42b193430a0637e064dd132fd2a014d94792a092"}, @@ -1070,6 +1114,7 @@ version = "0.53b1" description = "OpenTelemetry Semantic Conventions" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "opentelemetry_semantic_conventions-0.53b1-py3-none-any.whl", hash = "sha256:21df3ed13f035f8f3ea42d07cbebae37020367a53b47f1ebee3b10a381a00208"}, {file = "opentelemetry_semantic_conventions-0.53b1.tar.gz", hash = "sha256:4c5a6fede9de61211b2e9fc1e02e8acacce882204cd770177342b6a3be682992"}, @@ -1085,6 +1130,7 @@ version = "0.4.3" description = "OpenTelemetry Semantic Conventions Extension for Large Language Models" optional = false python-versions = "<4,>=3.9" +groups = ["main"] files = [ {file = "opentelemetry_semantic_conventions_ai-0.4.3-py3-none-any.whl", hash = "sha256:9ff60bbf38c8a891c20a355b4ca1948380361e27412c3ead264de0d050fa2570"}, {file = "opentelemetry_semantic_conventions_ai-0.4.3.tar.gz", hash = "sha256:761a68a7e99436dfc53cfe1f99507316aa0114ac480f0c42743b9320b7c94831"}, @@ -1096,6 +1142,7 @@ version = "5.4.0" description = "Orderly set" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "orderly_set-5.4.0-py3-none-any.whl", hash = "sha256:f0192a7f9ae3385b587b71688353fae491d1ca45878496eb71ea118be1623639"}, {file = "orderly_set-5.4.0.tar.gz", hash = 
"sha256:c8ff5ba824abe4eebcbbdd3f646ff3648ad0dd52239319d90056d8d30b6cccdd"}, @@ -1107,6 +1154,7 @@ version = "25.0" description = "Core utilities for Python packages" optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484"}, {file = "packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f"}, @@ -1118,6 +1166,7 @@ version = "2.2.3" description = "Powerful data structures for data analysis, time series, and statistics" optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "pandas-2.2.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1948ddde24197a0f7add2bdc4ca83bf2b1ef84a1bc8ccffd95eda17fd836ecb5"}, {file = "pandas-2.2.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:381175499d3802cde0eabbaf6324cce0c4f5d52ca6f8c377c29ad442f50f6348"}, @@ -1204,6 +1253,7 @@ version = "1.20.2" description = "parse() is the opposite of format()" optional = false python-versions = "*" +groups = ["main", "dev"] files = [ {file = "parse-1.20.2-py2.py3-none-any.whl", hash = "sha256:967095588cb802add9177d0c0b6133b5ba33b1ea9007ca800e526f42a85af558"}, {file = "parse-1.20.2.tar.gz", hash = "sha256:b41d604d16503c79d81af5165155c0b20f6c8d6c559efa66b4b695c3e5a0a0ce"}, @@ -1215,6 +1265,7 @@ version = "0.6.4" description = "Simplifies to build parse types based on the parse module" optional = false python-versions = "!=3.0.*,!=3.1.*,>=2.7" +groups = ["dev"] files = [ {file = "parse_type-0.6.4-py2.py3-none-any.whl", hash = "sha256:83d41144a82d6b8541127bf212dd76c7f01baff680b498ce8a4d052a7a5bce4c"}, {file = "parse_type-0.6.4.tar.gz", hash = "sha256:5e1ec10440b000c3f818006033372939e693a9ec0176f446d9303e4db88489a6"}, @@ -1225,9 +1276,9 @@ parse = {version = ">=1.18.0", markers = "python_version >= \"3.0\""} six = ">=1.15" [package.extras] -develop = ["build (>=0.5.1)", "coverage (>=4.4)", "pylint", "pytest (<5.0)", "pytest (>=5.0)", "pytest-cov", "pytest-html (>=1.19.0)", "ruff", "setuptools", "setuptools-scm", "tox (>=2.8,<4.0)", "twine (>=1.13.0)", "virtualenv (<20.22.0)", "virtualenv (>=20.0.0)", "wheel"] +develop = ["build (>=0.5.1)", "coverage (>=4.4)", "pylint", "pytest (<5.0) ; python_version < \"3.0\"", "pytest (>=5.0) ; python_version >= \"3.0\"", "pytest-cov", "pytest-html (>=1.19.0)", "ruff ; python_version >= \"3.7\"", "setuptools", "setuptools-scm", "tox (>=2.8,<4.0)", "twine (>=1.13.0)", "virtualenv (<20.22.0) ; python_version <= \"3.6\"", "virtualenv (>=20.0.0) ; python_version > \"3.6\"", "wheel"] docs = ["Sphinx (>=1.6)", "sphinx-bootstrap-theme (>=0.6.0)"] -testing = ["pytest (<5.0)", "pytest (>=5.0)", "pytest-html (>=1.19.0)"] +testing = ["pytest (<5.0) ; python_version < \"3.0\"", "pytest (>=5.0) ; python_version >= \"3.0\"", "pytest-html (>=1.19.0)"] [[package]] name = "pluggy" @@ -1235,6 +1286,7 @@ version = "1.5.0" description = "plugin and hook calling mechanisms for python" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, @@ -1250,6 +1302,7 @@ version = "5.29.4" description = "" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "protobuf-5.29.4-cp310-abi3-win32.whl", hash = 
"sha256:13eb236f8eb9ec34e63fc8b1d6efd2777d062fa6aaa68268fb67cf77f6839ad7"}, {file = "protobuf-5.29.4-cp310-abi3-win_amd64.whl", hash = "sha256:bcefcdf3976233f8a502d265eb65ea740c989bacc6c30a58290ed0e519eb4b8d"}, @@ -1270,6 +1323,7 @@ version = "19.0.1" description = "Python library for Apache Arrow" optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "pyarrow-19.0.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:fc28912a2dc924dddc2087679cc8b7263accc71b9ff025a1362b004711661a69"}, {file = "pyarrow-19.0.1-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:fca15aabbe9b8355800d923cc2e82c8ef514af321e18b437c3d782aa884eaeec"}, @@ -1324,6 +1378,7 @@ version = "2.11.3" description = "Data validation using Python type hints" optional = false python-versions = ">=3.9" +groups = ["main", "dev"] files = [ {file = "pydantic-2.11.3-py3-none-any.whl", hash = "sha256:a082753436a07f9ba1289c6ffa01cd93db3548776088aa917cc43b63f68fa60f"}, {file = "pydantic-2.11.3.tar.gz", hash = "sha256:7471657138c16adad9322fe3070c0116dd6c3ad8d649300e3cbdfe91f4db4ec3"}, @@ -1337,7 +1392,7 @@ typing-inspection = ">=0.4.0" [package.extras] email = ["email-validator (>=2.0.0)"] -timezone = ["tzdata"] +timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows\""] [[package]] name = "pydantic-core" @@ -1345,6 +1400,7 @@ version = "2.33.1" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.9" +groups = ["main", "dev"] files = [ {file = "pydantic_core-2.33.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3077cfdb6125cc8dab61b155fdd714663e401f0e6883f9632118ec12cf42df26"}, {file = "pydantic_core-2.33.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8ffab8b2908d152e74862d276cf5017c81a2f3719f14e8e3e8d6b83fda863927"}, @@ -1456,6 +1512,7 @@ version = "7.4.4" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"}, {file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"}, @@ -1478,6 +1535,7 @@ version = "0.23.8" description = "Pytest support for asyncio" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "pytest_asyncio-0.23.8-py3-none-any.whl", hash = "sha256:50265d892689a5faefb84df80819d1ecef566eb3549cf915dfb33569359d1ce2"}, {file = "pytest_asyncio-0.23.8.tar.gz", hash = "sha256:759b10b33a6dc61cce40a8bd5205e302978bbbcc00e279a8b61d9a6a3c82e4d3"}, @@ -1496,6 +1554,7 @@ version = "1.7.0" description = "Adds the ability to retry flaky tests in CI environments" optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "pytest_retry-1.7.0-py3-none-any.whl", hash = "sha256:a2dac85b79a4e2375943f1429479c65beb6c69553e7dae6b8332be47a60954f4"}, {file = "pytest_retry-1.7.0.tar.gz", hash = "sha256:f8d52339f01e949df47c11ba9ee8d5b362f5824dff580d3870ec9ae0057df80f"}, @@ -1513,6 +1572,7 @@ version = "2.9.0.post0" description = "Extensions to the standard Python datetime module" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["dev"] files = [ {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = 
"sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, @@ -1527,6 +1587,7 @@ version = "1.1.0" description = "Read key-value pairs from a .env file and set them as environment variables" optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "python_dotenv-1.1.0-py3-none-any.whl", hash = "sha256:d7c01d9e2293916c18baf562d95698754b0dbbb5e74d457c45d4f6561fb9d55d"}, {file = "python_dotenv-1.1.0.tar.gz", hash = "sha256:41f90bc6f5f177fb41f53e87666db362025010eb28f60a01c9143bfa33a2b2d5"}, @@ -1541,6 +1602,7 @@ version = "2025.2" description = "World timezone definitions, modern and historical" optional = false python-versions = "*" +groups = ["dev"] files = [ {file = "pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00"}, {file = "pytz-2025.2.tar.gz", hash = "sha256:360b9e3dbb49a209c21ad61809c7fb453643e048b38924c765813546746e81c3"}, @@ -1552,6 +1614,7 @@ version = "6.0.2" description = "YAML parser and emitter for Python" optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, @@ -1614,6 +1677,7 @@ version = "0.36.2" description = "JSON Referencing + Python" optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "referencing-0.36.2-py3-none-any.whl", hash = "sha256:e8699adbbf8b5c7de96d8ffa0eb5c158b3beafce084968e2ea8bb08c6794dcd0"}, {file = "referencing-0.36.2.tar.gz", hash = "sha256:df2e89862cd09deabbdba16944cc3f10feb6b3e6f18e902f7cc25609a34775aa"}, @@ -1630,6 +1694,7 @@ version = "2024.11.6" description = "Alternative regular expression module, to replace re." optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "regex-2024.11.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ff590880083d60acc0433f9c3f713c51f7ac6ebb9adf889c79a261ecf541aa91"}, {file = "regex-2024.11.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:658f90550f38270639e83ce492f27d2c8d2cd63805c65a13a14d36ca126753f0"}, @@ -1733,6 +1798,7 @@ version = "1.0.4" description = "Python client for Replicate" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "replicate-1.0.4-py3-none-any.whl", hash = "sha256:f568f6271ff715067901b6094c23c37373bbcfd7de0ff9b85e9c9ead567e09e7"}, {file = "replicate-1.0.4.tar.gz", hash = "sha256:f718601863ef1f419aa7dcdab1ea8770ba5489b571b86edf840cd506d68758ef"}, @@ -1750,6 +1816,7 @@ version = "2.32.3" description = "Python HTTP for Humans." 
optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, @@ -1771,6 +1838,7 @@ version = "0.24.0" description = "Python bindings to Rust's persistent data structures (rpds)" optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "rpds_py-0.24.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:006f4342fe729a368c6df36578d7a348c7c716be1da0a1a0f86e3021f8e98724"}, {file = "rpds_py-0.24.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2d53747da70a4e4b17f559569d5f9506420966083a31c5fbd84e764461c4444b"}, @@ -1894,6 +1962,7 @@ version = "0.5.7" description = "An extremely fast Python linter and code formatter, written in Rust." optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "ruff-0.5.7-py3-none-linux_armv6l.whl", hash = "sha256:548992d342fc404ee2e15a242cdbea4f8e39a52f2e7752d0e4cbe88d2d2f416a"}, {file = "ruff-0.5.7-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:00cc8872331055ee017c4f1071a8a31ca0809ccc0657da1d154a1d2abac5c0be"}, @@ -1921,6 +1990,7 @@ version = "1.17.0" description = "Python 2 and 3 compatibility utilities" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["dev"] files = [ {file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"}, {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"}, @@ -1932,6 +2002,7 @@ version = "1.3.1" description = "Sniff out which async library your code is running under" optional = false python-versions = ">=3.7" +groups = ["main", "dev"] files = [ {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, @@ -1943,6 +2014,7 @@ version = "0.9.0" description = "tiktoken is a fast BPE tokeniser for use with OpenAI's models" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "tiktoken-0.9.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:586c16358138b96ea804c034b8acf3f5d3f0258bd2bc3b0227af4af5d622e382"}, {file = "tiktoken-0.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d9c59ccc528c6c5dd51820b3474402f69d9a9e1d656226848ad68a8d5b2e5108"}, @@ -1990,6 +2062,7 @@ version = "0.21.1" description = "" optional = false python-versions = ">=3.9" +groups = ["main", "dev"] files = [ {file = "tokenizers-0.21.1-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:e78e413e9e668ad790a29456e677d9d3aa50a9ad311a40905d6861ba7692cf41"}, {file = "tokenizers-0.21.1-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:cd51cd0a91ecc801633829fcd1fda9cf8682ed3477c6243b9a095539de4aecf3"}, @@ -2022,6 +2095,8 @@ version = "2.2.1" description = "A lil' TOML parser" optional = false python-versions = ">=3.8" +groups = ["dev"] +markers = "python_version < \"3.11\"" files = [ {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"}, {file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"}, @@ -2063,6 +2138,7 @@ version = "4.67.1" description = 
"Fast, Extensible Progress Meter" optional = false python-versions = ">=3.7" +groups = ["main", "dev"] files = [ {file = "tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2"}, {file = "tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2"}, @@ -2084,6 +2160,7 @@ version = "4.23.0.20241208" description = "Typing stubs for jsonschema" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "types_jsonschema-4.23.0.20241208-py3-none-any.whl", hash = "sha256:87934bd9231c99d8eff94cacfc06ba668f7973577a9bd9e1f9de957c5737313e"}, {file = "types_jsonschema-4.23.0.20241208.tar.gz", hash = "sha256:e8b15ad01f290ecf6aea53f93fbdf7d4730e4600313e89e8a7f95622f7e87b7c"}, @@ -2098,6 +2175,7 @@ version = "5.29.1.20250403" description = "Typing stubs for protobuf" optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "types_protobuf-5.29.1.20250403-py3-none-any.whl", hash = "sha256:c71de04106a2d54e5b2173d0a422058fae0ef2d058d70cf369fb797bf61ffa59"}, {file = "types_protobuf-5.29.1.20250403.tar.gz", hash = "sha256:7ff44f15022119c9d7558ce16e78b2d485bf7040b4fadced4dd069bb5faf77a2"}, @@ -2109,6 +2187,7 @@ version = "2.9.0.20241206" description = "Typing stubs for python-dateutil" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "types_python_dateutil-2.9.0.20241206-py3-none-any.whl", hash = "sha256:e248a4bc70a486d3e3ec84d0dc30eec3a5f979d6e7ee4123ae043eedbb987f53"}, {file = "types_python_dateutil-2.9.0.20241206.tar.gz", hash = "sha256:18f493414c26ffba692a72369fea7a154c502646301ebfe3d56a04b3767284cb"}, @@ -2120,6 +2199,7 @@ version = "2.32.0.20250328" description = "Typing stubs for requests" optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "types_requests-2.32.0.20250328-py3-none-any.whl", hash = "sha256:72ff80f84b15eb3aa7a8e2625fffb6a93f2ad5a0c20215fc1dcfa61117bcb2a2"}, {file = "types_requests-2.32.0.20250328.tar.gz", hash = "sha256:c9e67228ea103bd811c96984fac36ed2ae8da87a36a633964a21f199d60baf32"}, @@ -2134,6 +2214,7 @@ version = "4.13.2" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "typing_extensions-4.13.2-py3-none-any.whl", hash = "sha256:a439e7c04b49fec3e5d3e2beaa21755cadbbdc391694e28ccdd36ca4a1408f8c"}, {file = "typing_extensions-4.13.2.tar.gz", hash = "sha256:e6c81219bd689f51865d9e372991c540bda33a0379d5573cddb9a3a23f7caaef"}, @@ -2145,6 +2226,7 @@ version = "0.4.0" description = "Runtime typing introspection tools" optional = false python-versions = ">=3.9" +groups = ["main", "dev"] files = [ {file = "typing_inspection-0.4.0-py3-none-any.whl", hash = "sha256:50e72559fcd2a6367a19f7a7e610e6afcb9fac940c650290eed893d61386832f"}, {file = "typing_inspection-0.4.0.tar.gz", hash = "sha256:9765c87de36671694a67904bf2c96e395be9c6439bb6c87b5142569dcdd65122"}, @@ -2159,6 +2241,7 @@ version = "2025.2" description = "Provider of IANA time zone data" optional = false python-versions = ">=2" +groups = ["dev"] files = [ {file = "tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8"}, {file = "tzdata-2025.2.tar.gz", hash = "sha256:b60a638fcc0daffadf82fe0f57e53d06bdec2f36c4df66280ae79bce6bd6f2b9"}, @@ -2170,13 +2253,14 @@ version = "2.4.0" description = "HTTP library with thread-safe connection pooling, file post, and more." 
optional = false python-versions = ">=3.9" +groups = ["main", "dev"] files = [ {file = "urllib3-2.4.0-py3-none-any.whl", hash = "sha256:4e16665048960a0900c702d4a66415956a584919c03361cac9f1df5c5dd7e813"}, {file = "urllib3-2.4.0.tar.gz", hash = "sha256:414bc6535b787febd7567804cc015fee39daab8ad86268f1310a9250697de466"}, ] [package.extras] -brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""] h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] @@ -2187,6 +2271,7 @@ version = "1.17.2" description = "Module for decorators, wrappers and monkey patching." optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "wrapt-1.17.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3d57c572081fed831ad2d26fd430d565b76aa277ed1d30ff4d40670b1c0dd984"}, {file = "wrapt-1.17.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b5e251054542ae57ac7f3fba5d10bfff615b6c2fb09abeb37d2f1463f841ae22"}, @@ -2275,20 +2360,21 @@ version = "3.21.0" description = "Backport of pathlib-compatible object wrapper for zip files" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "zipp-3.21.0-py3-none-any.whl", hash = "sha256:ac1bbe05fd2991f160ebce24ffbac5f6d11d83dc90891255885223d42b3cd931"}, {file = "zipp-3.21.0.tar.gz", hash = "sha256:2c9958f6430a2040341a52eb608ed6dd93ef4392e02ffe219417c1b28b5dd1f4"}, ] [package.extras] -check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] enabler = ["pytest-enabler (>=2.2)"] -test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"] +test = ["big-O", "importlib-resources ; python_version < \"3.9\"", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"] type = ["pytest-mypy"] [metadata] -lock-version = "2.0" +lock-version = "2.1" python-versions = ">=3.9,<4" -content-hash = "6b18fb6088ede49c2e52a1103a46481d57959171b5f2f6ee13cc3089a3804f5d" +content-hash = "a504b0d639ca08283dd45b6af246f7e5f2a6ed5b26fb58e90af77d320ef2045a" diff --git a/pyproject.toml b/pyproject.toml index 73f2c3d4..9a81cf79 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,5 +1,8 @@ [project] name = "humanloop" +description = "The Humanloop Python Library" +authors = [] +keywords = ["ai", "machine-learning", "llm", "sdk", "humanloop"] [tool.poetry] name = "humanloop" @@ -54,7 +57,7 @@ pydantic = ">= 1.9.2" pydantic-core = "^2.18.2" typing_extensions = ">= 4.0.0" -[tool.poetry.dev-dependencies] +[tool.poetry.group.dev.dependencies] mypy = "1.0.1" pytest = "^7.4.0" pytest-asyncio = "^0.23.5" diff --git a/src/humanloop/client.py b/src/humanloop/client.py index 2daa7769..a2510117 100644 --- a/src/humanloop/client.py +++ b/src/humanloop/client.py @@ -347,6 +347,9 @@ def agent(): path=path, attributes=attributes, ) + + def sync(self): + return "Hello world" class AsyncHumanloop(AsyncBaseHumanloop): diff --git a/src/humanloop/sync/__init__.py b/src/humanloop/sync/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/conftest.py b/tests/conftest.py 
index 80e3b336..c3e35481 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -192,7 +192,10 @@ def api_keys() -> APIKeys: @pytest.fixture(scope="session") def humanloop_client(api_keys: APIKeys) -> Humanloop: - return Humanloop(api_key=api_keys.humanloop) + return Humanloop( + api_key=api_keys.humanloop, + base_url="http://localhost:80/v5", + ) @pytest.fixture(scope="session", autouse=True) diff --git a/tests/sync/test_sync.py b/tests/sync/test_sync.py new file mode 100644 index 00000000..218d89a9 --- /dev/null +++ b/tests/sync/test_sync.py @@ -0,0 +1,95 @@ +import pytest +from humanloop import Humanloop, FileType +from pathlib import Path +from typing import List, NamedTuple + + +class SyncableFile(NamedTuple): + path: str + type: FileType + model: str + id: str = "" + + +@pytest.fixture +def test_file_structure(humanloop_client: Humanloop, get_test_path) -> List[SyncableFile]: + """Creates a predefined structure of files in Humanloop for testing sync""" + files: List[SyncableFile] = [ + SyncableFile( + path="prompts/gpt-4", + type="prompt", + model="gpt-4", + ), + SyncableFile( + path="prompts/gpt-4o", + type="prompt", + model="gpt-4o", + ), + SyncableFile( + path="prompts/nested/complex/gpt-4o", + type="prompt", + model="gpt-4o", + ), + SyncableFile( + path="agents/gpt-4", + type="agent", + model="gpt-4", + ), + SyncableFile( + path="agents/gpt-4o", + type="agent", + model="gpt-4o", + ), + ] + + # Create the files in Humanloop + created_files = [] + for file in files: + full_path = get_test_path(file.path) + if file.type == "prompt": + response = humanloop_client.prompts.upsert( + path=full_path, + model=file.model, + ) + elif file.type == "agent": + response = humanloop_client.agents.upsert( + path=full_path, + model=file.model, + ) + created_files.append(SyncableFile(path=full_path, type=file.type, model=file.model, id=response.id)) + + return created_files + + +@pytest.fixture +def cleanup_local_files(): + """Cleanup any locally synced files after tests""" + yield + # Clean up the local humanloop directory after tests + local_dir = Path("humanloop") + if local_dir.exists(): + import shutil + + shutil.rmtree(local_dir) + + +def test_sync_basic(humanloop_client: Humanloop, test_file_structure: List[SyncableFile], cleanup_local_files): + """Test that humanloop.sync() correctly syncs remote files to local filesystem""" + # Run the sync + successful_files = humanloop_client.sync() + + # Verify each file was synced correctly + for file in test_file_structure: + # Get the extension based on file type: .prompt, .agent + extension = f".{file.type}" + + # The local path should mirror the remote path structure + local_path = Path("humanloop") / f"{file.path}{extension}" + + # Basic assertions + assert local_path.exists(), f"Expected synced file at {local_path}" + assert local_path.parent.exists(), f"Expected directory at {local_path.parent}" + + # Verify it's not empty + content = local_path.read_text() + assert content, f"File at {local_path} should not be empty" From ab3562b0b8971a6db35c9ff039daf0a652b0bbf5 Mon Sep 17 00:00:00 2001 From: Ale Pouroullis Date: Thu, 24 Apr 2025 15:05:07 +0100 Subject: [PATCH 03/10] fix type error in test --- tests/sync/test_sync.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tests/sync/test_sync.py b/tests/sync/test_sync.py index 218d89a9..af5fd6b1 100644 --- a/tests/sync/test_sync.py +++ b/tests/sync/test_sync.py @@ -1,7 +1,7 @@ -import pytest -from humanloop import Humanloop, FileType +from typing import List, NamedTuple, 
Union from pathlib import Path -from typing import List, NamedTuple +import pytest +from humanloop import Humanloop, FileType, AgentResponse, PromptResponse class SyncableFile(NamedTuple): @@ -46,6 +46,7 @@ def test_file_structure(humanloop_client: Humanloop, get_test_path) -> List[Sync created_files = [] for file in files: full_path = get_test_path(file.path) + response: Union[AgentResponse, PromptResponse] if file.type == "prompt": response = humanloop_client.prompts.upsert( path=full_path, From 02c0803f61ba1299d614d09f62affc81d4937aa9 Mon Sep 17 00:00:00 2001 From: Ale Pouroullis Date: Thu, 24 Apr 2025 15:42:07 +0100 Subject: [PATCH 04/10] basic sync functionality --- .fernignore | 1 + src/humanloop/agents/raw_client.py | 2 +- src/humanloop/client.py | 33 ++++++- src/humanloop/prompts/raw_client.py | 2 +- src/humanloop/sync/__init__.py | 3 + src/humanloop/sync/sync_utils.py | 130 ++++++++++++++++++++++++++++ tests/conftest.py | 4 +- 7 files changed, 167 insertions(+), 8 deletions(-) create mode 100644 src/humanloop/sync/sync_utils.py diff --git a/.fernignore b/.fernignore index 112f779b..e7ec8aee 100644 --- a/.fernignore +++ b/.fernignore @@ -13,6 +13,7 @@ mypy.ini README.md src/humanloop/decorators src/humanloop/otel +src/humanloop/sync ## Tests diff --git a/src/humanloop/agents/raw_client.py b/src/humanloop/agents/raw_client.py index 4957e33b..bdc2bd69 100644 --- a/src/humanloop/agents/raw_client.py +++ b/src/humanloop/agents/raw_client.py @@ -1886,7 +1886,7 @@ def serialize( ) try: if 200 <= _response.status_code < 300: - return HttpResponse(response=_response, data=None) + return HttpResponse(response=_response, data=_response.text) if _response.status_code == 422: raise UnprocessableEntityError( typing.cast( diff --git a/src/humanloop/client.py b/src/humanloop/client.py index a2510117..e850de9c 100644 --- a/src/humanloop/client.py +++ b/src/humanloop/client.py @@ -24,6 +24,7 @@ from humanloop.otel.processor import HumanloopSpanProcessor from humanloop.prompt_utils import populate_template from humanloop.prompts.client import PromptsClient +from humanloop.sync import sync class ExtendedEvalsClient(EvaluationsClient): @@ -82,8 +83,9 @@ class Humanloop(BaseHumanloop): """ See docstring of :class:`BaseHumanloop`. - This class extends the base client with custom evaluation utilities - and decorators for declaring Files in code. + This class extends the base client with custom evaluation utilities, + decorators for declaring Files in code, and utilities for syncing + files between Humanloop and local filesystem. """ def __init__( @@ -348,8 +350,31 @@ def agent(): attributes=attributes, ) - def sync(self): - return "Hello world" + def sync(self) -> List[str]: + """Sync prompt and agent files from Humanloop to local filesystem. + + This method will: + 1. Fetch all prompt and agent files from your Humanloop workspace + 2. Save them to the local filesystem in a 'humanloop/' directory + 3. Maintain the same directory structure as in Humanloop + 4. Add appropriate file extensions (.prompt or .agent) + + Currently only supports syncing prompt and agent files. Other file types will be skipped. 
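For context on the behaviour the docstring above describes, here is a minimal, hypothetical sketch of inspecting the synced directory after a run; only the `humanloop/` root and the `.prompt`/`.agent` extensions come from the patch, the helper itself is illustrative:

```python
from pathlib import Path
from typing import List


def list_synced_files(root: str = "humanloop") -> List[Path]:
    """Collect the .prompt and .agent files that a sync run writes locally."""
    base = Path(root)
    if not base.exists():
        return []
    return sorted(p for p in base.rglob("*") if p.suffix in {".prompt", ".agent"})
```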
+ + The files will be saved with the following structure: + ``` + humanloop/ + ├── prompts/ + │ ├── my_prompt.prompt + │ └── nested/ + │ └── another_prompt.prompt + └── agents/ + └── my_agent.agent + ``` + + :return: List of successfully processed file paths + """ + return sync(self) class AsyncHumanloop(AsyncBaseHumanloop): diff --git a/src/humanloop/prompts/raw_client.py b/src/humanloop/prompts/raw_client.py index 2b907d91..af40b995 100644 --- a/src/humanloop/prompts/raw_client.py +++ b/src/humanloop/prompts/raw_client.py @@ -1793,7 +1793,7 @@ def serialize( ) try: if 200 <= _response.status_code < 300: - return HttpResponse(response=_response, data=None) + return HttpResponse(response=_response, data=_response.text) if _response.status_code == 422: raise UnprocessableEntityError( typing.cast( diff --git a/src/humanloop/sync/__init__.py b/src/humanloop/sync/__init__.py index e69de29b..7b55e70e 100644 --- a/src/humanloop/sync/__init__.py +++ b/src/humanloop/sync/__init__.py @@ -0,0 +1,3 @@ +from humanloop.sync.sync_utils import sync + +__all__ = ["sync"] diff --git a/src/humanloop/sync/sync_utils.py b/src/humanloop/sync/sync_utils.py new file mode 100644 index 00000000..1e54abba --- /dev/null +++ b/src/humanloop/sync/sync_utils.py @@ -0,0 +1,130 @@ +import os +import logging +from pathlib import Path +import concurrent.futures +from typing import List, TYPE_CHECKING, Union + +from humanloop.types import FileType, PromptResponse, AgentResponse +from humanloop.core.api_error import ApiError + +if TYPE_CHECKING: + from humanloop.base_client import BaseHumanloop + +# Set up logging +logger = logging.getLogger(__name__) +logger.setLevel(logging.INFO) +console_handler = logging.StreamHandler() +formatter = logging.Formatter("%(message)s") +console_handler.setFormatter(formatter) +if not logger.hasHandlers(): + logger.addHandler(console_handler) + +def _save_serialized_file(serialized_content: str, file_path: str, file_type: FileType) -> None: + """Save serialized file to local filesystem. + + :param serialized_content: The content to save + :param file_path: The path where to save the file + :param file_type: The type of file (prompt or agent) + """ + try: + # Create full path including humanloop/ prefix + full_path = Path("humanloop") / file_path + # Create directory if it doesn't exist + full_path.parent.mkdir(parents=True, exist_ok=True) + + # Add file type extension + new_path = full_path.parent / f"{full_path.stem}.{file_type}" + + # Write content to file + with open(new_path, "w") as f: + f.write(serialized_content) + logger.info(f"Syncing {file_type} {file_path}") + except Exception as e: + logger.error(f"Failed to sync {file_type} {file_path}: {str(e)}") + raise + +def _process_file(client: "BaseHumanloop", file: Union[PromptResponse, AgentResponse]) -> None: + """Process a single file by serializing and saving it. + + Currently only supports prompt and agent files. Other file types will be skipped. 
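As a quick check of the path handling in `_save_serialized_file` above, the sketch below mirrors its logic; note that `Path.stem` drops anything after a dot in the final component, so a remote path such as `team/model.v2` would land on disk as `team/model.prompt`:

```python
from pathlib import Path


def local_path_for(file_path: str, file_type: str, base_dir: str = "humanloop") -> Path:
    # Prefix with the base directory, then swap the last component's
    # suffix for the file type, as _save_serialized_file does.
    full_path = Path(base_dir) / file_path
    return full_path.parent / f"{full_path.stem}.{file_type}"


assert local_path_for("prompts/nested/gpt-4o", "prompt") == Path("humanloop/prompts/nested/gpt-4o.prompt")
```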
+ + :param client: Humanloop client instance + :param file: The file to process (must be a PromptResponse or AgentResponse) + """ + try: + # Serialize the file based on its type + try: + if file.type == "prompt": + serialized = client.prompts.serialize(id=file.id) + elif file.type == "agent": + serialized = client.agents.serialize(id=file.id) + else: + logger.warning(f"Skipping unsupported file type: {file.type}") + return + except ApiError as e: + # The SDK returns the YAML content in the error body when it can't parse as JSON + if e.status_code == 200: + serialized = e.body + else: + raise + except Exception as e: + logger.error(f"Failed to serialize {file.type} {file.id}: {str(e)}") + raise + + # Save to local filesystem + _save_serialized_file(serialized, file.path, file.type) + + except Exception as e: + logger.error(f"Error processing file {file.path}: {str(e)}") + raise + +def sync(client: "BaseHumanloop") -> List[str]: + """Sync prompt and agent files from Humanloop to local filesystem. + + :param client: Humanloop client instance + :return: List of successfully processed file paths + """ + successful_files = [] + failed_files = [] + + # Create a thread pool for processing files + with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor: + futures = [] + page = 1 + + while True: + try: + response = client.files.list_files( + type=["prompt", "agent"], + page=page + ) + + if len(response.records) == 0: + break + + # Submit each file for processing + for file in response.records: + future = executor.submit(_process_file, client, file) + futures.append((file.path, future)) + + page += 1 + except Exception as e: + logger.error(f"Failed to fetch page {page}: {str(e)}") + break + + # Wait for all tasks to complete + for file_path, future in futures: + try: + future.result() + successful_files.append(file_path) + except Exception as e: + failed_files.append(file_path) + logger.error(f"Task failed for {file_path}: {str(e)}") + + # Log summary + if successful_files: + logger.info(f"\nSynced {len(successful_files)} files") + if failed_files: + logger.error(f"Failed to sync {len(failed_files)} files") + + return successful_files \ No newline at end of file diff --git a/tests/conftest.py b/tests/conftest.py index c3e35481..272b0d3d 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -217,8 +217,8 @@ def directory_cleanup(directory_id: str, humanloop_client: Humanloop): client = humanloop_client.evaluators # type: ignore [assignment] elif file.type == "flow": client = humanloop_client.flows # type: ignore [assignment] - else: - raise NotImplementedError(f"Unknown HL file type {file.type}") + elif file.type == "agent": + client = humanloop_client.agents # type: ignore [assignment] client.delete(file_id) for subdirectory in response.subdirectories: From e4165eb57e66310ad16161f3eddc38edb6b493ae Mon Sep 17 00:00:00 2001 From: Ale Pouroullis Date: Thu, 24 Apr 2025 15:46:32 +0100 Subject: [PATCH 05/10] fix type error and formatting --- src/humanloop/client.py | 14 ++++---- src/humanloop/sync/sync_utils.py | 59 +++++++++++++++++++------------- 2 files changed, 41 insertions(+), 32 deletions(-) diff --git a/src/humanloop/client.py b/src/humanloop/client.py index e850de9c..9dd8fb28 100644 --- a/src/humanloop/client.py +++ b/src/humanloop/client.py @@ -146,9 +146,7 @@ def __init__( ) if opentelemetry_tracer is None: - self._opentelemetry_tracer = self._tracer_provider.get_tracer( - "humanloop.sdk" - ) + self._opentelemetry_tracer = self._tracer_provider.get_tracer("humanloop.sdk") else: 
self._opentelemetry_tracer = opentelemetry_tracer @@ -349,18 +347,18 @@ def agent(): path=path, attributes=attributes, ) - + def sync(self) -> List[str]: """Sync prompt and agent files from Humanloop to local filesystem. - + This method will: 1. Fetch all prompt and agent files from your Humanloop workspace 2. Save them to the local filesystem in a 'humanloop/' directory 3. Maintain the same directory structure as in Humanloop 4. Add appropriate file extensions (.prompt or .agent) - + Currently only supports syncing prompt and agent files. Other file types will be skipped. - + The files will be saved with the following structure: ``` humanloop/ @@ -371,7 +369,7 @@ def sync(self) -> List[str]: └── agents/ └── my_agent.agent ``` - + :return: List of successfully processed file paths """ return sync(self) diff --git a/src/humanloop/sync/sync_utils.py b/src/humanloop/sync/sync_utils.py index 1e54abba..0e94260f 100644 --- a/src/humanloop/sync/sync_utils.py +++ b/src/humanloop/sync/sync_utils.py @@ -2,9 +2,9 @@ import logging from pathlib import Path import concurrent.futures -from typing import List, TYPE_CHECKING, Union +from typing import List, TYPE_CHECKING, Union, cast -from humanloop.types import FileType, PromptResponse, AgentResponse +from humanloop.types import FileType, PromptResponse, AgentResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse from humanloop.core.api_error import ApiError if TYPE_CHECKING: @@ -19,9 +19,10 @@ if not logger.hasHandlers(): logger.addHandler(console_handler) + def _save_serialized_file(serialized_content: str, file_path: str, file_type: FileType) -> None: """Save serialized file to local filesystem. - + :param serialized_content: The content to save :param file_path: The path where to save the file :param file_type: The type of file (prompt or agent) @@ -31,10 +32,10 @@ def _save_serialized_file(serialized_content: str, file_path: str, file_type: Fi full_path = Path("humanloop") / file_path # Create directory if it doesn't exist full_path.parent.mkdir(parents=True, exist_ok=True) - + # Add file type extension new_path = full_path.parent / f"{full_path.stem}.{file_type}" - + # Write content to file with open(new_path, "w") as f: f.write(serialized_content) @@ -43,15 +44,27 @@ def _save_serialized_file(serialized_content: str, file_path: str, file_type: Fi logger.error(f"Failed to sync {file_type} {file_path}: {str(e)}") raise -def _process_file(client: "BaseHumanloop", file: Union[PromptResponse, AgentResponse]) -> None: + +def _process_file(client: "BaseHumanloop", file: Union[PromptResponse, AgentResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse]) -> None: """Process a single file by serializing and saving it. - + Currently only supports prompt and agent files. Other file types will be skipped. 
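The serialize-and-fall-back pattern that `_process_file` relies on throughout these patches can be isolated as below; `serialize_prompt` is a hypothetical helper, while `ApiError`, `e.status_code`, and `e.body` are exactly what the diff uses:

```python
from humanloop.core.api_error import ApiError


def serialize_prompt(client, file_id: str) -> str:
    # The generated SDK raises ApiError when a 200 response body is YAML
    # rather than JSON, so a 200 inside the handler is treated as success
    # and the raw body is returned as the serialized file.
    try:
        return client.prompts.serialize(id=file_id)
    except ApiError as e:
        if e.status_code == 200:
            return e.body
        raise
```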
- + :param client: Humanloop client instance :param file: The file to process (must be a PromptResponse or AgentResponse) """ try: + # Skip if not a prompt or agent + if file.type not in ["prompt", "agent"]: + logger.warning(f"Skipping unsupported file type: {file.type}") + return + + # Cast to the correct type for type checking + if file.type == "prompt": + file = cast(PromptResponse, file) + elif file.type == "agent": + file = cast(AgentResponse, file) + # Serialize the file based on its type try: if file.type == "prompt": @@ -70,48 +83,46 @@ def _process_file(client: "BaseHumanloop", file: Union[PromptResponse, AgentResp except Exception as e: logger.error(f"Failed to serialize {file.type} {file.id}: {str(e)}") raise - + # Save to local filesystem _save_serialized_file(serialized, file.path, file.type) - + except Exception as e: logger.error(f"Error processing file {file.path}: {str(e)}") raise + def sync(client: "BaseHumanloop") -> List[str]: """Sync prompt and agent files from Humanloop to local filesystem. - + :param client: Humanloop client instance :return: List of successfully processed file paths """ successful_files = [] failed_files = [] - + # Create a thread pool for processing files with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor: futures = [] page = 1 - + while True: try: - response = client.files.list_files( - type=["prompt", "agent"], - page=page - ) - + response = client.files.list_files(type=["prompt", "agent"], page=page) + if len(response.records) == 0: break - + # Submit each file for processing for file in response.records: future = executor.submit(_process_file, client, file) futures.append((file.path, future)) - + page += 1 except Exception as e: logger.error(f"Failed to fetch page {page}: {str(e)}") break - + # Wait for all tasks to complete for file_path, future in futures: try: @@ -120,11 +131,11 @@ def sync(client: "BaseHumanloop") -> List[str]: except Exception as e: failed_files.append(file_path) logger.error(f"Task failed for {file_path}: {str(e)}") - + # Log summary if successful_files: logger.info(f"\nSynced {len(successful_files)} files") if failed_files: logger.error(f"Failed to sync {len(failed_files)} files") - - return successful_files \ No newline at end of file + + return successful_files From a991bb194067a40d69849215a49db096405388db Mon Sep 17 00:00:00 2001 From: Ale Pouroullis Date: Fri, 25 Apr 2025 14:39:55 +0100 Subject: [PATCH 06/10] refactor sync utils into SyncClient class --- src/humanloop/client.py | 9 +- src/humanloop/sync/__init__.py | 4 +- src/humanloop/sync/sync_client.py | 161 ++++++++++++++++++++++++++++++ src/humanloop/sync/sync_utils.py | 141 -------------------------- 4 files changed, 168 insertions(+), 147 deletions(-) create mode 100644 src/humanloop/sync/sync_client.py delete mode 100644 src/humanloop/sync/sync_utils.py diff --git a/src/humanloop/client.py b/src/humanloop/client.py index 9dd8fb28..26987bf4 100644 --- a/src/humanloop/client.py +++ b/src/humanloop/client.py @@ -24,7 +24,7 @@ from humanloop.otel.processor import HumanloopSpanProcessor from humanloop.prompt_utils import populate_template from humanloop.prompts.client import PromptsClient -from humanloop.sync import sync +from humanloop.sync.sync_client import SyncClient class ExtendedEvalsClient(EvaluationsClient): @@ -118,6 +118,7 @@ def __init__( httpx_client=httpx_client, ) + self.sync_client = SyncClient(client=self) eval_client = ExtendedEvalsClient(client_wrapper=self._client_wrapper) eval_client.client = self self.evaluations = 
eval_client @@ -348,8 +349,8 @@ def agent(): attributes=attributes, ) - def sync(self) -> List[str]: - """Sync prompt and agent files from Humanloop to local filesystem. + def pull(self) -> List[str]: + """Pull prompt and agent files from Humanloop to local filesystem. This method will: 1. Fetch all prompt and agent files from your Humanloop workspace @@ -372,7 +373,7 @@ def sync(self) -> List[str]: :return: List of successfully processed file paths """ - return sync(self) + return self.sync_client.pull() class AsyncHumanloop(AsyncBaseHumanloop): diff --git a/src/humanloop/sync/__init__.py b/src/humanloop/sync/__init__.py index 7b55e70e..007659df 100644 --- a/src/humanloop/sync/__init__.py +++ b/src/humanloop/sync/__init__.py @@ -1,3 +1,3 @@ -from humanloop.sync.sync_utils import sync +from humanloop.sync.sync_client import SyncClient -__all__ = ["sync"] +__all__ = ["SyncClient"] diff --git a/src/humanloop/sync/sync_client.py b/src/humanloop/sync/sync_client.py new file mode 100644 index 00000000..979ada1e --- /dev/null +++ b/src/humanloop/sync/sync_client.py @@ -0,0 +1,161 @@ +import multiprocessing +import os +import logging +from pathlib import Path +import concurrent.futures +from typing import List, TYPE_CHECKING, Union, cast, Optional + +from humanloop.types import FileType, PromptResponse, AgentResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse +from humanloop.core.api_error import ApiError + +if TYPE_CHECKING: + from humanloop.base_client import BaseHumanloop + +# Set up logging +logger = logging.getLogger(__name__) +logger.setLevel(logging.INFO) +console_handler = logging.StreamHandler() +formatter = logging.Formatter("%(message)s") +console_handler.setFormatter(formatter) +if not logger.hasHandlers(): + logger.addHandler(console_handler) + +class SyncClient: + """Client for managing synchronization between local filesystem and Humanloop.""" + + def __init__( + self, + client: "BaseHumanloop", + base_dir: str = "humanloop", + max_workers: Optional[int] = None + ): + """ + Parameters + ---------- + client: Humanloop client instance + base_dir: Base directory for synced files (default: "humanloop") + max_workers: Maximum number of worker threads (default: CPU count * 2) + """ + self.client = client + self.base_dir = Path(base_dir) + self.max_workers = max_workers or multiprocessing.cpu_count() * 2 + + def _save_serialized_file(self, serialized_content: str, file_path: str, file_type: FileType) -> None: + """Save serialized file to local filesystem. + + Args: + serialized_content: The content to save + file_path: The path where to save the file + file_type: The type of file (prompt or agent) + """ + try: + # Create full path including base_dir prefix + full_path = self.base_dir / file_path + # Create directory if it doesn't exist + full_path.parent.mkdir(parents=True, exist_ok=True) + + # Add file type extension + new_path = full_path.parent / f"{full_path.stem}.{file_type}" + + # Write content to file + with open(new_path, "w") as f: + f.write(serialized_content) + logger.info(f"Syncing {file_type} {file_path}") + except Exception as e: + logger.error(f"Failed to sync {file_type} {file_path}: {str(e)}") + raise + + def _process_file( + self, + file: Union[PromptResponse, AgentResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse] + ) -> None: + """Process a single file by serializing and saving it. 
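A hedged usage sketch of the `SyncClient` introduced above, overriding its defaults (`base_dir="humanloop"`, `max_workers` of twice the CPU count); the directory name and worker count are illustrative, and `pull()` is the method this patch defines just below:

```python
from humanloop import Humanloop
from humanloop.sync import SyncClient

client = Humanloop(api_key="...")  # placeholder key
syncer = SyncClient(client=client, base_dir="hl_files", max_workers=4)
pulled = syncer.pull()  # returns the list of successfully synced paths
print(f"Pulled {len(pulled)} files into hl_files/")
```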
+ + Args: + file: The file to process (must be a PromptResponse or AgentResponse) + """ + try: + # Skip if not a prompt or agent + if file.type not in ["prompt", "agent"]: + logger.warning(f"Skipping unsupported file type: {file.type}") + return + + # Cast to the correct type for type checking + if file.type == "prompt": + file = cast(PromptResponse, file) + elif file.type == "agent": + file = cast(AgentResponse, file) + + # Serialize the file based on its type + try: + if file.type == "prompt": + serialized = self.client.prompts.serialize(id=file.id) + elif file.type == "agent": + serialized = self.client.agents.serialize(id=file.id) + else: + logger.warning(f"Skipping unsupported file type: {file.type}") + return + except ApiError as e: + # The SDK returns the YAML content in the error body when it can't parse as JSON + if e.status_code == 200: + serialized = e.body + else: + raise + except Exception as e: + logger.error(f"Failed to serialize {file.type} {file.id}: {str(e)}") + raise + + # Save to local filesystem + self._save_serialized_file(serialized, file.path, file.type) + + except Exception as e: + logger.error(f"Error processing file {file.path}: {str(e)}") + raise + + def pull(self) -> List[str]: + """Sync prompt and agent files from Humanloop to local filesystem. + + Returns: + List of successfully processed file paths + """ + successful_files = [] + failed_files = [] + + # Create a thread pool for processing files + with concurrent.futures.ThreadPoolExecutor(max_workers=self.max_workers) as executor: + futures = [] + page = 1 + + while True: + try: + response = self.client.files.list_files(type=["prompt", "agent"], page=page) + + if len(response.records) == 0: + break + + # Submit each file for processing + for file in response.records: + future = executor.submit(self._process_file, file) + futures.append((file.path, future)) + + page += 1 + except Exception as e: + logger.error(f"Failed to fetch page {page}: {str(e)}") + break + + # Wait for all tasks to complete + for file_path, future in futures: + try: + future.result() + successful_files.append(file_path) + except Exception as e: + failed_files.append(file_path) + logger.error(f"Task failed for {file_path}: {str(e)}") + + # Log summary + if successful_files: + logger.info(f"\nSynced {len(successful_files)} files") + if failed_files: + logger.error(f"Failed to sync {len(failed_files)} files") + + return successful_files \ No newline at end of file diff --git a/src/humanloop/sync/sync_utils.py b/src/humanloop/sync/sync_utils.py deleted file mode 100644 index 0e94260f..00000000 --- a/src/humanloop/sync/sync_utils.py +++ /dev/null @@ -1,141 +0,0 @@ -import os -import logging -from pathlib import Path -import concurrent.futures -from typing import List, TYPE_CHECKING, Union, cast - -from humanloop.types import FileType, PromptResponse, AgentResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse -from humanloop.core.api_error import ApiError - -if TYPE_CHECKING: - from humanloop.base_client import BaseHumanloop - -# Set up logging -logger = logging.getLogger(__name__) -logger.setLevel(logging.INFO) -console_handler = logging.StreamHandler() -formatter = logging.Formatter("%(message)s") -console_handler.setFormatter(formatter) -if not logger.hasHandlers(): - logger.addHandler(console_handler) - - -def _save_serialized_file(serialized_content: str, file_path: str, file_type: FileType) -> None: - """Save serialized file to local filesystem. 
- - :param serialized_content: The content to save - :param file_path: The path where to save the file - :param file_type: The type of file (prompt or agent) - """ - try: - # Create full path including humanloop/ prefix - full_path = Path("humanloop") / file_path - # Create directory if it doesn't exist - full_path.parent.mkdir(parents=True, exist_ok=True) - - # Add file type extension - new_path = full_path.parent / f"{full_path.stem}.{file_type}" - - # Write content to file - with open(new_path, "w") as f: - f.write(serialized_content) - logger.info(f"Syncing {file_type} {file_path}") - except Exception as e: - logger.error(f"Failed to sync {file_type} {file_path}: {str(e)}") - raise - - -def _process_file(client: "BaseHumanloop", file: Union[PromptResponse, AgentResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse]) -> None: - """Process a single file by serializing and saving it. - - Currently only supports prompt and agent files. Other file types will be skipped. - - :param client: Humanloop client instance - :param file: The file to process (must be a PromptResponse or AgentResponse) - """ - try: - # Skip if not a prompt or agent - if file.type not in ["prompt", "agent"]: - logger.warning(f"Skipping unsupported file type: {file.type}") - return - - # Cast to the correct type for type checking - if file.type == "prompt": - file = cast(PromptResponse, file) - elif file.type == "agent": - file = cast(AgentResponse, file) - - # Serialize the file based on its type - try: - if file.type == "prompt": - serialized = client.prompts.serialize(id=file.id) - elif file.type == "agent": - serialized = client.agents.serialize(id=file.id) - else: - logger.warning(f"Skipping unsupported file type: {file.type}") - return - except ApiError as e: - # The SDK returns the YAML content in the error body when it can't parse as JSON - if e.status_code == 200: - serialized = e.body - else: - raise - except Exception as e: - logger.error(f"Failed to serialize {file.type} {file.id}: {str(e)}") - raise - - # Save to local filesystem - _save_serialized_file(serialized, file.path, file.type) - - except Exception as e: - logger.error(f"Error processing file {file.path}: {str(e)}") - raise - - -def sync(client: "BaseHumanloop") -> List[str]: - """Sync prompt and agent files from Humanloop to local filesystem. 
- - :param client: Humanloop client instance - :return: List of successfully processed file paths - """ - successful_files = [] - failed_files = [] - - # Create a thread pool for processing files - with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor: - futures = [] - page = 1 - - while True: - try: - response = client.files.list_files(type=["prompt", "agent"], page=page) - - if len(response.records) == 0: - break - - # Submit each file for processing - for file in response.records: - future = executor.submit(_process_file, client, file) - futures.append((file.path, future)) - - page += 1 - except Exception as e: - logger.error(f"Failed to fetch page {page}: {str(e)}") - break - - # Wait for all tasks to complete - for file_path, future in futures: - try: - future.result() - successful_files.append(file_path) - except Exception as e: - failed_files.append(file_path) - logger.error(f"Task failed for {file_path}: {str(e)}") - - # Log summary - if successful_files: - logger.info(f"\nSynced {len(successful_files)} files") - if failed_files: - logger.error(f"Failed to sync {len(failed_files)} files") - - return successful_files From cfc026668813b0781b55ee5e423d5d1721a75476 Mon Sep 17 00:00:00 2001 From: Ale Pouroullis Date: Fri, 25 Apr 2025 15:36:45 +0100 Subject: [PATCH 07/10] add client overloads for call method to use local files at specified path for agents and prompts --- src/humanloop/client.py | 14 +++++++++++- src/humanloop/overload.py | 46 +++++++++++++++++++++++++++++++++++++-- tests/sync/test_sync.py | 4 ++-- 3 files changed, 59 insertions(+), 5 deletions(-) diff --git a/src/humanloop/client.py b/src/humanloop/client.py index 26987bf4..28ddcd52 100644 --- a/src/humanloop/client.py +++ b/src/humanloop/client.py @@ -13,7 +13,7 @@ from humanloop.evals.types import Dataset, Evaluator, EvaluatorCheck, File from humanloop.base_client import AsyncBaseHumanloop, BaseHumanloop -from humanloop.overload import overload_call, overload_log +from humanloop.overload import overload_call, overload_log, overload_call_with_local_files from humanloop.decorators.flow import flow as flow_decorator_factory from humanloop.decorators.prompt import prompt_decorator_factory from humanloop.decorators.tool import tool_decorator_factory as tool_decorator_factory @@ -99,6 +99,7 @@ def __init__( httpx_client: typing.Optional[httpx.Client] = None, opentelemetry_tracer_provider: Optional[TracerProvider] = None, opentelemetry_tracer: Optional[Tracer] = None, + use_local_files: bool = False, ): """ Extends the base client with custom evaluation utilities and @@ -118,6 +119,7 @@ def __init__( httpx_client=httpx_client, ) + self.use_local_files = use_local_files self.sync_client = SyncClient(client=self) eval_client = ExtendedEvalsClient(client_wrapper=self._client_wrapper) eval_client.client = self @@ -128,6 +130,16 @@ def __init__( # and the @flow decorator providing the trace_id self.prompts = overload_log(client=self.prompts) self.prompts = overload_call(client=self.prompts) + self.prompts = overload_call_with_local_files( + client=self.prompts, + use_local_files=self.use_local_files, + file_type="prompt" + ) + self.agents = overload_call_with_local_files( + client=self.agents, + use_local_files=self.use_local_files, + file_type="agent" + ) self.flows = overload_log(client=self.flows) self.tools = overload_log(client=self.tools) diff --git a/src/humanloop/overload.py b/src/humanloop/overload.py index b0c83215..32bce735 100644 --- a/src/humanloop/overload.py +++ b/src/humanloop/overload.py @@ -1,8 
+1,8 @@ import inspect import logging import types -from typing import TypeVar, Union - +from typing import TypeVar, Union, Literal +from pathlib import Path from humanloop.context import ( get_decorator_context, get_evaluation_context, @@ -13,6 +13,7 @@ from humanloop.evaluators.client import EvaluatorsClient from humanloop.flows.client import FlowsClient from humanloop.prompts.client import PromptsClient +from humanloop.agents.client import AgentsClient from humanloop.tools.client import ToolsClient from humanloop.types.create_evaluator_log_response import CreateEvaluatorLogResponse from humanloop.types.create_flow_log_response import CreateFlowLogResponse @@ -112,6 +113,7 @@ def _overload_call(self, **kwargs) -> PromptCallResponse: } try: + logger.info(f"Calling inner overload") response = self._call(**kwargs) except Exception as e: # Re-raising as HumanloopDecoratorError so the decorators don't catch it @@ -122,3 +124,43 @@ def _overload_call(self, **kwargs) -> PromptCallResponse: # Replace the original log method with the overloaded one client.call = types.MethodType(_overload_call, client) # type: ignore [assignment] return client + +def overload_call_with_local_files( + client: Union[PromptsClient, AgentsClient], + use_local_files: bool, + file_type: Literal["prompt", "agent"] +) -> Union[PromptsClient, AgentsClient]: + """Overload call to handle local files when use_local_files is True. + + Args: + client: The client to overload (PromptsClient or AgentsClient) + use_local_files: Whether to use local files + file_type: Type of file ("prompt" or "agent") + """ + original_call = client._call if hasattr(client, '_call') else client.call + + def _overload_call(self, **kwargs) -> PromptCallResponse: + if use_local_files and "path" in kwargs: + try: + # Construct path to local file + local_path = Path("humanloop") / kwargs["path"] + # Add appropriate extension + local_path = local_path.parent / f"{local_path.stem}.{file_type}" + + if local_path.exists(): + # Read the file content + with open(local_path) as f: + file_content = f.read() + + kwargs[file_type] = file_content # "prompt" or "agent" + + logger.debug(f"Using local file content from {local_path}") + else: + logger.warning(f"Local file not found: {local_path}, falling back to API") + except Exception as e: + logger.error(f"Error reading local file: {e}, falling back to API") + + return original_call(**kwargs) + + client.call = types.MethodType(_overload_call, client) + return client \ No newline at end of file diff --git a/tests/sync/test_sync.py b/tests/sync/test_sync.py index af5fd6b1..520a1979 100644 --- a/tests/sync/test_sync.py +++ b/tests/sync/test_sync.py @@ -74,10 +74,10 @@ def cleanup_local_files(): shutil.rmtree(local_dir) -def test_sync_basic(humanloop_client: Humanloop, test_file_structure: List[SyncableFile], cleanup_local_files): +def test_pull_basic(humanloop_client: Humanloop, test_file_structure: List[SyncableFile], cleanup_local_files): """Test that humanloop.sync() correctly syncs remote files to local filesystem""" # Run the sync - successful_files = humanloop_client.sync() + successful_files = humanloop_client.pull() # Verify each file was synced correctly for file in test_file_structure: From 1e2528175af1da624729f45b075f0b68eeea2b46 Mon Sep 17 00:00:00 2001 From: Ale Pouroullis Date: Mon, 28 Apr 2025 16:38:44 +0100 Subject: [PATCH 08/10] infer file type from client passed into call overload --- src/humanloop/overload.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git 
a/src/humanloop/overload.py b/src/humanloop/overload.py index 32bce735..3caad33b 100644 --- a/src/humanloop/overload.py +++ b/src/humanloop/overload.py @@ -15,6 +15,7 @@ from humanloop.prompts.client import PromptsClient from humanloop.agents.client import AgentsClient from humanloop.tools.client import ToolsClient +from humanloop.types import FileType from humanloop.types.create_evaluator_log_response import CreateEvaluatorLogResponse from humanloop.types.create_flow_log_response import CreateFlowLogResponse from humanloop.types.create_prompt_log_response import CreatePromptLogResponse @@ -128,7 +129,6 @@ def _overload_call(self, **kwargs) -> PromptCallResponse: def overload_call_with_local_files( client: Union[PromptsClient, AgentsClient], use_local_files: bool, - file_type: Literal["prompt", "agent"] ) -> Union[PromptsClient, AgentsClient]: """Overload call to handle local files when use_local_files is True. @@ -138,6 +138,14 @@ def overload_call_with_local_files( file_type: Type of file ("prompt" or "agent") """ original_call = client._call if hasattr(client, '_call') else client.call + # get file type from client type + file_type: FileType + if isinstance(client, PromptsClient): + file_type = "prompt" + elif isinstance(client, AgentsClient): + file_type = "agent" + else: + raise ValueError(f"Unsupported client type: {type(client)}") def _overload_call(self, **kwargs) -> PromptCallResponse: if use_local_files and "path" in kwargs: @@ -152,7 +160,7 @@ def _overload_call(self, **kwargs) -> PromptCallResponse: with open(local_path) as f: file_content = f.read() - kwargs[file_type] = file_content # "prompt" or "agent" + kwargs[file_type] = file_content # "prompt" or "agent" # TODO: raise warning if kernel passed in logger.debug(f"Using local file content from {local_path}") else: From 39b08e16157252a98845d53a1acadea65df69899 Mon Sep 17 00:00:00 2001 From: Ale Pouroullis Date: Mon, 28 Apr 2025 16:45:28 +0100 Subject: [PATCH 09/10] simplify sync client to use updated list files endpoint that includes serialized content --- src/humanloop/client.py | 6 +- src/humanloop/sync/sync_client.py | 106 +++++++++--------------------- 2 files changed, 34 insertions(+), 78 deletions(-) diff --git a/src/humanloop/client.py b/src/humanloop/client.py index 28ddcd52..64eaef7c 100644 --- a/src/humanloop/client.py +++ b/src/humanloop/client.py @@ -132,13 +132,11 @@ def __init__( self.prompts = overload_call(client=self.prompts) self.prompts = overload_call_with_local_files( client=self.prompts, - use_local_files=self.use_local_files, - file_type="prompt" + use_local_files=self.use_local_files ) self.agents = overload_call_with_local_files( client=self.agents, - use_local_files=self.use_local_files, - file_type="agent" + use_local_files=self.use_local_files ) self.flows = overload_log(client=self.flows) self.tools = overload_log(client=self.tools) diff --git a/src/humanloop/sync/sync_client.py b/src/humanloop/sync/sync_client.py index 979ada1e..97a82df1 100644 --- a/src/humanloop/sync/sync_client.py +++ b/src/humanloop/sync/sync_client.py @@ -65,53 +65,6 @@ def _save_serialized_file(self, serialized_content: str, file_path: str, file_ty logger.error(f"Failed to sync {file_type} {file_path}: {str(e)}") raise - def _process_file( - self, - file: Union[PromptResponse, AgentResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse] - ) -> None: - """Process a single file by serializing and saving it. 
- - Args: - file: The file to process (must be a PromptResponse or AgentResponse) - """ - try: - # Skip if not a prompt or agent - if file.type not in ["prompt", "agent"]: - logger.warning(f"Skipping unsupported file type: {file.type}") - return - - # Cast to the correct type for type checking - if file.type == "prompt": - file = cast(PromptResponse, file) - elif file.type == "agent": - file = cast(AgentResponse, file) - - # Serialize the file based on its type - try: - if file.type == "prompt": - serialized = self.client.prompts.serialize(id=file.id) - elif file.type == "agent": - serialized = self.client.agents.serialize(id=file.id) - else: - logger.warning(f"Skipping unsupported file type: {file.type}") - return - except ApiError as e: - # The SDK returns the YAML content in the error body when it can't parse as JSON - if e.status_code == 200: - serialized = e.body - else: - raise - except Exception as e: - logger.error(f"Failed to serialize {file.type} {file.id}: {str(e)}") - raise - - # Save to local filesystem - self._save_serialized_file(serialized, file.path, file.type) - - except Exception as e: - logger.error(f"Error processing file {file.path}: {str(e)}") - raise - def pull(self) -> List[str]: """Sync prompt and agent files from Humanloop to local filesystem. @@ -120,37 +73,42 @@ def pull(self) -> List[str]: """ successful_files = [] failed_files = [] + page = 1 - # Create a thread pool for processing files - with concurrent.futures.ThreadPoolExecutor(max_workers=self.max_workers) as executor: - futures = [] - page = 1 - - while True: - try: - response = self.client.files.list_files(type=["prompt", "agent"], page=page) - - if len(response.records) == 0: - break - - # Submit each file for processing - for file in response.records: - future = executor.submit(self._process_file, file) - futures.append((file.path, future)) + while True: + try: + response = self.client.files.list_files( + type=["prompt", "agent"], + page=page, + include_content=True + ) - page += 1 - except Exception as e: - logger.error(f"Failed to fetch page {page}: {str(e)}") + if len(response.records) == 0: break - # Wait for all tasks to complete - for file_path, future in futures: - try: - future.result() - successful_files.append(file_path) - except Exception as e: - failed_files.append(file_path) - logger.error(f"Task failed for {file_path}: {str(e)}") + # Process each file + for file in response.records: + # Skip if not a prompt or agent + if file.type not in ["prompt", "agent"]: + logger.warning(f"Skipping unsupported file type: {file.type}") + continue + + # Skip if no content + if not getattr(file, "content", None): + logger.warning(f"No content found for {file.type} {getattr(file, 'id', '')}") + continue + + try: + self._save_serialized_file(file.content, file.path, file.type) + successful_files.append(file.path) + except Exception as e: + failed_files.append(file.path) + logger.error(f"Task failed for {file.path}: {str(e)}") + + page += 1 + except Exception as e: + logger.error(f"Failed to fetch page {page}: {str(e)}") + break # Log summary if successful_files: From ecbb778016028ad1ac0c3e08c976996d6ac8b5f3 Mon Sep 17 00:00:00 2001 From: Ale Pouroullis Date: Mon, 28 Apr 2025 16:48:06 +0100 Subject: [PATCH 10/10] update test fixture to use staging --- tests/conftest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/conftest.py b/tests/conftest.py index 272b0d3d..20c8ae79 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -194,7 +194,7 @@ def api_keys() -> APIKeys: def 
humanloop_client(api_keys: APIKeys) -> Humanloop:
     return Humanloop(
         api_key=api_keys.humanloop,
-        base_url="http://localhost:80/v5",
+        base_url="https://neostaging.humanloop.ml/v5/",
     )
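Taken together, the patch series enables a pull-then-call workflow. A hedged end-to-end sketch, where the API key, file path, and message content are placeholders and `prompts.call` is assumed to accept a `path` plus `messages` as elsewhere in the SDK:

```python
from humanloop import Humanloop

client = Humanloop(api_key="...", use_local_files=True)

client.pull()  # mirror prompt and agent Files into ./humanloop

# With use_local_files=True, the call overload first looks for
# humanloop/joke-generator.prompt on disk and, if present, sends its
# serialized content as the prompt kernel instead of resolving the
# path remotely; otherwise it falls back to the API.
response = client.prompts.call(
    path="joke-generator",
    messages=[{"role": "user", "content": "Tell me a joke about file syncing."}],
)
```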