From 76a728b36ac0269982860d13b072674913156610 Mon Sep 17 00:00:00 2001
From: fern-api <115122769+fern-api[bot]@users.noreply.github.com>
Date: Mon, 28 Apr 2025 16:07:10 +0000
Subject: [PATCH 01/39] Release 0.8.36
---
poetry.lock | 76 +-
pyproject.toml | 2 +-
reference.md | 5977 +++++++++++++----
src/humanloop/__init__.py | 237 +-
src/humanloop/agents/__init__.py | 49 +
src/humanloop/agents/client.py | 3208 +++++++++
src/humanloop/agents/raw_client.py | 3889 +++++++++++
src/humanloop/agents/requests/__init__.py | 25 +
.../requests/agent_log_request_agent.py | 6 +
.../requests/agent_log_request_tool_choice.py | 8 +
.../agent_request_reasoning_effort.py | 6 +
.../agents/requests/agent_request_stop.py | 5 +
.../agents/requests/agent_request_template.py | 6 +
.../requests/agent_request_tools_item.py | 7 +
.../requests/agents_call_request_agent.py | 6 +
.../agents_call_request_tool_choice.py | 8 +
.../agents_call_stream_request_agent.py | 6 +
.../agents_call_stream_request_tool_choice.py | 8 +
src/humanloop/agents/types/__init__.py | 25 +
.../agents/types/agent_log_request_agent.py | 6 +
.../types/agent_log_request_tool_choice.py | 8 +
.../types/agent_request_reasoning_effort.py | 6 +
.../agents/types/agent_request_stop.py | 5 +
.../agents/types/agent_request_template.py | 6 +
.../agents/types/agent_request_tools_item.py | 7 +
.../agents/types/agents_call_request_agent.py | 6 +
.../types/agents_call_request_tool_choice.py | 8 +
.../types/agents_call_stream_request_agent.py | 6 +
.../agents_call_stream_request_tool_choice.py | 8 +
src/humanloop/base_client.py | 4 +
src/humanloop/core/client_wrapper.py | 4 +-
src/humanloop/files/client.py | 22 +-
src/humanloop/files/raw_client.py | 34 +-
...th_files_retrieve_by_path_post_response.py | 8 +-
...th_files_retrieve_by_path_post_response.py | 3 +-
src/humanloop/flows/client.py | 8 +-
src/humanloop/logs/client.py | 4 +-
src/humanloop/prompts/__init__.py | 16 +
src/humanloop/prompts/client.py | 265 +-
src/humanloop/prompts/raw_client.py | 333 +-
src/humanloop/prompts/requests/__init__.py | 8 +
.../requests/prompt_log_request_prompt.py | 6 +
.../prompt_request_reasoning_effort.py | 6 +
.../requests/prompts_call_request_prompt.py | 6 +
.../prompts_call_stream_request_prompt.py | 6 +
src/humanloop/prompts/types/__init__.py | 8 +
.../types/prompt_log_request_prompt.py | 6 +
.../types/prompt_request_reasoning_effort.py | 6 +
.../types/prompts_call_request_prompt.py | 6 +
.../prompts_call_stream_request_prompt.py | 6 +
src/humanloop/requests/__init__.py | 88 +-
src/humanloop/requests/agent_call_response.py | 202 +
.../agent_call_response_tool_choice.py | 8 +
.../requests/agent_call_stream_response.py | 19 +
.../agent_call_stream_response_payload.py | 8 +
.../requests/agent_continue_response.py | 202 +
.../agent_continue_response_tool_choice.py | 8 +
.../agent_continue_stream_response.py | 19 +
.../agent_continue_stream_response_payload.py | 8 +
src/humanloop/requests/agent_inline_tool.py | 13 +
.../requests/agent_kernel_request.py | 112 +
.../agent_kernel_request_reasoning_effort.py | 6 +
.../requests/agent_kernel_request_stop.py | 5 +
.../requests/agent_kernel_request_template.py | 6 +
.../agent_kernel_request_tools_item.py | 7 +
.../requests/agent_linked_file_request.py | 13 +
.../requests/agent_linked_file_response.py | 19 +
.../agent_linked_file_response_file.py | 21 +
src/humanloop/requests/agent_log_response.py | 201 +
.../agent_log_response_tool_choice.py | 8 +
.../requests/agent_log_stream_response.py | 87 +
src/humanloop/requests/agent_response.py | 242 +
.../agent_response_reasoning_effort.py | 6 +
src/humanloop/requests/agent_response_stop.py | 5 +
.../requests/agent_response_template.py | 6 +
.../requests/agent_response_tools_item.py | 10 +
.../anthropic_redacted_thinking_content.py | 12 +
.../requests/anthropic_thinking_content.py | 17 +
src/humanloop/requests/chat_message.py | 6 +
.../requests/chat_message_thinking_item.py | 7 +
.../requests/create_agent_log_response.py | 31 +
src/humanloop/requests/dataset_response.py | 5 +
...arents_and_children_response_files_item.py | 8 +-
src/humanloop/requests/evaluator_response.py | 5 +
.../file_environment_response_file.py | 8 +-
.../file_environment_variable_request.py | 15 +
src/humanloop/requests/flow_response.py | 5 +
src/humanloop/requests/linked_file_request.py | 10 +
src/humanloop/requests/list_agents.py | 12 +
src/humanloop/requests/log_response.py | 7 +-
src/humanloop/requests/log_stream_response.py | 7 +
.../requests/paginated_data_agent_response.py | 12 +
..._response_flow_response_agent_response.py} | 8 +-
...w_response_agent_response_records_item.py} | 14 +-
.../requests/populate_template_response.py | 16 +-
...late_template_response_reasoning_effort.py | 6 +
.../requests/prompt_kernel_request.py | 12 +-
.../prompt_kernel_request_reasoning_effort.py | 6 +
src/humanloop/requests/prompt_response.py | 16 +-
.../prompt_response_reasoning_effort.py | 6 +
.../requests/run_version_response.py | 3 +-
src/humanloop/requests/tool_call_response.py | 146 +
src/humanloop/requests/tool_log_response.py | 6 +
.../version_deployment_response_file.py | 8 +-
.../requests/version_id_response_version.py | 8 +-
src/humanloop/tools/client.py | 523 +-
src/humanloop/tools/raw_client.py | 765 ++-
src/humanloop/types/__init__.py | 96 +-
src/humanloop/types/agent_call_response.py | 224 +
.../types/agent_call_response_tool_choice.py | 8 +
.../types/agent_call_stream_response.py | 44 +
.../agent_call_stream_response_payload.py | 8 +
.../types/agent_continue_response.py | 224 +
.../agent_continue_response_tool_choice.py | 8 +
.../types/agent_continue_stream_response.py | 44 +
.../agent_continue_stream_response_payload.py | 8 +
src/humanloop/types/agent_inline_tool.py | 23 +
src/humanloop/types/agent_kernel_request.py | 122 +
.../agent_kernel_request_reasoning_effort.py | 6 +
.../types/agent_kernel_request_stop.py | 5 +
.../types/agent_kernel_request_template.py | 6 +
.../types/agent_kernel_request_tools_item.py | 7 +
.../types/agent_linked_file_request.py | 23 +
.../types/agent_linked_file_response.py | 39 +
.../types/agent_linked_file_response_file.py | 16 +
src/humanloop/types/agent_log_response.py | 224 +
.../types/agent_log_response_tool_choice.py | 8 +
.../types/agent_log_stream_response.py | 98 +
src/humanloop/types/agent_response.py | 265 +
.../types/agent_response_reasoning_effort.py | 6 +
src/humanloop/types/agent_response_stop.py | 5 +
.../types/agent_response_template.py | 6 +
.../types/agent_response_tools_item.py | 10 +
.../anthropic_redacted_thinking_content.py | 23 +
.../types/anthropic_thinking_content.py | 28 +
src/humanloop/types/chat_message.py | 6 +
.../types/chat_message_thinking_item.py | 7 +
.../types/create_agent_log_response.py | 42 +
src/humanloop/types/dataset_response.py | 9 +
...tory_with_parents_and_children_response.py | 2 +
...arents_and_children_response_files_item.py | 3 +-
src/humanloop/types/evaluatee_response.py | 2 +
.../types/evaluation_evaluator_response.py | 2 +
.../types/evaluation_log_response.py | 3 +
src/humanloop/types/evaluation_response.py | 2 +
.../types/evaluation_run_response.py | 2 +
.../types/evaluation_runs_response.py | 2 +
src/humanloop/types/evaluator_log_response.py | 3 +
src/humanloop/types/evaluator_response.py | 11 +
src/humanloop/types/event_type.py | 21 +
.../types/file_environment_response.py | 2 +
.../types/file_environment_response_file.py | 3 +-
.../file_environment_variable_request.py | 27 +
src/humanloop/types/file_type.py | 2 +-
src/humanloop/types/files_tool_type.py | 2 +-
src/humanloop/types/flow_log_response.py | 3 +
src/humanloop/types/flow_response.py | 11 +
src/humanloop/types/linked_file_request.py | 21 +
src/humanloop/types/list_agents.py | 31 +
src/humanloop/types/list_evaluators.py | 2 +
src/humanloop/types/list_flows.py | 2 +
src/humanloop/types/list_prompts.py | 2 +
src/humanloop/types/list_tools.py | 2 +
src/humanloop/types/log_response.py | 5 +-
src/humanloop/types/log_stream_response.py | 7 +
src/humanloop/types/model_providers.py | 2 +-
.../types/monitoring_evaluator_response.py | 2 +
src/humanloop/types/on_agent_call_enum.py | 5 +
.../types/open_ai_reasoning_effort.py | 5 +
.../types/paginated_data_agent_response.py | 31 +
.../paginated_data_evaluation_log_response.py | 3 +
.../paginated_data_evaluator_response.py | 2 +
.../types/paginated_data_flow_response.py | 2 +
.../types/paginated_data_log_response.py | 3 +
.../types/paginated_data_prompt_response.py | 2 +
.../types/paginated_data_tool_response.py | 2 +
..._response_flow_response_agent_response.py} | 12 +-
...w_response_agent_response_records_item.py} | 7 +-
.../types/paginated_evaluation_response.py | 2 +
.../types/populate_template_response.py | 22 +-
...late_template_response_reasoning_effort.py | 6 +
src/humanloop/types/prompt_call_response.py | 2 +
src/humanloop/types/prompt_kernel_request.py | 12 +-
.../prompt_kernel_request_reasoning_effort.py | 6 +
src/humanloop/types/prompt_log_response.py | 3 +
src/humanloop/types/prompt_response.py | 22 +-
.../types/prompt_response_reasoning_effort.py | 6 +
src/humanloop/types/reasoning_effort.py | 5 -
src/humanloop/types/run_version_response.py | 3 +-
src/humanloop/types/tool_call_response.py | 168 +
src/humanloop/types/tool_log_response.py | 9 +
src/humanloop/types/tool_response.py | 2 +
.../types/version_deployment_response.py | 2 +
.../types/version_deployment_response_file.py | 3 +-
src/humanloop/types/version_id_response.py | 2 +
.../types/version_id_response_version.py | 3 +-
196 files changed, 17920 insertions(+), 1683 deletions(-)
create mode 100644 src/humanloop/agents/__init__.py
create mode 100644 src/humanloop/agents/client.py
create mode 100644 src/humanloop/agents/raw_client.py
create mode 100644 src/humanloop/agents/requests/__init__.py
create mode 100644 src/humanloop/agents/requests/agent_log_request_agent.py
create mode 100644 src/humanloop/agents/requests/agent_log_request_tool_choice.py
create mode 100644 src/humanloop/agents/requests/agent_request_reasoning_effort.py
create mode 100644 src/humanloop/agents/requests/agent_request_stop.py
create mode 100644 src/humanloop/agents/requests/agent_request_template.py
create mode 100644 src/humanloop/agents/requests/agent_request_tools_item.py
create mode 100644 src/humanloop/agents/requests/agents_call_request_agent.py
create mode 100644 src/humanloop/agents/requests/agents_call_request_tool_choice.py
create mode 100644 src/humanloop/agents/requests/agents_call_stream_request_agent.py
create mode 100644 src/humanloop/agents/requests/agents_call_stream_request_tool_choice.py
create mode 100644 src/humanloop/agents/types/__init__.py
create mode 100644 src/humanloop/agents/types/agent_log_request_agent.py
create mode 100644 src/humanloop/agents/types/agent_log_request_tool_choice.py
create mode 100644 src/humanloop/agents/types/agent_request_reasoning_effort.py
create mode 100644 src/humanloop/agents/types/agent_request_stop.py
create mode 100644 src/humanloop/agents/types/agent_request_template.py
create mode 100644 src/humanloop/agents/types/agent_request_tools_item.py
create mode 100644 src/humanloop/agents/types/agents_call_request_agent.py
create mode 100644 src/humanloop/agents/types/agents_call_request_tool_choice.py
create mode 100644 src/humanloop/agents/types/agents_call_stream_request_agent.py
create mode 100644 src/humanloop/agents/types/agents_call_stream_request_tool_choice.py
create mode 100644 src/humanloop/prompts/requests/prompt_log_request_prompt.py
create mode 100644 src/humanloop/prompts/requests/prompt_request_reasoning_effort.py
create mode 100644 src/humanloop/prompts/requests/prompts_call_request_prompt.py
create mode 100644 src/humanloop/prompts/requests/prompts_call_stream_request_prompt.py
create mode 100644 src/humanloop/prompts/types/prompt_log_request_prompt.py
create mode 100644 src/humanloop/prompts/types/prompt_request_reasoning_effort.py
create mode 100644 src/humanloop/prompts/types/prompts_call_request_prompt.py
create mode 100644 src/humanloop/prompts/types/prompts_call_stream_request_prompt.py
create mode 100644 src/humanloop/requests/agent_call_response.py
create mode 100644 src/humanloop/requests/agent_call_response_tool_choice.py
create mode 100644 src/humanloop/requests/agent_call_stream_response.py
create mode 100644 src/humanloop/requests/agent_call_stream_response_payload.py
create mode 100644 src/humanloop/requests/agent_continue_response.py
create mode 100644 src/humanloop/requests/agent_continue_response_tool_choice.py
create mode 100644 src/humanloop/requests/agent_continue_stream_response.py
create mode 100644 src/humanloop/requests/agent_continue_stream_response_payload.py
create mode 100644 src/humanloop/requests/agent_inline_tool.py
create mode 100644 src/humanloop/requests/agent_kernel_request.py
create mode 100644 src/humanloop/requests/agent_kernel_request_reasoning_effort.py
create mode 100644 src/humanloop/requests/agent_kernel_request_stop.py
create mode 100644 src/humanloop/requests/agent_kernel_request_template.py
create mode 100644 src/humanloop/requests/agent_kernel_request_tools_item.py
create mode 100644 src/humanloop/requests/agent_linked_file_request.py
create mode 100644 src/humanloop/requests/agent_linked_file_response.py
create mode 100644 src/humanloop/requests/agent_linked_file_response_file.py
create mode 100644 src/humanloop/requests/agent_log_response.py
create mode 100644 src/humanloop/requests/agent_log_response_tool_choice.py
create mode 100644 src/humanloop/requests/agent_log_stream_response.py
create mode 100644 src/humanloop/requests/agent_response.py
create mode 100644 src/humanloop/requests/agent_response_reasoning_effort.py
create mode 100644 src/humanloop/requests/agent_response_stop.py
create mode 100644 src/humanloop/requests/agent_response_template.py
create mode 100644 src/humanloop/requests/agent_response_tools_item.py
create mode 100644 src/humanloop/requests/anthropic_redacted_thinking_content.py
create mode 100644 src/humanloop/requests/anthropic_thinking_content.py
create mode 100644 src/humanloop/requests/chat_message_thinking_item.py
create mode 100644 src/humanloop/requests/create_agent_log_response.py
create mode 100644 src/humanloop/requests/file_environment_variable_request.py
create mode 100644 src/humanloop/requests/linked_file_request.py
create mode 100644 src/humanloop/requests/list_agents.py
create mode 100644 src/humanloop/requests/log_stream_response.py
create mode 100644 src/humanloop/requests/paginated_data_agent_response.py
rename src/humanloop/requests/{paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py => paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py} (65%)
rename src/humanloop/requests/{paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py => paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py} (58%)
create mode 100644 src/humanloop/requests/populate_template_response_reasoning_effort.py
create mode 100644 src/humanloop/requests/prompt_kernel_request_reasoning_effort.py
create mode 100644 src/humanloop/requests/prompt_response_reasoning_effort.py
create mode 100644 src/humanloop/requests/tool_call_response.py
create mode 100644 src/humanloop/types/agent_call_response.py
create mode 100644 src/humanloop/types/agent_call_response_tool_choice.py
create mode 100644 src/humanloop/types/agent_call_stream_response.py
create mode 100644 src/humanloop/types/agent_call_stream_response_payload.py
create mode 100644 src/humanloop/types/agent_continue_response.py
create mode 100644 src/humanloop/types/agent_continue_response_tool_choice.py
create mode 100644 src/humanloop/types/agent_continue_stream_response.py
create mode 100644 src/humanloop/types/agent_continue_stream_response_payload.py
create mode 100644 src/humanloop/types/agent_inline_tool.py
create mode 100644 src/humanloop/types/agent_kernel_request.py
create mode 100644 src/humanloop/types/agent_kernel_request_reasoning_effort.py
create mode 100644 src/humanloop/types/agent_kernel_request_stop.py
create mode 100644 src/humanloop/types/agent_kernel_request_template.py
create mode 100644 src/humanloop/types/agent_kernel_request_tools_item.py
create mode 100644 src/humanloop/types/agent_linked_file_request.py
create mode 100644 src/humanloop/types/agent_linked_file_response.py
create mode 100644 src/humanloop/types/agent_linked_file_response_file.py
create mode 100644 src/humanloop/types/agent_log_response.py
create mode 100644 src/humanloop/types/agent_log_response_tool_choice.py
create mode 100644 src/humanloop/types/agent_log_stream_response.py
create mode 100644 src/humanloop/types/agent_response.py
create mode 100644 src/humanloop/types/agent_response_reasoning_effort.py
create mode 100644 src/humanloop/types/agent_response_stop.py
create mode 100644 src/humanloop/types/agent_response_template.py
create mode 100644 src/humanloop/types/agent_response_tools_item.py
create mode 100644 src/humanloop/types/anthropic_redacted_thinking_content.py
create mode 100644 src/humanloop/types/anthropic_thinking_content.py
create mode 100644 src/humanloop/types/chat_message_thinking_item.py
create mode 100644 src/humanloop/types/create_agent_log_response.py
create mode 100644 src/humanloop/types/event_type.py
create mode 100644 src/humanloop/types/file_environment_variable_request.py
create mode 100644 src/humanloop/types/linked_file_request.py
create mode 100644 src/humanloop/types/list_agents.py
create mode 100644 src/humanloop/types/log_stream_response.py
create mode 100644 src/humanloop/types/on_agent_call_enum.py
create mode 100644 src/humanloop/types/open_ai_reasoning_effort.py
create mode 100644 src/humanloop/types/paginated_data_agent_response.py
rename src/humanloop/types/{paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py => paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py} (76%)
rename src/humanloop/types/{paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py => paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py} (63%)
create mode 100644 src/humanloop/types/populate_template_response_reasoning_effort.py
create mode 100644 src/humanloop/types/prompt_kernel_request_reasoning_effort.py
create mode 100644 src/humanloop/types/prompt_response_reasoning_effort.py
delete mode 100644 src/humanloop/types/reasoning_effort.py
create mode 100644 src/humanloop/types/tool_call_response.py
diff --git a/poetry.lock b/poetry.lock
index 4ce5d536..b3099902 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -78,13 +78,13 @@ tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"]
[[package]]
name = "certifi"
-version = "2025.1.31"
+version = "2025.4.26"
description = "Python package for providing Mozilla's CA Bundle."
optional = false
python-versions = ">=3.6"
files = [
- {file = "certifi-2025.1.31-py3-none-any.whl", hash = "sha256:ca78db4565a652026a4db2bcdf68f2fb589ea80d0be70e03929ed730746b84fe"},
- {file = "certifi-2025.1.31.tar.gz", hash = "sha256:3d5da6925056f6f18f119200434a4780a94263f10d1c21d032a6f6b2baa20651"},
+ {file = "certifi-2025.4.26-py3-none-any.whl", hash = "sha256:30350364dfe371162649852c63336a15c70c6510c2ad5015b21c2345311805f3"},
+ {file = "certifi-2025.4.26.tar.gz", hash = "sha256:0a816057ea3cdefcef70270d2c515e4506bbc954f417fa5ade2021213bb8f0c6"},
]
[[package]]
@@ -384,13 +384,13 @@ tqdm = ["tqdm"]
[[package]]
name = "groq"
-version = "0.23.0"
+version = "0.23.1"
description = "The official Python library for the groq API"
optional = false
python-versions = ">=3.8"
files = [
- {file = "groq-0.23.0-py3-none-any.whl", hash = "sha256:039817a6b75d70f129f0591f8c79d3f7655dcf728b709fe5f08cfeadb1d9cc19"},
- {file = "groq-0.23.0.tar.gz", hash = "sha256:426e1d89df5791b34fa3f2eb827aec38490b9b2de5a44bbba6161cf5282ea5c9"},
+ {file = "groq-0.23.1-py3-none-any.whl", hash = "sha256:05fa38c3d0ad03c19c6185f98f6a73901c2a463e844fd067b79f7b05c8346946"},
+ {file = "groq-0.23.1.tar.gz", hash = "sha256:952e34895f9bfb78ab479e495d77b32180262e5c42f531ce3a1722d6e5a04dfb"},
]
[package.dependencies]
@@ -403,29 +403,29 @@ typing-extensions = ">=4.10,<5"
[[package]]
name = "h11"
-version = "0.14.0"
+version = "0.16.0"
description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"},
- {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"},
+ {file = "h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86"},
+ {file = "h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1"},
]
[[package]]
name = "httpcore"
-version = "1.0.8"
+version = "1.0.9"
description = "A minimal low-level HTTP client."
optional = false
python-versions = ">=3.8"
files = [
- {file = "httpcore-1.0.8-py3-none-any.whl", hash = "sha256:5254cf149bcb5f75e9d1b2b9f729ea4a4b883d1ad7379fc632b727cec23674be"},
- {file = "httpcore-1.0.8.tar.gz", hash = "sha256:86e94505ed24ea06514883fd44d2bc02d90e77e7979c8eb71b90f41d364a1bad"},
+ {file = "httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55"},
+ {file = "httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8"},
]
[package.dependencies]
certifi = "*"
-h11 = ">=0.13,<0.15"
+h11 = ">=0.16"
[package.extras]
asyncio = ["anyio (>=4.0,<5.0)"]
@@ -873,13 +873,13 @@ files = [
[[package]]
name = "openai"
-version = "1.75.0"
+version = "1.76.0"
description = "The official Python library for the openai API"
optional = false
python-versions = ">=3.8"
files = [
- {file = "openai-1.75.0-py3-none-any.whl", hash = "sha256:fe6f932d2ded3b429ff67cc9ad118c71327db32eb9d32dd723de3acfca337125"},
- {file = "openai-1.75.0.tar.gz", hash = "sha256:fb3ea907efbdb1bcfd0c44507ad9c961afd7dce3147292b54505ecfd17be8fd1"},
+ {file = "openai-1.76.0-py3-none-any.whl", hash = "sha256:a712b50e78cf78e6d7b2a8f69c4978243517c2c36999756673e07a14ce37dc0a"},
+ {file = "openai-1.76.0.tar.gz", hash = "sha256:fd2bfaf4608f48102d6b74f9e11c5ecaa058b60dad9c36e409c12477dfd91fb2"},
]
[package.dependencies]
@@ -931,13 +931,13 @@ wrapt = ">=1.0.0,<2.0.0"
[[package]]
name = "opentelemetry-instrumentation-anthropic"
-version = "0.39.2"
+version = "0.39.4"
description = "OpenTelemetry Anthropic instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_anthropic-0.39.2-py3-none-any.whl", hash = "sha256:e1bfed6f4e140e0e35d19d44281c968970004467ccc1f40a07233618f798809c"},
- {file = "opentelemetry_instrumentation_anthropic-0.39.2.tar.gz", hash = "sha256:a0dab35b4bc8561623b8f503220846a6b5ad07cd7d3277eeaf5e865d57c6e266"},
+ {file = "opentelemetry_instrumentation_anthropic-0.39.4-py3-none-any.whl", hash = "sha256:f3bebc66b5bfdb83fb6a238a15afbe81f690b3f5314cee76ecf8e35121711972"},
+ {file = "opentelemetry_instrumentation_anthropic-0.39.4.tar.gz", hash = "sha256:15a48d201c97db791b0a1d5e284956178e1d33923ce1c1b90a0735101b83a1a6"},
]
[package.dependencies]
@@ -948,13 +948,13 @@ opentelemetry-semantic-conventions-ai = "0.4.3"
[[package]]
name = "opentelemetry-instrumentation-bedrock"
-version = "0.39.2"
+version = "0.39.4"
description = "OpenTelemetry Bedrock instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_bedrock-0.39.2-py3-none-any.whl", hash = "sha256:dca1b2c5d0c74f41254c6de39fed51167357469159f9453cd9815143a213a1c8"},
- {file = "opentelemetry_instrumentation_bedrock-0.39.2.tar.gz", hash = "sha256:ffe79fa8302dde69c5df86e602288ab48d31bdf3dffe6846cbe6a75cc0bb6385"},
+ {file = "opentelemetry_instrumentation_bedrock-0.39.4-py3-none-any.whl", hash = "sha256:2e74d78b28f7d3928f13826477428aab2ea81e689a851514dda6bf787d0e43f3"},
+ {file = "opentelemetry_instrumentation_bedrock-0.39.4.tar.gz", hash = "sha256:78a988e58e72a11e29cdce4ddb8cfb790315c22d2e84539066fba8bc2c29da8e"},
]
[package.dependencies]
@@ -967,13 +967,13 @@ tokenizers = ">=0.13.0"
[[package]]
name = "opentelemetry-instrumentation-cohere"
-version = "0.39.2"
+version = "0.39.4"
description = "OpenTelemetry Cohere instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_cohere-0.39.2-py3-none-any.whl", hash = "sha256:a71e289231c3ddbe67dd32c0ed8df8b55367ab594410f2cff82f27784268cba5"},
- {file = "opentelemetry_instrumentation_cohere-0.39.2.tar.gz", hash = "sha256:7a7e441d2c8c862e8ba84170bcaef81c5d5e63b42243b7dcc887541a71c90e15"},
+ {file = "opentelemetry_instrumentation_cohere-0.39.4-py3-none-any.whl", hash = "sha256:8408963b1fe1362ab84dd77723a98e54575bc71bf88e25d8252a8de94939773a"},
+ {file = "opentelemetry_instrumentation_cohere-0.39.4.tar.gz", hash = "sha256:0c1c209801dba0238119977e240acd05501a14a39850961c11effe47e4738780"},
]
[package.dependencies]
@@ -984,13 +984,13 @@ opentelemetry-semantic-conventions-ai = "0.4.3"
[[package]]
name = "opentelemetry-instrumentation-groq"
-version = "0.39.2"
+version = "0.39.4"
description = "OpenTelemetry Groq instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_groq-0.39.2-py3-none-any.whl", hash = "sha256:0a19571ef86ce46b18e3c5402d321b620c8d5257bc968e8d7073c8937a376970"},
- {file = "opentelemetry_instrumentation_groq-0.39.2.tar.gz", hash = "sha256:b28a2220f24d8fbea12dc4452ef5812e7ba67c6824b4e62278c3b3ada2248acc"},
+ {file = "opentelemetry_instrumentation_groq-0.39.4-py3-none-any.whl", hash = "sha256:631e0d2ada72f498721bc06be2bcf68ac656d3fac56180e6656bc7d7e53febc4"},
+ {file = "opentelemetry_instrumentation_groq-0.39.4.tar.gz", hash = "sha256:8ff5dd2e904af2128c9dd4e79d08264421750ca855731b7a983d6962df7244ca"},
]
[package.dependencies]
@@ -1001,13 +1001,13 @@ opentelemetry-semantic-conventions-ai = "0.4.3"
[[package]]
name = "opentelemetry-instrumentation-openai"
-version = "0.39.2"
+version = "0.39.4"
description = "OpenTelemetry OpenAI instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_openai-0.39.2-py3-none-any.whl", hash = "sha256:a9016e577a8c11cdfc6d79ebb84ed5f6dcacb59d709d250e40b3d08f9d4c25a2"},
- {file = "opentelemetry_instrumentation_openai-0.39.2.tar.gz", hash = "sha256:25cf133fa3b623f123d953c9d637e6529a1790cd2898bf4d6a50c5bffe260821"},
+ {file = "opentelemetry_instrumentation_openai-0.39.4-py3-none-any.whl", hash = "sha256:94568157e29cb1e0780333b4c3eef42ae6cebb9dbf17383c2b8abcd1fd453bb8"},
+ {file = "opentelemetry_instrumentation_openai-0.39.4.tar.gz", hash = "sha256:6eaba7ddfe051fed9e33faccc580f38e8ca0da465e34a5a5848bfccfae5b4e21"},
]
[package.dependencies]
@@ -1019,13 +1019,13 @@ tiktoken = ">=0.6.0,<1"
[[package]]
name = "opentelemetry-instrumentation-replicate"
-version = "0.39.2"
+version = "0.39.4"
description = "OpenTelemetry Replicate instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_replicate-0.39.2-py3-none-any.whl", hash = "sha256:778ec5a2bf7767b7377ece0dec66dc2d02f1ea8ca3f8037c96c7b6695c56b8db"},
- {file = "opentelemetry_instrumentation_replicate-0.39.2.tar.gz", hash = "sha256:6b9ddbf89d844ffc3725925af04fbee3a0f7a6d19d6050fb9c72bb8dd2eca7eb"},
+ {file = "opentelemetry_instrumentation_replicate-0.39.4-py3-none-any.whl", hash = "sha256:4be73ca3af3afb2444b115618a001842503bea8f4ea0a640f70958d52a420b23"},
+ {file = "opentelemetry_instrumentation_replicate-0.39.4.tar.gz", hash = "sha256:7683ea3314e68aa2db3a0146a6778790ba64e04bc7e92254014b752c2e7bad40"},
]
[package.dependencies]
@@ -1729,13 +1729,13 @@ files = [
[[package]]
name = "replicate"
-version = "1.0.4"
+version = "1.0.6"
description = "Python client for Replicate"
optional = false
python-versions = ">=3.8"
files = [
- {file = "replicate-1.0.4-py3-none-any.whl", hash = "sha256:f568f6271ff715067901b6094c23c37373bbcfd7de0ff9b85e9c9ead567e09e7"},
- {file = "replicate-1.0.4.tar.gz", hash = "sha256:f718601863ef1f419aa7dcdab1ea8770ba5489b571b86edf840cd506d68758ef"},
+ {file = "replicate-1.0.6-py3-none-any.whl", hash = "sha256:d544f837dc7e9dc3b3c1df60a145c7d6f362d6719b719793a44a4be28837103d"},
+ {file = "replicate-1.0.6.tar.gz", hash = "sha256:b8a0f1649ed4146c3d624e22a418b8c6decce9346cffc110c90fde5995c46e60"},
]
[package.dependencies]
diff --git a/pyproject.toml b/pyproject.toml
index ad96beec..73f2c3d4 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -3,7 +3,7 @@ name = "humanloop"
[tool.poetry]
name = "humanloop"
-version = "0.8.35"
+version = "0.8.36"
description = ""
readme = "README.md"
authors = []
diff --git a/reference.md b/reference.md
index 27a32c92..4ec04c0e 100644
--- a/reference.md
+++ b/reference.md
@@ -56,7 +56,7 @@ client.prompts.log(
messages=[{"role": "user", "content": "What really happened at Roswell?"}],
inputs={"person": "Trump"},
created_at=datetime.datetime.fromisoformat(
- "2024-07-18 21:29:35.178000+00:00",
+ "2024-07-18 23:29:35.178000+00:00",
),
provider_latency=6.5931549072265625,
output_message={
@@ -202,7 +202,12 @@ Controls how the model uses tools. The following options are supported:
-
-**prompt:** `typing.Optional[PromptKernelRequestParams]` — Details of your Prompt. A new Prompt version will be created if the provided details are new.
+**prompt:** `typing.Optional[PromptLogRequestPromptParams]`
+
+The prompt configuration to use. Two formats are supported:
+- A `'PromptKernelRequest'` object containing the prompt configuration
+- A string containing a serialized .prompt file
+A new Prompt version will be created if the provided details are new.
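
For illustration, a minimal sketch of the two accepted formats (the parameter values and the serialized `.prompt` contents below are hypothetical):

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Format 1: a PromptKernelRequest-style object with the prompt configuration
client.prompts.log(
    path="conversation",
    prompt={"model": "gpt-4o", "temperature": 0.7},
    messages=[{"role": "user", "content": "Hello"}],
)

# Format 2: a string containing a serialized .prompt file
client.prompts.log(
    path="conversation",
    prompt="---\nmodel: gpt-4o\ntemperature: 0.7\n---\n",
    messages=[{"role": "user", "content": "Hello"}],
)
```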
@@ -752,7 +757,12 @@ Controls how the model uses tools. The following options are supported:
-
-**prompt:** `typing.Optional[PromptKernelRequestParams]` — Details of your Prompt. A new Prompt version will be created if the provided details are new.
+**prompt:** `typing.Optional[PromptsCallStreamRequestPromptParams]`
+
+The prompt configuration to use. Two formats are supported:
+- A `'PromptKernelRequest'` object containing the prompt configuration
+- A string containing a serialized .prompt file
+A new Prompt version will be created if the provided details are new.
@@ -1026,7 +1036,12 @@ Controls how the model uses tools. The following options are supported:
-
-**prompt:** `typing.Optional[PromptKernelRequestParams]` — Details of your Prompt. A new Prompt version will be created if the provided details are new.
+**prompt:** `typing.Optional[PromptsCallRequestPromptParams]`
+
+The prompt configuration to use. Two formats are supported:
+- A `'PromptKernelRequest'` object containing the prompt configuration
+- A string containing a serialized .prompt file
+A new Prompt version will be created if the provided details are new.
@@ -1501,7 +1516,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
-
-**reasoning_effort:** `typing.Optional[ReasoningEffort]` — Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+**reasoning_effort:** `typing.Optional[PromptRequestReasoningEffortParams]` — Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an `OpenAIReasoningEffort` enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
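
A minimal sketch of the two expected shapes, assuming the standard OpenAI effort values (`"low"`/`"medium"`/`"high"`) and illustrative model names:

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# OpenAI reasoning models take an OpenAIReasoningEffort enum value
# (values assumed from the OpenAI API).
client.prompts.log(
    path="reasoning-demo",
    prompt={"model": "o3-mini", "reasoning_effort": "medium"},
    messages=[{"role": "user", "content": "Prove there are infinitely many primes."}],
)

# Anthropic reasoning models take an integer maximum token budget instead.
client.prompts.log(
    path="reasoning-demo",
    prompt={"model": "claude-3-7-sonnet", "reasoning_effort": 4096},
    messages=[{"role": "user", "content": "Prove there are infinitely many primes."}],
)
```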
@@ -2518,8 +2533,7 @@ client.prompts.update_monitoring(
-## Tools
-client.tools.log(...)
+client.prompts.serialize(...)
-
@@ -2531,15 +2545,13 @@ client.prompts.update_monitoring(
-
-Log to a Tool.
+Serialize a Prompt to the .prompt file format.
-You can use query parameters `version_id`, or `environment`, to target
-an existing version of the Tool. Otherwise the default deployed version will be chosen.
+Useful for storing the Prompt with your code in a version control system,
+or for editing with an AI tool.
-Instead of targeting an existing version explicitly, you can instead pass in
-Tool details in the request body. In this case, we will check if the details correspond
-to an existing version of the Tool, if not we will create a new version. This is helpful
-in the case where you are storing or deriving your Tool details in code.
+By default, the deployed version of the Prompt is returned. Use the query parameters
+`version_id` or `environment` to target a specific version of the Prompt.
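
A short usage sketch of version targeting (the Prompt ID and environment name are hypothetical):

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Default: serialize the deployed version of the Prompt
deployed = client.prompts.serialize(id="pr_1234")

# Or target the version deployed to a specific Environment
staging = client.prompts.serialize(id="pr_1234", environment="staging")
```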
@@ -2559,24 +2571,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.tools.log(
- path="math-tool",
- tool={
- "function": {
- "name": "multiply",
- "description": "Multiply two numbers",
- "parameters": {
- "type": "object",
- "properties": {
- "a": {"type": "number"},
- "b": {"type": "number"},
- },
- "required": ["a", "b"],
- },
- }
- },
- inputs={"a": 5, "b": 7},
- output="35",
+client.prompts.serialize(
+ id="id",
)
```
@@ -2593,7 +2589,7 @@ client.tools.log(
-
-**version_id:** `typing.Optional[str]` — A specific Version ID of the Tool to log to.
+**id:** `str` — Unique identifier for Prompt.
@@ -2601,7 +2597,7 @@ client.tools.log(
-
-**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Prompt to retrieve.
@@ -2609,7 +2605,7 @@ client.tools.log(
-
-**path:** `typing.Optional[str]` — Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
@@ -2617,31 +2613,72 @@ client.tools.log(
-
-**id:** `typing.Optional[str]` — ID for an existing Tool.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+client.prompts.deserialize(...)
-
-**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
-
+#### 📝 Description
+
+
+-
+
+
+-
+
+Deserialize a Prompt from the .prompt file format.
+
+This returns a subset of the attributes required by a Prompt.
+This subset defines the Prompt version (e.g. `model`, `temperature`, etc.).
+
+
+#### 🔌 Usage
+
-
-**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
-
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.prompts.deserialize(
+ prompt="prompt",
+)
+
+```
+
+
+#### ⚙️ Parameters
+
-
-**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+-
+
+**prompt:** `str`
@@ -2649,15 +2686,78 @@ client.tools.log(
-
-**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+## Tools
+client.tools.call(...)
-
-**error:** `typing.Optional[str]` — Error message if the log is an error.
+#### 📝 Description
+
+
+-
+
+
+-
+
+Call a Tool.
+
+Calling a Tool with inputs runs the tool's source code and logs the result and metadata to Humanloop.
+
+You can use query parameters `version_id`, or `environment`, to target
+an existing version of the Tool. Otherwise, the default deployed version will be chosen.
+
+Instead of targeting an existing version explicitly, you can instead pass in
+Tool details in the request body. In this case, we will check if the details correspond
+to an existing version of the Tool. If they do not, we will create a new version. This is helpful
+in the case where you are storing or deriving your Tool details in code.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.tools.call()
+
+```
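
Beyond the bare call above, a hedged sketch of version targeting and inline Tool details (the ID and environment name are hypothetical, and `inputs` mirroring `tools.log` is an assumption):

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Target a deployed version explicitly
client.tools.call(id="tl_789ghi", environment="production")

# Or pass Tool details inline: a matching version is reused, otherwise created
client.tools.call(
    path="math-tool",
    tool={
        "function": {
            "name": "multiply",
            "description": "Multiply two numbers",
            "parameters": {
                "type": "object",
                "properties": {"a": {"type": "number"}, "b": {"type": "number"}},
                "required": ["a", "b"],
            },
        }
    },
    inputs={"a": 5, "b": 7},  # assumption: inputs as in tools.log
)
```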
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Tool to call.
@@ -2665,7 +2765,7 @@ client.tools.log(
-
-**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
+**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to call.
@@ -2673,7 +2773,7 @@ client.tools.log(
-
-**stdout:** `typing.Optional[str]` — Captured log and debug statements.
+**path:** `typing.Optional[str]` — Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
@@ -2681,7 +2781,7 @@ client.tools.log(
-
-**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider.
+**id:** `typing.Optional[str]` — ID for an existing Tool.
@@ -2689,7 +2789,7 @@ client.tools.log(
-
-**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received the provider.
+**tool:** `typing.Optional[ToolKernelRequestParams]` — Details of your Tool. A new Tool version will be created if the provided details are new.
@@ -2721,7 +2821,7 @@ client.tools.log(
-
-**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
@@ -2729,7 +2829,7 @@ client.tools.log(
-
-**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
@@ -2737,7 +2837,7 @@ client.tools.log(
-
-**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
+**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
@@ -2745,7 +2845,7 @@ client.tools.log(
-
-**user:** `typing.Optional[str]` — End-user ID related to the Log.
+**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -2753,7 +2853,7 @@ client.tools.log(
-
-**tool_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to.
+**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
@@ -2761,7 +2861,7 @@ client.tools.log(
-
-**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
+**user:** `typing.Optional[str]` — End-user ID related to the Log.
@@ -2769,7 +2869,7 @@ client.tools.log(
-
-**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+**tool_call_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to.
@@ -2777,7 +2877,15 @@ client.tools.log(
-
-**tool:** `typing.Optional[ToolKernelRequestParams]` — Details of your Tool. A new Tool version will be created if the provided details are new.
+**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
+
+
+
+
+
+-
+
+**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
@@ -2797,7 +2905,7 @@ client.tools.log(
-client.tools.update(...)
+client.tools.log(...)
-
@@ -2809,9 +2917,15 @@ client.tools.log(
-
-Update a Log.
+Log to a Tool.
-Update the details of a Log with the given ID.
+You can use query parameters `version_id`, or `environment`, to target
+an existing version of the Tool. Otherwise, the default deployed version will be chosen.
+
+Instead of targeting an existing version explicitly, you can instead pass in
+Tool details in the request body. In this case, we will check if the details correspond
+to an existing version of the Tool. If they do not, we will create a new version. This is helpful
+in the case where you are storing or deriving your Tool details in code.
@@ -2831,9 +2945,24 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.tools.update(
- id="id",
- log_id="log_id",
+client.tools.log(
+ path="math-tool",
+ tool={
+ "function": {
+ "name": "multiply",
+ "description": "Multiply two numbers",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "a": {"type": "number"},
+ "b": {"type": "number"},
+ },
+ "required": ["a", "b"],
+ },
+ }
+ },
+ inputs={"a": 5, "b": 7},
+ output="35",
)
```
@@ -2850,7 +2979,7 @@ client.tools.update(
-
-**id:** `str` — Unique identifier for Prompt.
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Tool to log to.
@@ -2858,7 +2987,7 @@ client.tools.update(
-
-**log_id:** `str` — Unique identifier for the Log.
+**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
@@ -2866,7 +2995,7 @@ client.tools.update(
-
-**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+**path:** `typing.Optional[str]` — Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
@@ -2874,7 +3003,7 @@ client.tools.update(
-
-**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
+**id:** `typing.Optional[str]` — ID for an existing Tool.
@@ -2882,7 +3011,7 @@ client.tools.update(
-
-**error:** `typing.Optional[str]` — Error message if the log is an error.
+**tool:** `typing.Optional[ToolKernelRequestParams]` — Details of your Tool. A new Tool version will be created if the provided details are new.
@@ -2890,7 +3019,7 @@ client.tools.update(
-
-**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
+**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
@@ -2898,7 +3027,7 @@ client.tools.update(
-
-**stdout:** `typing.Optional[str]` — Captured log and debug statements.
+**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
@@ -2906,7 +3035,7 @@ client.tools.update(
-
-**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider.
+**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
@@ -2914,7 +3043,7 @@ client.tools.update(
-
-**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received the provider.
+**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
@@ -2922,7 +3051,7 @@ client.tools.update(
-
-**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
+**error:** `typing.Optional[str]` — Error message if the log is an error.
@@ -2930,7 +3059,7 @@ client.tools.update(
-
-**source:** `typing.Optional[str]` — Identifies where the model was called from.
+**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
@@ -2938,7 +3067,7 @@ client.tools.update(
-
-**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
+**stdout:** `typing.Optional[str]` — Captured log and debug statements.
@@ -2946,7 +3075,7 @@ client.tools.update(
-
-**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
+**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider.
@@ -2954,7 +3083,7 @@ client.tools.update(
-
-**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
+**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received from the provider.
@@ -2962,7 +3091,7 @@ client.tools.update(
-
-**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
@@ -2970,74 +3099,31 @@ client.tools.update(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**source:** `typing.Optional[str]` — Identifies where the model was called from.
-
-
+
+-
+**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
+
-
-client.tools.list(...)
-
-#### 📝 Description
-
-
--
+**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+
+
-
-Get a list of all Tools.
-
-
-
-
-
-#### 🔌 Usage
-
-
--
-
-
--
-
-```python
-from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-response = client.tools.list(
- size=1,
-)
-for item in response:
- yield item
-# alternatively, you can paginate page-by-page
-for page in response.iter_pages():
- yield page
-
-```
-
-
-
-
-
-#### ⚙️ Parameters
-
-
--
-
-
--
-
-**page:** `typing.Optional[int]` — Page offset for pagination.
+**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -3045,7 +3131,7 @@ for page in response.iter_pages():
-
-**size:** `typing.Optional[int]` — Page size for pagination. Number of Tools to fetch.
+**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
@@ -3053,7 +3139,7 @@ for page in response.iter_pages():
-
-**name:** `typing.Optional[str]` — Case-insensitive filter for Tool name.
+**user:** `typing.Optional[str]` — End-user ID related to the Log.
@@ -3061,7 +3147,7 @@ for page in response.iter_pages():
-
-**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Tool. This filter matches against both email address and name of users.
+**tool_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to.
@@ -3069,7 +3155,7 @@ for page in response.iter_pages():
-
-**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Tools by
+**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
@@ -3077,7 +3163,7 @@ for page in response.iter_pages():
-
-**order:** `typing.Optional[SortOrder]` — Direction to sort by.
+**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
@@ -3097,7 +3183,7 @@ for page in response.iter_pages():
-client.tools.upsert(...)
+client.tools.update(...)
-
@@ -3109,13 +3195,9 @@ for page in response.iter_pages():
-
-Create a Tool or update it with a new version if it already exists.
-
-Tools are identified by the `ID` or their `path`. The name, description and parameters determine the versions of the Tool.
+Update a Log.
-You can provide `version_name` and `version_description` to identify and describe your versions.
-Version names must be unique within a Tool - attempting to create a version with a name
-that already exists will result in a 409 Conflict error.
+Update the details of a Log with the given ID.
@@ -3135,19 +3217,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.tools.upsert(
- path="math-tool",
- function={
- "name": "multiply",
- "description": "Multiply two numbers",
- "parameters": {
- "type": "object",
- "properties": {"a": {"type": "number"}, "b": {"type": "number"}},
- "required": ["a", "b"],
- },
- },
- version_name="math-tool-v1",
- version_description="Simple math tool that multiplies two numbers",
+client.tools.update(
+ id="id",
+ log_id="log_id",
)
```
@@ -3164,7 +3236,7 @@ client.tools.upsert(
-
-**path:** `typing.Optional[str]` — Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+**id:** `str` — Unique identifier for Prompt.
@@ -3172,7 +3244,7 @@ client.tools.upsert(
-
-**id:** `typing.Optional[str]` — ID for an existing Tool.
+**log_id:** `str` — Unique identifier for the Log.
@@ -3180,7 +3252,7 @@ client.tools.upsert(
-
-**function:** `typing.Optional[ToolFunctionParams]` — Callable function specification of the Tool shown to the model for tool calling.
+**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
@@ -3188,7 +3260,7 @@ client.tools.upsert(
-
-**source_code:** `typing.Optional[str]` — Code source of the Tool.
+**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
@@ -3196,7 +3268,7 @@ client.tools.upsert(
-
-**setup_values:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Values needed to setup the Tool, defined in JSON Schema format: https://json-schema.org/
+**error:** `typing.Optional[str]` — Error message if the log is an error.
@@ -3204,7 +3276,7 @@ client.tools.upsert(
-
-**attributes:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Additional fields to describe the Tool. Helpful to separate Tool versions from each other with details on how they were created or used.
+**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
@@ -3212,7 +3284,7 @@ client.tools.upsert(
-
-**tool_type:** `typing.Optional[FilesToolType]` — Type of Tool.
+**stdout:** `typing.Optional[str]` — Captured log and debug statements.
@@ -3220,7 +3292,7 @@ client.tools.upsert(
-
-**version_name:** `typing.Optional[str]` — Unique identifier for this Tool version. Each Tool can only have one version with a given name.
+**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider.
@@ -3228,7 +3300,7 @@ client.tools.upsert(
-
-**version_description:** `typing.Optional[str]` — Description of the Version.
+**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received from the provider.
@@ -3236,72 +3308,31 @@ client.tools.upsert(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
-
-
-
-
-
-
-
-
-client.tools.get(...)
-
--
-
-#### 📝 Description
-
-
--
-
-Retrieve the Tool with the given ID.
-
-By default, the deployed version of the Tool is returned. Use the query parameters
-`version_id` or `environment` to target a specific version of the Tool.
-
-
+**source:** `typing.Optional[str]` — Identifies where the model was called from.
+
-#### 🔌 Usage
-
-
--
-
-
-```python
-from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.tools.get(
- id="tl_789ghi",
-)
-
-```
-
-
+**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
+
-#### ⚙️ Parameters
-
-
--
-
-
-**id:** `str` — Unique identifier for Tool.
+**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
@@ -3309,7 +3340,7 @@ client.tools.get(
-
-**version_id:** `typing.Optional[str]` — A specific Version ID of the Tool to retrieve.
+**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
@@ -3317,7 +3348,7 @@ client.tools.get(
-
-**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
+**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
@@ -3337,7 +3368,7 @@ client.tools.get(
-client.tools.delete(...)
+client.tools.list(...)
-
@@ -3349,7 +3380,7 @@ client.tools.get(
-
-Delete the Tool with the given ID.
+Get a list of all Tools.
@@ -3369,9 +3400,14 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.tools.delete(
- id="tl_789ghi",
+response = client.tools.list(
+ size=1,
)
+for item in response:
+ yield item
+# alternatively, you can paginate page-by-page
+for page in response.iter_pages():
+ yield page
```
@@ -3387,7 +3423,7 @@ client.tools.delete(
-
-**id:** `str` — Unique identifier for Tool.
+**page:** `typing.Optional[int]` — Page offset for pagination.
@@ -3395,70 +3431,23 @@ client.tools.delete(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**size:** `typing.Optional[int]` — Page size for pagination. Number of Tools to fetch.
-
-
-
-
-
-
-
-
-client.tools.move(...)
-
--
-
-#### 📝 Description
-
-
--
-
-
--
-
-Move the Tool to a different path or change the name.
-
-
-
-
-
-#### 🔌 Usage
-
-
--
-
-```python
-from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.tools.move(
- id="tl_789ghi",
- path="new directory/new name",
-)
-
-```
-
-
+**name:** `typing.Optional[str]` — Case-insensitive filter for Tool name.
+
-#### ⚙️ Parameters
-
-
--
-
-
-**id:** `str` — Unique identifier for Tool.
+**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Tool. This filter matches against both email address and name of users.
@@ -3466,7 +3455,7 @@ client.tools.move(
-
-**path:** `typing.Optional[str]` — Path of the Tool including the Tool name, which is used as a unique identifier.
+**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Tools by.
@@ -3474,7 +3463,7 @@ client.tools.move(
-
-**name:** `typing.Optional[str]` — Name of the Tool, which is used as a unique identifier.
+**order:** `typing.Optional[SortOrder]` — Direction to sort by.
@@ -3494,7 +3483,7 @@ client.tools.move(
-client.tools.list_versions(...)
+client.tools.upsert(...)
-
@@ -3506,7 +3495,13 @@ client.tools.move(
-
-Get a list of all the versions of a Tool.
+Create a Tool or update it with a new version if it already exists.
+
+Tools are identified by the `ID` or their `path`. The name, description and parameters determine the versions of the Tool.
+
+You can provide `version_name` and `version_description` to identify and describe your versions.
+Version names must be unique within a Tool - attempting to create a version with a name
+that already exists will result in a 409 Conflict error.
@@ -3526,8 +3521,19 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.tools.list_versions(
- id="tl_789ghi",
+client.tools.upsert(
+ path="math-tool",
+ function={
+ "name": "multiply",
+ "description": "Multiply two numbers",
+ "parameters": {
+ "type": "object",
+ "properties": {"a": {"type": "number"}, "b": {"type": "number"}},
+ "required": ["a", "b"],
+ },
+ },
+ version_name="math-tool-v1",
+ version_description="Simple math tool that multiplies two numbers",
)
```
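+
+A sketch of creating a second version: since Tools are identified by `path` (or `ID`) and the function determines the version, upserting the same path with a changed function and a new `version_name` adds a version rather than a new Tool:
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+    api_key="YOUR_API_KEY",
+)
+# Same path as before, new function spec and version name; reusing an
+# existing version name would return a 409 Conflict.
+client.tools.upsert(
+    path="math-tool",
+    function={
+        "name": "multiply",
+        "description": "Multiply two numbers and round to 2 decimal places",
+        "parameters": {
+            "type": "object",
+            "properties": {"a": {"type": "number"}, "b": {"type": "number"}},
+            "required": ["a", "b"],
+        },
+    },
+    version_name="math-tool-v2",
+)
+
+```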
@@ -3544,7 +3550,7 @@ client.tools.list_versions(
-
-**id:** `str` — Unique identifier for the Tool.
+**path:** `typing.Optional[str]` — Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
@@ -3552,7 +3558,63 @@ client.tools.list_versions(
-
-**evaluator_aggregates:** `typing.Optional[bool]` — Whether to include Evaluator aggregate results for the versions in the response
+**id:** `typing.Optional[str]` — ID for an existing Tool.
+
+
+
+
+
+-
+
+**function:** `typing.Optional[ToolFunctionParams]` — Callable function specification of the Tool shown to the model for tool calling.
+
+
+
+
+
+-
+
+**source_code:** `typing.Optional[str]` — Code source of the Tool.
+
+
+
+
+
+-
+
+**setup_values:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Values needed to set up the Tool, defined in JSON Schema format: https://json-schema.org/
+
+
+
+
+
+-
+
+**attributes:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Additional fields to describe the Tool. Helpful to separate Tool versions from each other with details on how they were created or used.
+
+
+
+
+
+-
+
+**tool_type:** `typing.Optional[FilesToolType]` — Type of Tool.
+
+
+
+
+
+-
+
+**version_name:** `typing.Optional[str]` — Unique identifier for this Tool version. Each Tool can only have one version with a given name.
+
+
+
+
+
+-
+
+**version_description:** `typing.Optional[str]` — Description of the Version.
@@ -3572,7 +3634,7 @@ client.tools.list_versions(
-client.tools.delete_tool_version(...)
+client.tools.get(...)
-
@@ -3584,7 +3646,10 @@ client.tools.list_versions(
-
-Delete a version of the Tool.
+Retrieve the Tool with the given ID.
+
+By default, the deployed version of the Tool is returned. Use the query parameters
+`version_id` or `environment` to target a specific version of the Tool.
@@ -3604,9 +3669,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.tools.delete_tool_version(
- id="id",
- version_id="version_id",
+client.tools.get(
+ id="tl_789ghi",
)
```
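+
+A sketch of targeting a specific deployment instead of the default, using the `environment` query parameter documented below (the Environment name is hypothetical):
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+    api_key="YOUR_API_KEY",
+)
+# Fetch the version deployed to a hypothetical "production" Environment.
+tool = client.tools.get(
+    id="tl_789ghi",
+    environment="production",
+)
+
+```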
@@ -3631,7 +3695,15 @@ client.tools.delete_tool_version(
-
-**version_id:** `str` — Unique identifier for the specific version of the Tool.
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Tool to retrieve.
+
+
+
+
+
+-
+
+**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
@@ -3651,7 +3723,7 @@ client.tools.delete_tool_version(
-client.tools.update_tool_version(...)
+client.tools.delete(...)
-
@@ -3663,7 +3735,7 @@ client.tools.delete_tool_version(
-
-Update the name or description of the Tool version.
+Delete the Tool with the given ID.
@@ -3683,9 +3755,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.tools.update_tool_version(
- id="id",
- version_id="version_id",
+client.tools.delete(
+ id="tl_789ghi",
)
```
@@ -3710,30 +3781,6 @@ client.tools.update_tool_version(
-
-**version_id:** `str` — Unique identifier for the specific version of the Tool.
-
-
-
-
-
--
-
-**name:** `typing.Optional[str]` — Name of the version.
-
-
-
-
-
--
-
-**description:** `typing.Optional[str]` — Description of the version.
-
-
-
-
-
--
-
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -3746,7 +3793,7 @@ client.tools.update_tool_version(
-client.tools.set_deployment(...)
+client.tools.move(...)
-
@@ -3758,10 +3805,7 @@ client.tools.update_tool_version(
-
-Deploy Tool to an Environment.
-
-Set the deployed version for the specified Environment. This Prompt
-will be used for calls made to the Tool in this Environment.
+Move the Tool to a different path or change the name.
@@ -3781,10 +3825,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.tools.set_deployment(
+client.tools.move(
id="tl_789ghi",
- environment_id="staging",
- version_id="tv_012jkl",
+ path="new directory/new name",
)
```
@@ -3809,7 +3852,7 @@ client.tools.set_deployment(
-
-**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
+**path:** `typing.Optional[str]` — Path of the Tool including the Tool name, which is used as a unique identifier.
@@ -3817,7 +3860,7 @@ client.tools.set_deployment(
-
-**version_id:** `str` — Unique identifier for the specific version of the Tool.
+**name:** `typing.Optional[str]` — Name of the Tool, which is used as a unique identifier.
@@ -3837,7 +3880,7 @@ client.tools.set_deployment(
-client.tools.remove_deployment(...)
+client.tools.list_versions(...)
-
@@ -3849,10 +3892,7 @@ client.tools.set_deployment(
-
-Remove deployed Tool from the Environment.
-
-Remove the deployed version for the specified Environment. This Tool
-will no longer be used for calls made to the Tool in this Environment.
+Get a list of all the versions of a Tool.
@@ -3872,9 +3912,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.tools.remove_deployment(
+client.tools.list_versions(
id="tl_789ghi",
- environment_id="staging",
)
```
@@ -3891,7 +3930,7 @@ client.tools.remove_deployment(
-
-**id:** `str` — Unique identifier for Tool.
+**id:** `str` — Unique identifier for the Tool.
@@ -3899,7 +3938,7 @@ client.tools.remove_deployment(
-
-**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
+**evaluator_aggregates:** `typing.Optional[bool]` — Whether to include Evaluator aggregate results for the versions in the response.
@@ -3919,7 +3958,7 @@ client.tools.remove_deployment(
-client.tools.list_environments(...)
+client.tools.delete_tool_version(...)
-
@@ -3931,7 +3970,7 @@ client.tools.remove_deployment(
-
-List all Environments and their deployed versions for the Tool.
+Delete a version of the Tool.
@@ -3951,8 +3990,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.tools.list_environments(
- id="tl_789ghi",
+client.tools.delete_tool_version(
+ id="id",
+ version_id="version_id",
)
```
@@ -3977,6 +4017,14 @@ client.tools.list_environments(
-
+**version_id:** `str` — Unique identifier for the specific version of the Tool.
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -3989,7 +4037,7 @@ client.tools.list_environments(
-client.tools.update_monitoring(...)
+client.tools.update_tool_version(...)
-
@@ -4001,10 +4049,7 @@ client.tools.list_environments(
-
-Activate and deactivate Evaluators for monitoring the Tool.
-
-An activated Evaluator will automatically be run on all new Logs
-within the Tool for monitoring purposes.
+Update the name or description of the Tool version.
@@ -4024,9 +4069,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.tools.update_monitoring(
- id="tl_789ghi",
- activate=[{"evaluator_version_id": "evv_1abc4308abd"}],
+client.tools.update_tool_version(
+ id="id",
+ version_id="version_id",
)
```
@@ -4043,7 +4088,7 @@ client.tools.update_monitoring(
-
-**id:** `str`
+**id:** `str` — Unique identifier for Tool.
@@ -4051,9 +4096,7 @@ client.tools.update_monitoring(
-
-**activate:** `typing.Optional[
- typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]
-]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+**version_id:** `str` — Unique identifier for the specific version of the Tool.
@@ -4061,9 +4104,15 @@ client.tools.update_monitoring(
-
-**deactivate:** `typing.Optional[
- typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]
-]` — Evaluators to deactivate. These will not be run on new Logs.
+**name:** `typing.Optional[str]` — Name of the version.
+
+
+
+
+
+-
+
+**description:** `typing.Optional[str]` — Description of the version.
@@ -4083,8 +4132,7 @@ client.tools.update_monitoring(
-## Datasets
-client.datasets.list(...)
+client.tools.set_deployment(...)
-
@@ -4096,7 +4144,10 @@ client.tools.update_monitoring(
-
-List all Datasets.
+Deploy Tool to an Environment.
+
+Set the deployed version for the specified Environment. This version
+will be used for calls made to the Tool in this Environment.
@@ -4116,14 +4167,11 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-response = client.datasets.list(
- size=1,
+client.tools.set_deployment(
+ id="tl_789ghi",
+ environment_id="staging",
+ version_id="tv_012jkl",
)
-for item in response:
- yield item
-# alternatively, you can paginate page-by-page
-for page in response.iter_pages():
- yield page
```
@@ -4139,7 +4187,7 @@ for page in response.iter_pages():
-
-**page:** `typing.Optional[int]` — Page offset for pagination.
+**id:** `str` — Unique identifier for Tool.
@@ -4147,7 +4195,7 @@ for page in response.iter_pages():
-
-**size:** `typing.Optional[int]` — Page size for pagination. Number of Datasets to fetch.
+**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
@@ -4155,7 +4203,7 @@ for page in response.iter_pages():
-
-**name:** `typing.Optional[str]` — Case-insensitive filter for Dataset name.
+**version_id:** `str` — Unique identifier for the specific version of the Tool.
@@ -4163,47 +4211,40 @@ for page in response.iter_pages():
-
-**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Dataset. This filter matches against both email address and name of users.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Datasets by
-
+
+client.tools.remove_deployment(...)
-
-**order:** `typing.Optional[SortOrder]` — Direction to sort by.
-
-
-
+#### 📝 Description
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
-
-
-
-
+
+-
+Remove deployed Tool from the Environment.
+Remove the deployed version for the specified Environment. This Tool
+will no longer be used for calls made to the Tool in this Environment.
+
+
-
-
-client.datasets.upsert(...)
-
--
-#### 📝 Description
+#### 🔌 Usage
-
@@ -4211,70 +4252,15 @@ for page in response.iter_pages():
-
-Create a Dataset or update it with a new version if it already exists.
-
-Datasets are identified by the `ID` or their `path`. The datapoints determine the versions of the Dataset.
-
-By default, the new Dataset version will be set to the list of Datapoints provided in
-the request. You can also create a new version by adding or removing Datapoints from an existing version
-by specifying `action` as `add` or `remove` respectively. In this case, you may specify
-the `version_id` or `environment` query parameters to identify the existing version to base
-the new version on. If neither is provided, the latest created version will be used.
-
-You can provide `version_name` and `version_description` to identify and describe your versions.
-Version names must be unique within a Dataset - attempting to create a version with a name
-that already exists will result in a 409 Conflict error.
-
-Humanloop also deduplicates Datapoints. If you try to add a Datapoint that already
-exists, it will be ignored. If you intentionally want to add a duplicate Datapoint,
-you can add a unique identifier to the Datapoint's inputs such as `{_dedupe_id: }`.
-
-
-
-
-
-#### 🔌 Usage
-
-
--
-
-
--
-
-```python
-from humanloop import Humanloop
+```python
+from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.datasets.upsert(
- path="datasets/support-queries",
- datapoints=[
- {
- "messages": [
- {
- "role": "user",
- "content": "How do i manage my organizations API keys?\n",
- }
- ],
- "target": {
- "response": 'Hey, thanks for your questions. Here are steps for how to achieve: 1. Log in to the Humanloop Dashboard \n\n2. Click on "Organization Settings."\n If you do not see this option, you might need to contact your organization admin to gain the necessary permissions.\n\n3. Within the settings or organization settings, select the option labeled "API Keys" on the left. Here you will be able to view and manage your API keys.\n\n4. You will see a list of existing API keys. You can perform various actions, such as:\n - **Generate New API Key:** Click on the "Generate New Key" button if you need a new API key.\n - **Revoke an API Key:** If you need to disable an existing key, find the key in the list and click the "Revoke" or "Delete" button.\n - **Copy an API Key:** If you need to use an existing key, you can copy it to your clipboard by clicking the "Copy" button next to the key.\n\n5. **Save and Secure API Keys:** Make sure to securely store any new or existing API keys you are using. Treat them like passwords and do not share them publicly.\n\nIf you encounter any issues or need further assistance, it might be helpful to engage with an engineer or your IT department to ensure you have the necessary permissions and support.\n\nWould you need help with anything else?'
- },
- },
- {
- "messages": [
- {
- "role": "user",
- "content": "Hey, can do I use my code evaluator for monitoring my legal-copilot prompt?",
- }
- ],
- "target": {
- "response": "Hey, thanks for your questions. Here are steps for how to achieve: 1. Navigate to your Prompt dashboard. \n 2. Select the `Monitoring` button on the top right of the Prompt dashboard \n 3. Within the model select the Version of the Evaluator you want to turn on for monitoring. \n\nWould you need help with anything else?"
- },
- },
- ],
- version_name="Initial version",
- version_description="Add two new questions and answers",
+client.tools.remove_deployment(
+ id="tl_789ghi",
+ environment_id="staging",
)
```
@@ -4291,7 +4277,7 @@ client.datasets.upsert(
-
-**datapoints:** `typing.Sequence[CreateDatapointRequestParams]` — The Datapoints to create this Dataset version with. Modify the `action` field to determine how these Datapoints are used.
+**id:** `str` — Unique identifier for Tool.
@@ -4299,7 +4285,7 @@ client.datasets.upsert(
-
-**version_id:** `typing.Optional[str]` — ID of the specific Dataset version to base the created Version on. Only used when `action` is `"add"` or `"remove"`.
+**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
@@ -4307,71 +4293,69 @@ client.datasets.upsert(
-
-**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed Version to base the created Version on. Only used when `action` is `"add"` or `"remove"`.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**include_datapoints:** `typing.Optional[bool]` — If set to `true`, include all Datapoints in the response. Defaults to `false`. Consider using the paginated List Datapoints endpoint instead.
-
+
+client.tools.list_environments(...)
-
-**path:** `typing.Optional[str]` — Path of the Dataset, including the name. This locates the Dataset in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
-
-
-
+#### 📝 Description
-
-**id:** `typing.Optional[str]` — ID for an existing Dataset.
-
+
+-
+
+List all Environments and their deployed versions for the Tool.
+
+
+#### 🔌 Usage
+
-
-**action:** `typing.Optional[UpdateDatesetAction]`
+
+-
-The action to take with the provided Datapoints.
+```python
+from humanloop import Humanloop
- - If `"set"`, the created version will only contain the Datapoints provided in this request.
- - If `"add"`, the created version will contain the Datapoints provided in this request in addition to the Datapoints in the target version.
- - If `"remove"`, the created version will contain the Datapoints in the target version except for the Datapoints provided in this request.
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.tools.list_environments(
+ id="tl_789ghi",
+)
-If `"add"` or `"remove"`, one of the `version_id` or `environment` query parameters may be provided.
-
+```
-
-
--
-
-**attributes:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Additional fields to describe the Dataset. Helpful to separate Dataset versions from each other with details on how they were created or used.
-
+#### ⚙️ Parameters
+
-
-**version_name:** `typing.Optional[str]` — Unique name for the Dataset version. Version names must be unique for a given Dataset.
-
-
-
-
-
-**version_description:** `typing.Optional[str]` — Description of the version, e.g., the changes made in this version.
+**id:** `str` — Unique identifier for Tool.
@@ -4391,7 +4375,7 @@ If `"add"` or `"remove"`, one of the `version_id` or `environment` query paramet
-client.datasets.get(...)
+client.tools.update_monitoring(...)
-
@@ -4403,15 +4387,10 @@ If `"add"` or `"remove"`, one of the `version_id` or `environment` query paramet
-
-Retrieve the Dataset with the given ID.
-
-Unless `include_datapoints` is set to `true`, the response will not include
-the Datapoints.
-Use the List Datapoints endpoint (`GET /{id}/datapoints`) to efficiently
-retrieve Datapoints for a large Dataset.
+Activate and deactivate Evaluators for monitoring the Tool.
-By default, the deployed version of the Dataset is returned. Use the query parameters
-`version_id` or `environment` to target a specific version of the Dataset.
+An activated Evaluator will automatically be run on all new Logs
+within the Tool for monitoring purposes.
@@ -4431,10 +4410,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.datasets.get(
- id="ds_b0baF1ca7652",
- version_id="dsv_6L78pqrdFi2xa",
- include_datapoints=True,
+client.tools.update_monitoring(
+ id="tl_789ghi",
+ activate=[{"evaluator_version_id": "evv_1abc4308abd"}],
)
```
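+
+The same endpoint deactivates Evaluators; a minimal sketch, assuming the `deactivate` items take the same `evaluator_version_id` shape as the `activate` items documented below:
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+    api_key="YOUR_API_KEY",
+)
+# Stop running a previously activated Evaluator on new Logs.
+client.tools.update_monitoring(
+    id="tl_789ghi",
+    deactivate=[{"evaluator_version_id": "evv_1abc4308abd"}],
+)
+
+```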
@@ -4451,15 +4429,7 @@ client.datasets.get(
-
-**id:** `str` — Unique identifier for Dataset.
-
-
-
-
-
--
-
-**version_id:** `typing.Optional[str]` — A specific Version ID of the Dataset to retrieve.
+**id:** `str` — Unique identifier for Tool.
@@ -4467,7 +4437,9 @@ client.datasets.get(
-
-**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
+**activate:** `typing.Optional[
+ typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]
+]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
@@ -4475,7 +4447,9 @@ client.datasets.get(
-
-**include_datapoints:** `typing.Optional[bool]` — If set to `true`, include all Datapoints in the response. Defaults to `false`. Consider using the paginated List Datapoints endpoint instead.
+**deactivate:** `typing.Optional[
+ typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]
+]` — Evaluators to deactivate. These will not be run on new Logs.
@@ -4495,24 +4469,10 @@ client.datasets.get(
-client.datasets.delete(...)
-
--
-
-#### 📝 Description
-
-
--
-
+
client.tools.get_environment_variables(...)
-
-Delete the Dataset with the given ID.
-
-
-
-
-
#### 🔌 Usage
@@ -4527,7 +4487,7 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.datasets.delete(
+client.tools.get_environment_variables(
id="id",
)
@@ -4545,7 +4505,7 @@ client.datasets.delete(
-
-**id:** `str` — Unique identifier for Dataset.
+**id:** `str` — Unique identifier for File.
@@ -4565,7 +4525,7 @@ client.datasets.delete(
-client.datasets.move(...)
+client.tools.add_environment_variable(...)
-
@@ -4577,7 +4537,7 @@ client.datasets.delete(
-
-Move the Dataset to a different path or change the name.
+Add an environment variable to a Tool.
@@ -4597,8 +4557,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.datasets.move(
+client.tools.add_environment_variable(
id="id",
+ request=[{"name": "name", "value": "value"}],
)
```
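+
+A sketch with a concrete, hypothetical variable, following the `name`/`value` shape shown above:
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+    api_key="YOUR_API_KEY",
+)
+# Hypothetical variable exposed to the Tool's runtime.
+client.tools.add_environment_variable(
+    id="tl_789ghi",
+    request=[{"name": "API_BASE_URL", "value": "https://api.example.com"}],
+)
+
+```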
@@ -4615,15 +4576,7 @@ client.datasets.move(
-
-**id:** `str` — Unique identifier for Dataset.
-
-
-
-
-
--
-
-**path:** `typing.Optional[str]` — Path of the Dataset including the Dataset name, which is used as a unique identifier.
+**id:** `str` — Unique identifier for Tool.
@@ -4631,7 +4584,7 @@ client.datasets.move(
-
-**name:** `typing.Optional[str]` — Name of the Dataset, which is used as a unique identifier.
+**request:** `typing.Sequence[FileEnvironmentVariableRequestParams]`
@@ -4651,24 +4604,10 @@ client.datasets.move(
-client.datasets.list_datapoints(...)
-
--
-
-#### 📝 Description
-
-
--
-
+
client.tools.delete_environment_variable(...)
-
-List all Datapoints for the Dataset with the given ID.
-
-
-
-
-
#### 🔌 Usage
@@ -4683,15 +4622,10 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-response = client.datasets.list_datapoints(
- id="ds_b0baF1ca7652",
- size=1,
+client.tools.delete_environment_variable(
+ id="id",
+ name="name",
)
-for item in response:
- yield item
-# alternatively, you can paginate page-by-page
-for page in response.iter_pages():
- yield page
```
@@ -4707,31 +4641,7 @@ for page in response.iter_pages():
-
-**id:** `str` — Unique identifier for Dataset.
-
-
-
-
-
--
-
-**version_id:** `typing.Optional[str]` — A specific Version ID of the Dataset to retrieve.
-
-
-
-
-
--
-
-**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
-
-
-
-
-
--
-
-**page:** `typing.Optional[int]` — Page number for pagination.
+**id:** `str` — Unique identifier for File.
@@ -4739,7 +4649,7 @@ for page in response.iter_pages():
-
-**size:** `typing.Optional[int]` — Page size for pagination. Number of Datapoints to fetch.
+**name:** `str` — Name of the Environment Variable to delete.
@@ -4759,7 +4669,8 @@ for page in response.iter_pages():
-client.datasets.list_versions(...)
+## Datasets
+client.datasets.list(...)
-
@@ -4771,7 +4682,7 @@ for page in response.iter_pages():
-
-Get a list of the versions for a Dataset.
+List all Datasets.
@@ -4791,9 +4702,14 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.datasets.list_versions(
- id="ds_b0baF1ca7652",
+response = client.datasets.list(
+ size=1,
)
+for item in response:
+ yield item
+# alternatively, you can paginate page-by-page
+for page in response.iter_pages():
+ yield page
```
@@ -4809,7 +4725,7 @@ client.datasets.list_versions(
-
-**id:** `str` — Unique identifier for Dataset.
+**page:** `typing.Optional[int]` — Page offset for pagination.
@@ -4817,7 +4733,39 @@ client.datasets.list_versions(
-
-**include_datapoints:** `typing.Optional[ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints]` — If set to 'latest_saved', include datapoints for the latest saved version. Alternatively, 'latest_committed' (deprecated) includes datapoints for the latest committed version only.
+**size:** `typing.Optional[int]` — Page size for pagination. Number of Datasets to fetch.
+
+
+
+
+
+-
+
+**name:** `typing.Optional[str]` — Case-insensitive filter for Dataset name.
+
+
+
+
+
+-
+
+**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Dataset. This filter matches against both email address and name of users.
+
+
+
+
+
+-
+
+**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Datasets by.
+
+
+
+
+
+-
+
+**order:** `typing.Optional[SortOrder]` — Direction to sort by.
@@ -4837,7 +4785,7 @@ client.datasets.list_versions(
-client.datasets.delete_dataset_version(...)
+client.datasets.upsert(...)
-
@@ -4849,7 +4797,23 @@ client.datasets.list_versions(
-
-Delete a version of the Dataset.
+Create a Dataset or update it with a new version if it already exists.
+
+Datasets are identified by the `ID` or their `path`. The datapoints determine the versions of the Dataset.
+
+By default, the new Dataset version will be set to the list of Datapoints provided in
+the request. You can also create a new version by adding or removing Datapoints from an existing version
+by specifying `action` as `add` or `remove` respectively. In this case, you may specify
+the `version_id` or `environment` query parameters to identify the existing version to base
+the new version on. If neither is provided, the latest created version will be used.
+
+You can provide `version_name` and `version_description` to identify and describe your versions.
+Version names must be unique within a Dataset - attempting to create a version with a name
+that already exists will result in a 409 Conflict error.
+
+Humanloop also deduplicates Datapoints. If you try to add a Datapoint that already
+exists, it will be ignored. If you intentionally want to add a duplicate Datapoint,
+you can add a unique identifier to the Datapoint's inputs such as `{_dedupe_id: }`.
@@ -4869,9 +4833,34 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.datasets.delete_dataset_version(
- id="id",
- version_id="version_id",
+client.datasets.upsert(
+ path="datasets/support-queries",
+ datapoints=[
+ {
+ "messages": [
+ {
+ "role": "user",
+ "content": "How do i manage my organizations API keys?\n",
+ }
+ ],
+ "target": {
+ "response": 'Hey, thanks for your questions. Here are steps for how to achieve: 1. Log in to the Humanloop Dashboard \n\n2. Click on "Organization Settings."\n If you do not see this option, you might need to contact your organization admin to gain the necessary permissions.\n\n3. Within the settings or organization settings, select the option labeled "API Keys" on the left. Here you will be able to view and manage your API keys.\n\n4. You will see a list of existing API keys. You can perform various actions, such as:\n - **Generate New API Key:** Click on the "Generate New Key" button if you need a new API key.\n - **Revoke an API Key:** If you need to disable an existing key, find the key in the list and click the "Revoke" or "Delete" button.\n - **Copy an API Key:** If you need to use an existing key, you can copy it to your clipboard by clicking the "Copy" button next to the key.\n\n5. **Save and Secure API Keys:** Make sure to securely store any new or existing API keys you are using. Treat them like passwords and do not share them publicly.\n\nIf you encounter any issues or need further assistance, it might be helpful to engage with an engineer or your IT department to ensure you have the necessary permissions and support.\n\nWould you need help with anything else?'
+ },
+ },
+ {
+ "messages": [
+ {
+ "role": "user",
+ "content": "Hey, can do I use my code evaluator for monitoring my legal-copilot prompt?",
+ }
+ ],
+ "target": {
+ "response": "Hey, thanks for your questions. Here are steps for how to achieve: 1. Navigate to your Prompt dashboard. \n 2. Select the `Monitoring` button on the top right of the Prompt dashboard \n 3. Within the model select the Version of the Evaluator you want to turn on for monitoring. \n\nWould you need help with anything else?"
+ },
+ },
+ ],
+ version_name="Initial version",
+ version_description="Add two new questions and answers",
)
```
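+
+A sketch of extending an existing version instead of replacing it, using the `action` parameter documented below (the Dataset ID and Datapoint are hypothetical):
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+    api_key="YOUR_API_KEY",
+)
+# With action="add", the new version is based on the latest created
+# version (or the one named by version_id/environment) plus these
+# Datapoints; duplicates of existing Datapoints are ignored.
+client.datasets.upsert(
+    id="ds_b0baF1ca7652",
+    action="add",
+    datapoints=[
+        {
+            "messages": [
+                {"role": "user", "content": "How do I rotate an API key?"}
+            ],
+            "target": {"response": "Revoke the old key and generate a new one."},
+        }
+    ],
+    version_name="Add key-rotation question",
+)
+
+```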
@@ -4888,7 +4877,7 @@ client.datasets.delete_dataset_version(
-
-**id:** `str` — Unique identifier for Dataset.
+**datapoints:** `typing.Sequence[CreateDatapointRequestParams]` — The Datapoints to create this Dataset version with. Modify the `action` field to determine how these Datapoints are used.
@@ -4896,7 +4885,7 @@ client.datasets.delete_dataset_version(
-
-**version_id:** `str` — Unique identifier for the specific version of the Dataset.
+**version_id:** `typing.Optional[str]` — ID of the specific Dataset version to base the created Version on. Only used when `action` is `"add"` or `"remove"`.
@@ -4904,70 +4893,47 @@ client.datasets.delete_dataset_version(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed Version to base the created Version on. Only used when `action` is `"add"` or `"remove"`.
-
-
-
-
-
-
-
-client.datasets.update_dataset_version(...)
-
-#### 📝 Description
-
-
--
+**include_datapoints:** `typing.Optional[bool]` — If set to `true`, include all Datapoints in the response. Defaults to `false`. Consider using the paginated List Datapoints endpoint instead.
+
+
+
-
-Update the name or description of the Dataset version.
-
-
+**path:** `typing.Optional[str]` — Path of the Dataset, including the name. This locates the Dataset in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
-#### 🔌 Usage
-
-
-
--
-
-```python
-from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.datasets.update_dataset_version(
- id="id",
- version_id="version_id",
-)
-
-```
-
-
+**id:** `typing.Optional[str]` — ID for an existing Dataset.
+
-#### ⚙️ Parameters
-
-
-
--
+**action:** `typing.Optional[UpdateDatesetAction]`
-**id:** `str` — Unique identifier for Dataset.
+The action to take with the provided Datapoints.
+
+ - If `"set"`, the created version will only contain the Datapoints provided in this request.
+ - If `"add"`, the created version will contain the Datapoints provided in this request in addition to the Datapoints in the target version.
+ - If `"remove"`, the created version will contain the Datapoints in the target version except for the Datapoints provided in this request.
+
+If `"add"` or `"remove"`, one of the `version_id` or `environment` query parameters may be provided.
@@ -4975,7 +4941,7 @@ client.datasets.update_dataset_version(
-
-**version_id:** `str` — Unique identifier for the specific version of the Dataset.
+**attributes:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Additional fields to describe the Dataset. Helpful to separate Dataset versions from each other with details on how they were created or used.
@@ -4983,7 +4949,7 @@ client.datasets.update_dataset_version(
-
-**name:** `typing.Optional[str]` — Name of the version.
+**version_name:** `typing.Optional[str]` — Unique name for the Dataset version. Version names must be unique for a given Dataset.
@@ -4991,7 +4957,7 @@ client.datasets.update_dataset_version(
-
-**description:** `typing.Optional[str]` — Description of the version.
+**version_description:** `typing.Optional[str]` — Description of the version, e.g., the changes made in this version.
@@ -5011,7 +4977,7 @@ client.datasets.update_dataset_version(
-client.datasets.upload_csv(...)
+client.datasets.get(...)
-
@@ -5023,17 +4989,15 @@ client.datasets.update_dataset_version(
-
-Add Datapoints from a CSV file to a Dataset.
-
-This will create a new version of the Dataset with the Datapoints from the CSV file.
+Retrieve the Dataset with the given ID.
-If either `version_id` or `environment` is provided, the new version will be based on the specified version,
-with the Datapoints from the CSV file added to the existing Datapoints in the version.
-If neither `version_id` nor `environment` is provided, the new version will be based on the version
-of the Dataset that is deployed to the default Environment.
+Unless `include_datapoints` is set to `true`, the response will not include
+the Datapoints.
+Use the List Datapoints endpoint (`GET /{id}/datapoints`) to efficiently
+retrieve Datapoints for a large Dataset.
-You can optionally provide a name and description for the new version using `version_name`
-and `version_description` parameters.
+By default, the deployed version of the Dataset is returned. Use the query parameters
+`version_id` or `environment` to target a specific version of the Dataset.
@@ -5053,8 +5017,10 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.datasets.upload_csv(
- id="id",
+client.datasets.get(
+ id="ds_b0baF1ca7652",
+ version_id="dsv_6L78pqrdFi2xa",
+ include_datapoints=True,
)
```
@@ -5071,25 +5037,7 @@ client.datasets.upload_csv(
-
-**id:** `str` — Unique identifier for the Dataset
-
-
-
-
-
--
-
-**file:** `from __future__ import annotations
-
-core.File` — See core.File for more documentation
-
-
-
-
-
--
-
-**version_id:** `typing.Optional[str]` — ID of the specific Dataset version to base the created Version on.
+**id:** `str` — Unique identifier for Dataset.
@@ -5097,7 +5045,7 @@ core.File` — See core.File for more documentation
-
-**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed Version to base the created Version on.
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Dataset to retrieve.
@@ -5105,7 +5053,7 @@ core.File` — See core.File for more documentation
-
-**version_name:** `typing.Optional[str]` — Name for the new Dataset version.
+**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
@@ -5113,7 +5061,7 @@ core.File` — See core.File for more documentation
-
-**version_description:** `typing.Optional[str]` — Description for the new Dataset version.
+**include_datapoints:** `typing.Optional[bool]` — If set to `true`, include all Datapoints in the response. Defaults to `false`. Consider using the paginated List Datapoints endpoint instead.
@@ -5133,7 +5081,7 @@ core.File` — See core.File for more documentation
-client.datasets.set_deployment(...)
+client.datasets.delete(...)
-
@@ -5145,9 +5093,7 @@ core.File` — See core.File for more documentation
-
-Deploy Dataset to Environment.
-
-Set the deployed version for the specified Environment.
+Delete the Dataset with the given ID.
@@ -5167,10 +5113,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.datasets.set_deployment(
- id="ds_b0baF1ca7652",
- environment_id="staging",
- version_id="dsv_6L78pqrdFi2xa",
+client.datasets.delete(
+ id="id",
)
```
@@ -5195,22 +5139,6 @@ client.datasets.set_deployment(
-
-**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
-
-
-
-
-
--
-
-**version_id:** `str` — Unique identifier for the specific version of the Dataset.
-
-
-
-
-
--
-
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -5223,7 +5151,7 @@ client.datasets.set_deployment(
-client.datasets.remove_deployment(...)
+client.datasets.move(...)
-
@@ -5235,9 +5163,7 @@ client.datasets.set_deployment(
-
-Remove deployed Dataset from Environment.
-
-Remove the deployed version for the specified Environment.
+Move the Dataset to a different path or change the name.
@@ -5257,9 +5183,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.datasets.remove_deployment(
- id="ds_b0baF1ca7652",
- environment_id="staging",
+client.datasets.move(
+ id="id",
)
```
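+
+A sketch of passing the documented `path` parameter to relocate the Dataset (the target folder is hypothetical):
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+    api_key="YOUR_API_KEY",
+)
+# Move the Dataset into a hypothetical "archived" folder; the final
+# path segment remains the Dataset name.
+client.datasets.move(
+    id="id",
+    path="archived/support-queries",
+)
+
+```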
@@ -5284,7 +5209,15 @@ client.datasets.remove_deployment(
-
-**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
+**path:** `typing.Optional[str]` — Path of the Dataset including the Dataset name, which is used as a unique identifier.
+
+
+
+
+
+-
+
+**name:** `typing.Optional[str]` — Name of the Dataset, which is used as a unique identifier.
@@ -5304,7 +5237,7 @@ client.datasets.remove_deployment(
-client.datasets.list_environments(...)
+client.datasets.list_datapoints(...)
-
@@ -5316,7 +5249,7 @@ client.datasets.remove_deployment(
-
-List all Environments and their deployed versions for the Dataset.
+List all Datapoints for the Dataset with the given ID.
@@ -5336,9 +5269,15 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.datasets.list_environments(
- id="id",
+response = client.datasets.list_datapoints(
+ id="ds_b0baF1ca7652",
+ size=1,
)
+for item in response:
+ yield item
+# alternatively, you can paginate page-by-page
+for page in response.iter_pages():
+ yield page
```
@@ -5362,6 +5301,38 @@ client.datasets.list_environments(
-
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Dataset to retrieve.
+
+
+
+
+
+-
+
+**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
+
+
+
+
+
+-
+
+**page:** `typing.Optional[int]` — Page number for pagination.
+
+
+
+
+
+-
+
+**size:** `typing.Optional[int]` — Page size for pagination. Number of Datapoints to fetch.
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -5374,8 +5345,7 @@ client.datasets.list_environments(
-## Evaluators
-client.evaluators.log(...)
+client.datasets.list_versions(...)
-
@@ -5387,9 +5357,7 @@ client.datasets.list_environments(
-
-Submit Evaluator judgment for an existing Log.
-
-Creates a new Log. The evaluated Log will be set as the parent of the created Log.
+Get a list of the versions for a Dataset.
@@ -5409,8 +5377,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.evaluators.log(
- parent_id="parent_id",
+client.datasets.list_versions(
+ id="ds_b0baF1ca7652",
)
```
@@ -5427,7 +5395,7 @@ client.evaluators.log(
-
-**parent_id:** `str` — Identifier of the evaluated Log. The newly created Log will have this one set as parent.
+**id:** `str` — Unique identifier for Dataset.
@@ -5435,7 +5403,7 @@ client.evaluators.log(
-
-**version_id:** `typing.Optional[str]` — ID of the Evaluator version to log against.
+**include_datapoints:** `typing.Optional[ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints]` — If set to 'latest_saved', include datapoints for the latest saved version. Alternatively, 'latest_committed' (deprecated) includes datapoints for the latest committed version only.
@@ -5443,103 +5411,70 @@ client.evaluators.log(
-
-**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
-
--
-
-**path:** `typing.Optional[str]` — Path of the Evaluator, including the name. This locates the Evaluator in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
-
-
--
-**id:** `typing.Optional[str]` — ID for an existing Evaluator.
-
+
+client.datasets.delete_dataset_version(...)
-
-**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
-
-
-
+#### 📝 Description
-
-**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
-
-
-
-
-
-**output:** `typing.Optional[str]` — Generated output from the LLM. Only populated for LLM Evaluator Logs.
-
+Delete a version of the Dataset.
-
-
--
-
-**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
-
+#### 🔌 Usage
+
-
-**error:** `typing.Optional[str]` — Error message if the log is an error.
-
-
-
-
-
-**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
-
-
-
+```python
+from humanloop import Humanloop
-
--
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.datasets.delete_dataset_version(
+ id="id",
+ version_id="version_id",
+)
-**stdout:** `typing.Optional[str]` — Captured log and debug statements.
-
+```
-
-
--
-
-**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider. Only populated for LLM Evaluator Logs.
-
+#### ⚙️ Parameters
+
-
-**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received the provider. Only populated for LLM Evaluator Logs.
-
-
-
-
-
-**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
+**id:** `str` — Unique identifier for Dataset.
@@ -5547,7 +5482,7 @@ client.evaluators.log(
-
-**source:** `typing.Optional[str]` — Identifies where the model was called from.
+**version_id:** `str` — Unique identifier for the specific version of the Dataset.
@@ -5555,71 +5490,70 @@ client.evaluators.log(
-
-**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
+
+client.datasets.update_dataset_version(...)
-
-**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
-
-
-
+#### 📝 Description
-
-**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
-
-
-
-
-
-**user:** `typing.Optional[str]` — End-user ID related to the Log.
-
+Update the name or description of the Dataset version.
+
+
+
+#### 🔌 Usage
-
-**create_evaluator_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to.
-
-
-
-
-
-**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
-
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.datasets.update_dataset_version(
+ id="id",
+ version_id="version_id",
+)
+
+```
+
+
+#### ⚙️ Parameters
+
-
-**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
-
-
-
-
-
-**output_message:** `typing.Optional[ChatMessageParams]` — The message returned by the LLM. Only populated for LLM Evaluator Logs.
+**id:** `str` — Unique identifier for Dataset.
@@ -5627,7 +5561,7 @@ client.evaluators.log(
-
-**judgment:** `typing.Optional[CreateEvaluatorLogRequestJudgmentParams]` — Evaluator assessment of the Log.
+**version_id:** `str` — Unique identifier for the specific version of the Dataset.
@@ -5635,7 +5569,7 @@ client.evaluators.log(
-
-**marked_completed:** `typing.Optional[bool]` — Whether the Log has been manually marked as completed by a user.
+**name:** `typing.Optional[str]` — Name of the version.
@@ -5643,7 +5577,7 @@ client.evaluators.log(
-
-**spec:** `typing.Optional[CreateEvaluatorLogRequestSpecParams]`
+**description:** `typing.Optional[str]` — Description of the version.
@@ -5663,7 +5597,7 @@ client.evaluators.log(
-client.evaluators.list(...)
+client.datasets.upload_csv(...)
-
@@ -5675,7 +5609,17 @@ client.evaluators.log(
-
-Get a list of all Evaluators.
+Add Datapoints from a CSV file to a Dataset.
+
+This will create a new version of the Dataset with the Datapoints from the CSV file.
+
+If either `version_id` or `environment` is provided, the new version will be based on the specified version,
+with the Datapoints from the CSV file added to the existing Datapoints in the version.
+If neither `version_id` nor `environment` is provided, the new version will be based on the version
+of the Dataset that is deployed to the default Environment.
+
+You can optionally provide a name and description for the new version using `version_name`
+and `version_description` parameters.
@@ -5695,14 +5639,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-response = client.evaluators.list(
- size=1,
+client.datasets.upload_csv(
+ id="id",
)
-for item in response:
- yield item
-# alternatively, you can paginate page-by-page
-for page in response.iter_pages():
- yield page
```
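+
+A minimal sketch that also supplies the CSV payload, assuming a local `datapoints.csv`; the `file` parameter is documented below as `core.File`, which a binary file handle satisfies:
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+    api_key="YOUR_API_KEY",
+)
+# Upload the CSV and name the resulting Dataset version.
+with open("datapoints.csv", "rb") as f:
+    client.datasets.upload_csv(
+        id="id",
+        file=f,
+        version_name="csv-import-v1",
+    )
+
+```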
@@ -5718,7 +5657,7 @@ for page in response.iter_pages():
-
-**page:** `typing.Optional[int]` — Page offset for pagination.
+**id:** `str` — Unique identifier for the Dataset.
@@ -5726,7 +5665,9 @@ for page in response.iter_pages():
-
-**size:** `typing.Optional[int]` — Page size for pagination. Number of Evaluators to fetch.
+**file:** `from __future__ import annotations
+
+core.File` — See core.File for more documentation.
@@ -5734,7 +5675,7 @@ for page in response.iter_pages():
-
-**name:** `typing.Optional[str]` — Case-insensitive filter for Evaluator name.
+**version_id:** `typing.Optional[str]` — ID of the specific Dataset version to base the created Version on.
@@ -5742,7 +5683,7 @@ for page in response.iter_pages():
-
-**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Evaluator. This filter matches against both email address and name of users.
+**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed Version to base the created Version on.
@@ -5750,7 +5691,7 @@ for page in response.iter_pages():
-
-**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Evaluators by
+**version_name:** `typing.Optional[str]` — Name for the new Dataset version.
@@ -5758,7 +5699,7 @@ for page in response.iter_pages():
-
-**order:** `typing.Optional[SortOrder]` — Direction to sort by.
+**version_description:** `typing.Optional[str]` — Description for the new Dataset version.
@@ -5778,7 +5719,7 @@ for page in response.iter_pages():
-client.evaluators.upsert(...)
+client.datasets.set_deployment(...)
-
@@ -5790,13 +5731,9 @@ for page in response.iter_pages():
-
-Create an Evaluator or update it with a new version if it already exists.
-
-Evaluators are identified by the `ID` or their `path`. The spec provided determines the version of the Evaluator.
+Deploy Dataset to Environment.
-You can provide `version_name` and `version_description` to identify and describe your versions.
-Version names must be unique within an Evaluator - attempting to create a version with a name
-that already exists will result in a 409 Conflict error.
+Set the deployed version for the specified Environment.
@@ -5816,19 +5753,13 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.evaluators.upsert(
- path="Shared Evaluators/Accuracy Evaluator",
- spec={
- "arguments_type": "target_required",
- "return_type": "number",
- "evaluator_type": "python",
- "code": "def evaluate(answer, target):\n return 0.5",
- },
- version_name="simple-evaluator",
- version_description="Simple evaluator that returns 0.5",
-)
-
-```
+client.datasets.set_deployment(
+ id="ds_b0baF1ca7652",
+ environment_id="staging",
+ version_id="dsv_6L78pqrdFi2xa",
+)
+
+```
@@ -5842,23 +5773,7 @@ client.evaluators.upsert(
-
-**spec:** `EvaluatorRequestSpecParams`
-
-
-
-
-
--
-
-**path:** `typing.Optional[str]` — Path of the Evaluator, including the name. This locates the Evaluator in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
-
-
-
-
-
--
-
-**id:** `typing.Optional[str]` — ID for an existing Evaluator.
+**id:** `str` — Unique identifier for Dataset.
@@ -5866,7 +5781,7 @@ client.evaluators.upsert(
-
-**version_name:** `typing.Optional[str]` — Unique name for the Evaluator version. Version names must be unique for a given Evaluator.
+**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
@@ -5874,7 +5789,7 @@ client.evaluators.upsert(
-
-**version_description:** `typing.Optional[str]` — Description of the version, e.g., the changes made in this version.
+**version_id:** `str` — Unique identifier for the specific version of the Dataset.
@@ -5894,7 +5809,7 @@ client.evaluators.upsert(
-client.evaluators.get(...)
+client.datasets.remove_deployment(...)
-
@@ -5906,10 +5821,9 @@ client.evaluators.upsert(
-
-Retrieve the Evaluator with the given ID.
+Remove deployed Dataset from Environment.
-By default, the deployed version of the Evaluator is returned. Use the query parameters
-`version_id` or `environment` to target a specific version of the Evaluator.
+Remove the deployed version for the specified Environment.
@@ -5929,8 +5843,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.evaluators.get(
- id="ev_890bcd",
+client.datasets.remove_deployment(
+ id="ds_b0baF1ca7652",
+ environment_id="staging",
)
```
@@ -5947,15 +5862,7 @@ client.evaluators.get(
-
-**id:** `str` — Unique identifier for Evaluator.
-
-
-
-
-
--
-
-**version_id:** `typing.Optional[str]` — A specific Version ID of the Evaluator to retrieve.
+**id:** `str` — Unique identifier for Dataset.
@@ -5963,7 +5870,7 @@ client.evaluators.get(
-
-**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
+**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
@@ -5983,7 +5890,7 @@ client.evaluators.get(
-client.evaluators.delete(...)
+client.datasets.list_environments(...)
-
@@ -5995,7 +5902,7 @@ client.evaluators.get(
-
-Delete the Evaluator with the given ID.
+List all Environments and their deployed versions for the Dataset.
@@ -6015,8 +5922,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.evaluators.delete(
- id="ev_890bcd",
+client.datasets.list_environments(
+ id="id",
)
```
@@ -6033,7 +5940,7 @@ client.evaluators.delete(
-
-**id:** `str` — Unique identifier for Evaluator.
+**id:** `str` — Unique identifier for Dataset.
@@ -6053,7 +5960,8 @@ client.evaluators.delete(
-client.evaluators.move(...)
+## Evaluators
+client.evaluators.log(...)
-
@@ -6065,7 +5973,9 @@ client.evaluators.delete(
-
-Move the Evaluator to a different path or change the name.
+Submit Evaluator judgment for an existing Log.
+
+Creates a new Log. The evaluated Log will be set as the parent of the created Log.
@@ -6085,9 +5995,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.evaluators.move(
- id="ev_890bcd",
- path="new directory/new name",
+client.evaluators.log(
+ parent_id="parent_id",
)
```
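+
+A fuller sketch using the optional fields documented below, assuming a number-returning Evaluator; the judgment value and `incomplete` status are illustrative:
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+    api_key="YOUR_API_KEY",
+)
+# Attach a judgment to the evaluated Log and leave the new Log
+# incomplete so monitoring waits until it is marked complete.
+client.evaluators.log(
+    parent_id="parent_id",
+    path="Shared Evaluators/Accuracy Evaluator",
+    judgment=0.5,
+    log_status="incomplete",
+)
+
+```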
@@ -6104,7 +6013,7 @@ client.evaluators.move(
-
-**id:** `str` — Unique identifier for Evaluator.
+**parent_id:** `str` — Identifier of the evaluated Log. The newly created Log will have this one set as parent.
@@ -6112,7 +6021,7 @@ client.evaluators.move(
-
-**path:** `typing.Optional[str]` — Path of the Evaluator including the Evaluator name, which is used as a unique identifier.
+**version_id:** `typing.Optional[str]` — ID of the Evaluator version to log against.
@@ -6120,7 +6029,7 @@ client.evaluators.move(
-
-**name:** `typing.Optional[str]` — Name of the Evaluator, which is used as a unique identifier.
+**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
@@ -6128,69 +6037,3435 @@ client.evaluators.move(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**path:** `typing.Optional[str]` — Path of the Evaluator, including the name. This locates the Evaluator in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+
+-
+
+**id:** `typing.Optional[str]` — ID for an existing Evaluator.
+
+
+-
+**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
+
-
-client.evaluators.list_versions(...)
-
-#### 📝 Description
+**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
+
+
+
-
+**output:** `typing.Optional[str]` — Generated output from the LLM. Only populated for LLM Evaluator Logs.
+
+
+
+
-
-Get a list of all the versions of an Evaluator.
+**created_at:** `typing.Optional[dt.datetime]` — User-defined timestamp for when the log was created.
+
+
+
+-
+
+**error:** `typing.Optional[str]` — Error message if the log is an error.
+
-#### 🔌 Usage
+
+-
+
+**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
+
+
+
-
+**stdout:** `typing.Optional[str]` — Captured log and debug statements.
+
+
+
+
-
-```python
-from humanloop import Humanloop
+**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to the provider. Only populated for LLM Evaluator Logs.
+
+
+
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.evaluators.list_versions(
- id="ev_890bcd",
-)
+
+-
-```
+**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received from the provider. Only populated for LLM Evaluator Logs.
+
+
+
+-
+
+**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
+
-#### ⚙️ Parameters
+
+-
+
+**source:** `typing.Optional[str]` — Identifies where the model was called from.
+
+
+
+
+
+-
+
+**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
+
+
+
+
+
+-
+
+**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If `log_status` is not provided, observability picks up the Log as soon as possible. Changing this from a specified value back to unspecified is undefined behavior.
+
+
+
+
+
+-
+
+**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+
+
+
+
+-
+
+**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
+
+
+
+
+
+-
+
+**user:** `typing.Optional[str]` — End-user ID related to the Log.
+
+
+
+
+
+-
+
+**create_evaluator_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated with.
+
+
+
+
+
+-
+
+**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
+
+
+
+
+
+-
+
+**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+
+
+
+
+-
+
+**output_message:** `typing.Optional[ChatMessageParams]` — The message returned by the LLM. Only populated for LLM Evaluator Logs.
+
+
+
+
+
+-
+
+**judgment:** `typing.Optional[CreateEvaluatorLogRequestJudgmentParams]` — Evaluator assessment of the Log.
+
+
+
+
+
+-
+
+**marked_completed:** `typing.Optional[bool]` — Whether the Log has been manually marked as completed by a user.
+
+
+
+
+
+-
+
+**spec:** `typing.Optional[CreateEvaluatorLogRequestSpecParams]`
+
+
+
-
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.evaluators.list(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Get a list of all Evaluators.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+response = client.evaluators.list(
+ size=1,
+)
+for item in response:
+ yield item
+# alternatively, you can paginate page-by-page
+for page in response.iter_pages():
+ yield page
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**page:** `typing.Optional[int]` — Page offset for pagination.
+
+
+
+
+
+-
+
+**size:** `typing.Optional[int]` — Page size for pagination. Number of Evaluators to fetch.
+
+
+
+
+
+-
+
+**name:** `typing.Optional[str]` — Case-insensitive filter for Evaluator name.
+
+
+
+
+
+-
+
+**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Evaluator. This filter matches against both email address and name of users.
+
+
+
+
+
+-
+
+**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Evaluators by.
+
+
+
+
+
+-
+
+**order:** `typing.Optional[SortOrder]` — Direction to sort by.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.evaluators.upsert(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Create an Evaluator or update it with a new version if it already exists.
+
+Evaluators are identified by the `ID` or their `path`. The spec provided determines the version of the Evaluator.
+
+You can provide `version_name` and `version_description` to identify and describe your versions.
+Version names must be unique within an Evaluator - attempting to create a version with a name
+that already exists will result in a 409 Conflict error.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.evaluators.upsert(
+ path="Shared Evaluators/Accuracy Evaluator",
+ spec={
+ "arguments_type": "target_required",
+ "return_type": "number",
+ "evaluator_type": "python",
+ "code": "def evaluate(answer, target):\n return 0.5",
+ },
+ version_name="simple-evaluator",
+ version_description="Simple evaluator that returns 0.5",
+)
+
+```
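+
+A reused `version_name` makes this call return a 409 Conflict. Below is a minimal sketch of handling that when upserting idempotently, reusing the `client` from above; the `ApiError` class and its `status_code` attribute are assumptions based on the usual layout of Fern-generated SDKs:
+
+```python
+from humanloop.core.api_error import ApiError  # assumed Fern-style error class
+
+try:
+    client.evaluators.upsert(
+        path="Shared Evaluators/Accuracy Evaluator",
+        spec={
+            "arguments_type": "target_required",
+            "return_type": "number",
+            "evaluator_type": "python",
+            "code": "def evaluate(answer, target):\n    return 0.5",
+        },
+        version_name="simple-evaluator",
+    )
+except ApiError as e:
+    # a 409 means this version name is already taken for the Evaluator
+    if e.status_code != 409:
+        raise
+```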
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**spec:** `EvaluatorRequestSpecParams`
+
+
+
+
+
+-
+
+**path:** `typing.Optional[str]` — Path of the Evaluator, including the name. This locates the Evaluator in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+
+
+
+
+-
+
+**id:** `typing.Optional[str]` — ID for an existing Evaluator.
+
+
+
+
+
+-
+
+**version_name:** `typing.Optional[str]` — Unique name for the Evaluator version. Version names must be unique for a given Evaluator.
+
+
+
+
+
+-
+
+**version_description:** `typing.Optional[str]` — Description of the version, e.g., the changes made in this version.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.evaluators.get(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Retrieve the Evaluator with the given ID.
+
+By default, the deployed version of the Evaluator is returned. Use the query parameters
+`version_id` or `environment` to target a specific version of the Evaluator.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.evaluators.get(
+ id="ev_890bcd",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Evaluator.
+
+
+
+
+
+-
+
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Evaluator to retrieve.
+
+
+
+
+
+-
+
+**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.evaluators.delete(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Delete the Evaluator with the given ID.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.evaluators.delete(
+ id="ev_890bcd",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Evaluator.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.evaluators.move(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Move the Evaluator to a different path or change the name.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.evaluators.move(
+ id="ev_890bcd",
+ path="new directory/new name",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Evaluator.
+
+
+
+
+
+-
+
+**path:** `typing.Optional[str]` — Path of the Evaluator including the Evaluator name, which is used as a unique identifier.
+
+
+
+
+
+-
+
+**name:** `typing.Optional[str]` — Name of the Evaluator, which is used as a unique identifier.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.evaluators.list_versions(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Get a list of all the versions of an Evaluator.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.evaluators.list_versions(
+ id="ev_890bcd",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for the Evaluator.
+
+
+
+
+
+-
+
+**evaluator_aggregates:** `typing.Optional[bool]` — Whether to include Evaluator aggregate results for the versions in the response.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.evaluators.delete_evaluator_version(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Delete a version of the Evaluator.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.evaluators.delete_evaluator_version(
+ id="id",
+ version_id="version_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Evaluator.
+
+
+
+
+
+-
+
+**version_id:** `str` — Unique identifier for the specific version of the Evaluator.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.evaluators.update_evaluator_version(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Update the name or description of the Evaluator version.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.evaluators.update_evaluator_version(
+ id="id",
+ version_id="version_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Evaluator.
+
+
+
+
+
+-
+
+**version_id:** `str` — Unique identifier for the specific version of the Evaluator.
+
+
+
+
+
+-
+
+**name:** `typing.Optional[str]` — Name of the version.
+
+
+
+
+
+-
+
+**description:** `typing.Optional[str]` — Description of the version.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.evaluators.set_deployment(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Deploy Evaluator to an Environment.
+
+Set the deployed version for the specified Environment. This Evaluator
+will be used for calls made to the Evaluator in this Environment.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.evaluators.set_deployment(
+ id="ev_890bcd",
+ environment_id="staging",
+ version_id="evv_012def",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Evaluator.
+
+
+
+
+
+-
+
+**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
+
+
+
+
+
+-
+
+**version_id:** `str` — Unique identifier for the specific version of the Evaluator.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.evaluators.remove_deployment(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Remove deployed Evaluator from the Environment.
+
+Remove the deployed version for the specified Environment. This Evaluator
+will no longer be used for calls made to the Evaluator in this Environment.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.evaluators.remove_deployment(
+ id="ev_890bcd",
+ environment_id="staging",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Evaluator.
+
+
+
+
+
+-
+
+**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.evaluators.list_environments(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+List all Environments and their deployed versions for the Evaluator.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.evaluators.list_environments(
+ id="ev_890bcd",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Evaluator.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.evaluators.update_monitoring(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Activate and deactivate Evaluators for monitoring the Evaluator.
+
+An activated Evaluator will automatically be run on all new Logs
+within the Evaluator for monitoring purposes.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.evaluators.update_monitoring(
+ id="id",
+)
+
+```
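+
+The example above only identifies the Evaluator; to change monitoring, also pass `activate` and/or `deactivate` lists of Evaluator Version IDs. A sketch reusing the `client` from above, with hypothetical version IDs:
+
+```python
+client.evaluators.update_monitoring(
+    id="ev_890bcd",
+    # both version IDs below are hypothetical
+    activate=[{"evaluator_version_id": "evv_012def"}],
+    deactivate=[{"evaluator_version_id": "evv_345ghi"}],
+)
+```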
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str`
+
+
+
+
+
+-
+
+**activate:** `typing.Optional[
+ typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]
+]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+
+
+
+
+
+-
+
+**deactivate:** `typing.Optional[
+ typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]
+]` — Evaluators to deactivate. These will not be run on new Logs.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## Flows
+client.flows.log(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Log to a Flow.
+
+You can use the query parameters `version_id` or `environment` to target
+an existing version of the Flow. Otherwise, the default deployed version will be chosen.
+
+If you create the Flow Log with a `log_status` of `incomplete`, you should later update it to `complete`
+in order to trigger Evaluators.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+import datetime
+
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.log(
+ id="fl_6o701g4jmcanPVHxdqD0O",
+ flow={
+ "attributes": {
+ "prompt": {
+ "template": "You are a helpful assistant helping with medical anamnesis",
+ "model": "gpt-4o",
+ "temperature": 0.8,
+ },
+ "tool": {
+ "name": "retrieval_tool_v3",
+ "description": "Retrieval tool for MedQA.",
+ "source_code": "def retrieval_tool(question: str) -> str:\n pass\n",
+ },
+ }
+ },
+ inputs={
+ "question": "Patient with a history of diabetes and hypertension presents with chest pain and shortness of breath."
+ },
+ output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.",
+ log_status="incomplete",
+ start_time=datetime.datetime.fromisoformat(
+ "2024-07-08 21:40:35+00:00",
+ ),
+ end_time=datetime.datetime.fromisoformat(
+ "2024-07-08 21:40:39+00:00",
+ ),
+)
+
+```
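+
+Because this Log is created with `log_status="incomplete"`, monitoring Evaluators will wait until it is marked complete. A minimal sketch of the follow-up call, assuming `flow_log` is the response object returned by the `log` call above:
+
+```python
+client.flows.update_log(
+    log_id=flow_log.id,  # assumed: the ID returned by the `log` call above
+    output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.",
+    log_status="complete",
+)
+```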
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Flow to log to.
+
+
+
+
+
+-
+
+**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
+
+
+
+
+
+-
+
+**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — List of chat messages that were used as an input to the Flow.
+
+
+
+
+
+-
+
+**output_message:** `typing.Optional[ChatMessageParams]` — The output message returned by this Flow.
+
+
+
+
+
+-
+
+**run_id:** `typing.Optional[str]` — Unique identifier for the Run to associate the Log to.
+
+
+
+
+
+-
+
+**path:** `typing.Optional[str]` — Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+
+
+
+
+-
+
+**id:** `typing.Optional[str]` — ID for an existing Flow.
+
+
+
+
+
+-
+
+**start_time:** `typing.Optional[dt.datetime]` — The start time of the Trace. Will be updated if a child Log with an earlier start time is added.
+
+
+
+
+
+-
+
+**end_time:** `typing.Optional[dt.datetime]` — The end time of the Trace. Will be updated if a child Log with a later end time is added.
+
+
+
+
+
+-
+
+**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+
+
+
+
+-
+
+**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
+
+
+
+
+
+-
+
+**error:** `typing.Optional[str]` — Error message if the log is an error.
+
+
+
+
+
+-
+
+**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
+
+
+
+
+
+-
+
+**stdout:** `typing.Optional[str]` — Captured log and debug statements.
+
+
+
+
+
+-
+
+**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider.
+
+
+
+
+
+-
+
+**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received from the provider.
+
+
+
+
+
+-
+
+**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
+
+
+
+
+
+-
+
+**source:** `typing.Optional[str]` — Identifies where the model was called from.
+
+
+
+
+
+-
+
+**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
+
+
+
+
+
+-
+
+**log_status:** `typing.Optional[LogStatus]` — Status of the Flow Log. When a Log is updated from `incomplete` to `complete`, it becomes available to Monitoring Evaluators. Flow Logs cannot have an unspecified status: they must start as `incomplete` to allow children to be added. Provide `complete` if you don't intend to add children to the trace.
+
+
+
+
+
+-
+
+**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+
+
+
+
+-
+
+**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
+
+
+
+
+
+-
+
+**user:** `typing.Optional[str]` — End-user ID related to the Log.
+
+
+
+
+
+-
+
+**flow_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated with.
+
+
+
+
+
+-
+
+**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
+
+
+
+
+
+-
+
+**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+
+
+
+
+-
+
+**flow:** `typing.Optional[FlowKernelRequestParams]` — Flow used to generate the Trace.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.update_log(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Update the status, inputs, output of a Flow Log.
+
+Marking a Flow Log as complete will trigger any monitoring Evaluators to run.
+Inputs and output (or error) must be provided in order to mark it as complete.
+
+The end_time log attribute will be set to match the time the log is marked as complete.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.update_log(
+ log_id="medqa_experiment_0001",
+ inputs={
+ "question": "Patient with a history of diabetes and normal tension presents with chest pain and shortness of breath."
+ },
+ output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.",
+ log_status="complete",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**log_id:** `str` — Unique identifier of the Flow Log.
+
+
+
+
+
+-
+
+**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — List of chat messages that were used as an input to the Flow.
+
+
+
+
+
+-
+
+**output_message:** `typing.Optional[ChatMessageParams]` — The output message returned by this Flow.
+
+
+
+
+
+-
+
+**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the Flow Log.
+
+
+
+
+
+-
+
+**output:** `typing.Optional[str]` — The output of the Flow Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`.
+
+
+
+
+
+-
+
+**error:** `typing.Optional[str]` — The error message of the Flow Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`.
+
+
+
+
+
+-
+
+**log_status:** `typing.Optional[LogStatus]` — Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.get(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Retrieve the Flow with the given ID.
+
+By default, the deployed version of the Flow is returned. Use the query parameters
+`version_id` or `environment` to target a specific version of the Flow.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.get(
+ id="fl_6o701g4jmcanPVHxdqD0O",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Flow.
+
+
+
+
+
+-
+
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Flow to retrieve.
+
+
+
+
+
+-
+
+**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.delete(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Delete the Flow with the given ID.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.delete(
+ id="fl_6o701g4jmcanPVHxdqD0O",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Flow.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.move(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Move the Flow to a different path or change the name.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.move(
+ id="fl_6o701g4jmcanPVHxdqD0O",
+ path="new directory/new name",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Flow.
+
+
+
+
+
+-
+
+**path:** `typing.Optional[str]` — Path of the Flow including the Flow name, which is used as a unique identifier.
+
+
+
+
+
+-
+
+**name:** `typing.Optional[str]` — Name of the Flow.
+
+
+
+
+
+-
+
+**directory_id:** `typing.Optional[str]` — Unique identifier for the Directory to move Flow to. Starts with `dir_`.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.list(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Get a list of Flows.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+response = client.flows.list(
+ size=1,
+)
+for item in response:
+ print(item)
+# alternatively, you can paginate page-by-page
+for page in response.iter_pages():
+ print(page)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**page:** `typing.Optional[int]` — Page number for pagination.
+
+
+
+
+
+-
+
+**size:** `typing.Optional[int]` — Page size for pagination. Number of Flows to fetch.
+
+
+
+
+
+-
+
+**name:** `typing.Optional[str]` — Case-insensitive filter for Flow name.
+
+
+
+
+
+-
+
+**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Flow. This filter matches against both email address and name of users.
+
+
+
+
+
+-
+
+**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Flows by.
+
+
+
+
+
+-
+
+**order:** `typing.Optional[SortOrder]` — Direction to sort by.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.upsert(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Create or update a Flow.
+
+Flows can also be identified by the `ID` or their `path`.
+
+You can provide `version_name` and `version_description` to identify and describe your versions.
+Version names must be unique within a Flow - attempting to create a version with a name
+that already exists will result in a 409 Conflict error.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.upsert(
+ path="Personal Projects/MedQA Flow",
+ attributes={
+ "prompt": {
+ "template": "You are a helpful medical assistant helping with medical anamnesis. Answer {{question}}",
+ "model": "gpt-4o",
+ "temperature": 0.8,
+ },
+ "tool": {
+ "name": "retrieval_tool_v3",
+ "description": "Retrieval tool for MedQA.",
+ "source_code": "def retrieval_tool(question: str) -> str:\n pass\n",
+ },
+ "version_name": "medqa-flow-v1",
+ "version_description": "Initial version",
+ },
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**attributes:** `typing.Dict[str, typing.Optional[typing.Any]]` — A key-value object identifying the Flow Version.
+
+
+
+
+
+-
+
+**path:** `typing.Optional[str]` — Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+
+
+
+
+-
+
+**id:** `typing.Optional[str]` — ID for an existing Flow.
+
+
+
+
+
+-
+
+**version_name:** `typing.Optional[str]` — Unique name for the Flow version. Version names must be unique for a given Flow.
+
+
+
+
+
+-
+
+**version_description:** `typing.Optional[str]` — Description of the version, e.g., the changes made in this version.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.list_versions(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Get a list of all the versions of a Flow.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.list_versions(
+ id="fl_6o701g4jmcanPVHxdqD0O",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Flow.
+
+
+
+
+
+-
+
+**evaluator_aggregates:** `typing.Optional[bool]` — Whether to include Evaluator aggregate results for the versions in the response.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.delete_flow_version(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Delete a version of the Flow.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.delete_flow_version(
+ id="id",
+ version_id="version_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Flow.
+
+
+
+
+
+-
+
+**version_id:** `str` — Unique identifier for the specific version of the Flow.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.update_flow_version(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Update the name or description of the Flow version.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.update_flow_version(
+ id="id",
+ version_id="version_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Flow.
+
+
+
+
+
+-
+
+**version_id:** `str` — Unique identifier for the specific version of the Flow.
+
+
+
+
+
+-
+
+**name:** `typing.Optional[str]` — Name of the version.
+
+
+
+
+
+-
+
+**description:** `typing.Optional[str]` — Description of the version.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.set_deployment(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Deploy Flow to an Environment.
+
+Set the deployed version for the specified Environment. This Flow
+will be used for calls made to the Flow in this Environment.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.set_deployment(
+ id="fl_6o701g4jmcanPVHxdqD0O",
+ environment_id="staging",
+ version_id="flv_6o701g4jmcanPVHxdqD0O",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Flow.
+
+
+
+
+
+-
+
+**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
+
+
+
+
+
+-
+
+**version_id:** `str` — Unique identifier for the specific version of the Flow.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.remove_deployment(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Remove deployed Flow from the Environment.
+
+Remove the deployed version for the specified Environment. This Flow
+will no longer be used for calls made to the Flow in this Environment.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.remove_deployment(
+ id="fl_6o701g4jmcanPVHxdqD0O",
+ environment_id="staging",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Flow.
+
+
+
+
+
+-
+
+**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.list_environments(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+List all Environments and their deployed versions for the Flow.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.list_environments(
+ id="fl_6o701g4jmcanPVHxdqD0O",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Flow.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.update_monitoring(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Activate and deactivate Evaluators for monitoring the Flow.
+
+An activated Evaluator will automatically be run on all new "completed" Logs
+within the Flow for monitoring purposes.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.update_monitoring(
+ id="fl_6o701g4jmcanPVHxdqD0O",
+ activate=[{"evaluator_version_id": "evv_1abc4308abd"}],
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str`
+
+
+
+
+
+-
+
+**activate:** `typing.Optional[
+ typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]
+]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+
+
+
+
+
+-
+
+**deactivate:** `typing.Optional[
+ typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]
+]` — Evaluators to deactivate. These will not be run on new Logs.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## Agents
+client.agents.log(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Create an Agent Log.
+
+You can use the query parameters `version_id` or `environment` to target
+an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete`
+in order to trigger Evaluators.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.agents.log()
+
+```
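+
+The bare call above logs against the default deployed Agent with no data attached. A fuller sketch, assuming a hypothetical Agent path and payload:
+
+```python
+client.agents.log(
+    path="My Agents/Support Agent",  # hypothetical path
+    messages=[{"role": "user", "content": "How do I reset my password?"}],
+    output="You can reset it from the account settings page.",
+    log_status="incomplete",
+)
+```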
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Agent to log to.
+
+
+
+
+
+-
+
+**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
+
+
+
+
+
+-
+
+**run_id:** `typing.Optional[str]` — Unique identifier for the Run to associate the Log to.
+
+
+
+
+
+-
+
+**path:** `typing.Optional[str]` — Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+
+
+
+
+-
+
+**id:** `typing.Optional[str]` — ID for an existing Agent.
+
+
+
+
+
+-
+
+**output_message:** `typing.Optional[ChatMessageParams]` — The message returned by the provider.
+
+
+
+
+
+-
+
+**prompt_tokens:** `typing.Optional[int]` — Number of tokens in the prompt used to generate the output.
+
+
+
+
+
+-
+
+**reasoning_tokens:** `typing.Optional[int]` — Number of reasoning tokens used to generate the output.
+
+
+
+
+
+-
+
+**output_tokens:** `typing.Optional[int]` — Number of tokens in the output generated by the model.
+
+
+
+
+
+-
+
+**prompt_cost:** `typing.Optional[float]` — Cost in dollars associated with the tokens in the prompt.
+
+
+
+
+
+-
+
+**output_cost:** `typing.Optional[float]` — Cost in dollars associated with the tokens in the output.
+
+
+
+
+
+-
+
+**finish_reason:** `typing.Optional[str]` — Reason the generation finished.
+
+
+
+
+
+-
+
+**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — The messages passed to the provider chat endpoint.
+
+
+
+
+
+-
+
+**tool_choice:** `typing.Optional[AgentLogRequestToolChoiceParams]`
+
+Controls how the model uses tools. The following options are supported:
+- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+- `'required'` means the model must call one or more of the provided tools.
+- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+
+
+
+
+-
+
+**agent:** `typing.Optional[AgentLogRequestAgentParams]`
+
+The agent configuration to use. Two formats are supported:
+- An `AgentKernelRequest` object containing the agent configuration
+- A string containing a serialized .agent file
+A new Agent version will be created if the provided details are new.
+
+
+
+
+
+-
+
+**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
+
+
+
+
+
+-
+
+**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
+
+
+
+
+
+-
+
+**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+
+
+
+
+-
+
+**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
+
+
+
+
+
+-
+
+**error:** `typing.Optional[str]` — Error message if the log is an error.
+
+
+
+
+
+-
+
+**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
+
+
+
+
+
+-
+
+**stdout:** `typing.Optional[str]` — Captured log and debug statements.
+
+
+
+
+
+-
+
+**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider.
+
+
+
+
+
+-
+
+**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received from the provider.
+
+
+
+
+
+-
+
+**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
+
+
+
+
+
+-
+
+**source:** `typing.Optional[str]` — Identifies where the model was called from.
+
+
+
+
+
+-
+
+**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
+
+
+
+
+
+-
+
+**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+
+
+
+
+-
+
+**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+
+
+
+
+-
+
+**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
+
+
+
+
+
+-
+
+**user:** `typing.Optional[str]` — End-user ID related to the Log.
+
+
+
+
+
+-
+
+**agent_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated with.
+
+
+
+
+
+-
+
+**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
+
+
+
+
+
+-
+
+**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.agents.update_log(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Update a Log.
+
+Update the details of a Log with the given ID.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.agents.update_log(
+ id="id",
+ log_id="log_id",
+)
+
+```
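+
+A fuller sketch, marking an Agent Log complete with its final output; both IDs are hypothetical:
+
+```python
+client.agents.update_log(
+    id="ag_123abc",
+    log_id="log_456def",
+    output="Final answer produced by the agent.",
+    log_status="complete",
+)
+```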
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Agent.
+
+
+
+
+
+-
+
+**log_id:** `str` — Unique identifier for the Log.
+
+
+
+
+
+-
+
+**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — List of chat messages that were used as an input to the Agent.
+
+
+
+
+
+-
+
+**output_message:** `typing.Optional[ChatMessageParams]` — The output message returned by this Agent.
+
+
+
+
+
+-
+
+**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the Agent Log.
+
+
+
+
+
+-
+
+**output:** `typing.Optional[str]` — The output of the Agent Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`.
+
+
+
+
+
+-
+
+**error:** `typing.Optional[str]` — The error message of the Agent Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`.
+
+
+
+
+
+-
+
+**log_status:** `typing.Optional[LogStatus]` — Status of the Agent Log. When an Agent Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.agents.call_stream(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Call an Agent.
+
+Calling an Agent calls the model provider before logging
+the request, responses and metadata to Humanloop.
+
+You can use the query parameters `version_id` or `environment` to target
+an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+Instead of targeting an existing version explicitly, you can pass in
+Agent details in the request body. In this case, we will check if the details correspond
+to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+when you are storing or deriving your Agent details in code.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+response = client.agents.call_stream()
+for chunk in response.data:
+ print(chunk)
+
+```
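+
+A fuller sketch, targeting a hypothetical Agent path and consuming the stream:
+
+```python
+response = client.agents.call_stream(
+    path="My Agents/Support Agent",  # hypothetical path
+    messages=[{"role": "user", "content": "How do I reset my password?"}],
+)
+for chunk in response.data:
+    print(chunk)
+```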
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Agent to log to.
+
+
+
+
+
+-
+
+**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
+
+
+
+
+
+-
+
+**path:** `typing.Optional[str]` — Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+
+
+
+
+-
+
+**id:** `typing.Optional[str]` — ID for an existing Agent.
+
+
+
+
+
+-
+
+**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — The messages passed to the provider chat endpoint.
+
+
+
+
+
+-
+
+**tool_choice:** `typing.Optional[AgentsCallStreamRequestToolChoiceParams]`
+
+Controls how the model uses tools. The following options are supported:
+- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+- `'required'` means the model must call one or more of the provided tools.
+- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+
+
+
+
+-
+
+**agent:** `typing.Optional[AgentsCallStreamRequestAgentParams]`
+
+The agent configuration to use. Two formats are supported:
+- An `AgentKernelRequest` object containing the agent configuration
+- A string containing a serialized .agent file
+A new Agent version will be created if the provided details are new.
+
+
+
+
+
+-
+
+**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
+
+
+
+
+
+-
+
+**source:** `typing.Optional[str]` — Identifies where the model was called from.
+
+
+
+
+
+-
+
+**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
+
+
+
+
-
-**id:** `str` — Unique identifier for the Evaluator.
+**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
@@ -6198,7 +9473,7 @@ client.evaluators.list_versions(
-
-**evaluator_aggregates:** `typing.Optional[bool]` — Whether to include Evaluator aggregate results for the versions in the response
+**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
@@ -6206,70 +9481,71 @@ client.evaluators.list_versions(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
-
+
+-
+**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
-
-client.evaluators.delete_evaluator_version(...)
-
-#### 📝 Description
+**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
+
+
+
-
+**user:** `typing.Optional[str]` — End-user ID related to the Log.
+
+
+
+
-
-Delete a version of the Evaluator.
-
-
+**agents_call_stream_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated with.
+
-#### 🔌 Usage
-
-
+**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
+
+
+
+
-
-```python
-from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.evaluators.delete_evaluator_version(
- id="id",
- version_id="version_id",
-)
-
-```
-
-
+**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
-#### ⚙️ Parameters
-
-
+**provider_api_keys:** `typing.Optional[ProviderApiKeysParams]` — API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+
+
+
-
-**id:** `str` — Unique identifier for Evaluator.
+**return_inputs:** `typing.Optional[bool]` — Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
@@ -6277,7 +9553,7 @@ client.evaluators.delete_evaluator_version(
-
-**version_id:** `str` — Unique identifier for the specific version of the Evaluator.
+**include_trace_children:** `typing.Optional[bool]` — If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
@@ -6297,7 +9573,7 @@ client.evaluators.delete_evaluator_version(
-client.evaluators.update_evaluator_version(...)
+client.agents.call(...)
-
@@ -6309,7 +9585,18 @@ client.evaluators.delete_evaluator_version(
-
-Update the name or description of the Evaluator version.
+Call an Agent.
+
+Calling an Agent calls the model provider before logging
+the request, responses and metadata to Humanloop.
+
+You can use the query parameters `version_id` or `environment` to target
+an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+Instead of targeting an existing version explicitly, you can pass in
+Agent details in the request body. In this case, we will check if the details correspond
+to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+when you are storing or deriving your Agent details in code.
@@ -6329,10 +9616,7 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.evaluators.update_evaluator_version(
- id="id",
- version_id="version_id",
-)
+client.agents.call()
```
@@ -6348,7 +9632,7 @@ client.evaluators.update_evaluator_version(
-
-**id:** `str` — Unique identifier for Evaluator.
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Agent to log to.
@@ -6356,7 +9640,7 @@ client.evaluators.update_evaluator_version(
-
-**version_id:** `str` — Unique identifier for the specific version of the Evaluator.
+**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
@@ -6364,7 +9648,7 @@ client.evaluators.update_evaluator_version(
-
-**name:** `typing.Optional[str]` — Name of the version.
+**path:** `typing.Optional[str]` — Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
@@ -6372,7 +9656,7 @@ client.evaluators.update_evaluator_version(
-
-**description:** `typing.Optional[str]` — Description of the version.
+**id:** `typing.Optional[str]` — ID for an existing Agent.
@@ -6380,74 +9664,58 @@ client.evaluators.update_evaluator_version(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — The messages passed to the provider chat endpoint.
-
-
-
-
-
-
-
-client.evaluators.set_deployment(...)
-
-#### 📝 Description
+**tool_choice:** `typing.Optional[AgentsCallRequestToolChoiceParams]`
-
--
+Controls how the model uses tools. The following options are supported:
+- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+- `'required'` means the model must call one or more of the provided tools.
+- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+
+
-
-Deploy Evaluator to an Environment.
+**agent:** `typing.Optional[AgentsCallRequestAgentParams]`
-Set the deployed version for the specified Environment. This Evaluator
-will be used for calls made to the Evaluator in this Environment.
-
-
+The agent configuration to use. Two formats are supported:
+- An `AgentKernelRequest` object containing the agent configuration
+- A string containing a serialized .agent file
+A new Agent version will be created if the provided details are new.
+
-#### 🔌 Usage
-
-
--
-
-
-```python
-from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.evaluators.set_deployment(
- id="ev_890bcd",
- environment_id="staging",
- version_id="evv_012def",
-)
-
-```
-
-
+**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
+
-#### ⚙️ Parameters
-
-
+**source:** `typing.Optional[str]` — Identifies where the model was called from.
+
+
+
+
-
-**id:** `str` — Unique identifier for Evaluator.
+**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
@@ -6455,7 +9723,7 @@ client.evaluators.set_deployment(
-
-**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
+**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
@@ -6463,7 +9731,7 @@ client.evaluators.set_deployment(
-
-**version_id:** `str` — Unique identifier for the specific version of the Evaluator.
+**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
@@ -6471,73 +9739,71 @@ client.evaluators.set_deployment(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
-
+
+-
+**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
-
-client.evaluators.remove_deployment(...)
-
-#### 📝 Description
+**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
+
+
+
-
+**user:** `typing.Optional[str]` — End-user ID related to the Log.
+
+
+
+
-
-Remove deployed Evaluator from the Environment.
-
-Remove the deployed version for the specified Environment. This Evaluator
-will no longer be used for calls made to the Evaluator in this Environment.
-
-
+**agents_call_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated with.
+
-#### 🔌 Usage
-
-
+**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
+
+
+
+
-
-```python
-from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.evaluators.remove_deployment(
- id="ev_890bcd",
- environment_id="staging",
-)
-
-```
-
-
+**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
-#### ⚙️ Parameters
-
-
+**provider_api_keys:** `typing.Optional[ProviderApiKeysParams]` — API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+
+
+
-
-**id:** `str` — Unique identifier for Evaluator.
+**return_inputs:** `typing.Optional[bool]` — Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
@@ -6545,7 +9811,7 @@ client.evaluators.remove_deployment(
-
-**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
+**include_trace_children:** `typing.Optional[bool]` — If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
@@ -6565,7 +9831,7 @@ client.evaluators.remove_deployment(
-client.evaluators.list_environments(...)
+client.agents.continue_stream(...)
-
@@ -6577,7 +9843,15 @@ client.evaluators.remove_deployment(
-
-List all Environments and their deployed versions for the Evaluator.
+Continue an incomplete Agent call.
+
+This endpoint allows continuing an existing incomplete Agent call, using the context
+from the previous interaction. The Agent will resume processing from where it left off.
+
+The original log must be in an incomplete state to be continued.
+
+The messages in the request will be appended
+to the original messages in the log.
@@ -6597,9 +9871,12 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.evaluators.list_environments(
- id="ev_890bcd",
+response = client.agents.continue_stream(
+ log_id="log_id",
+ messages=[{"role": "user"}],
)
+for chunk in response.data:
+ print(chunk)
```
@@ -6615,7 +9892,31 @@ client.evaluators.list_environments(
-
-**id:** `str` — Unique identifier for Evaluator.
+**log_id:** `str` — This identifies the Agent Log to continue.
+
+
+
+
+
+-
+
+**messages:** `typing.Sequence[ChatMessageParams]` — The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls.
+
+
+
+
+
+-
+
+**provider_api_keys:** `typing.Optional[ProviderApiKeysParams]` — API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+
+
+
+
+-
+
+**include_trace_children:** `typing.Optional[bool]` — If true, populate `trace_children` for the returned Agent Log. Defaults to false.
@@ -6635,7 +9936,7 @@ client.evaluators.list_environments(
-client.evaluators.update_monitoring(...)
+client.agents.continue_(...)
-
@@ -6647,10 +9948,15 @@ client.evaluators.list_environments(
-
-Activate and deactivate Evaluators for monitoring the Evaluator.
+Continue an incomplete Agent call.
-An activated Evaluator will automatically be run on all new Logs
-within the Evaluator for monitoring purposes.
+This endpoint allows continuing an existing incomplete Agent call, using the context
+from the previous interaction. The Agent will resume processing from where it left off.
+
+The original log must be in an incomplete state to be continued.
+
+The messages in the request will be appended
+to the original messages in the log.
@@ -6670,8 +9976,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.evaluators.update_monitoring(
- id="id",
+client.agents.continue_(
+ log_id="log_id",
+ messages=[{"role": "user"}],
)
```
@@ -6688,7 +9995,7 @@ client.evaluators.update_monitoring(
-
-**id:** `str`
+**log_id:** `str` — This identifies the Agent Log to continue.
@@ -6696,9 +10003,7 @@ client.evaluators.update_monitoring(
-
-**activate:** `typing.Optional[
- typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]
-]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+**messages:** `typing.Sequence[ChatMessageParams]` — The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls.
@@ -6706,9 +10011,15 @@ client.evaluators.update_monitoring(
-
-**deactivate:** `typing.Optional[
- typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]
-]` — Evaluators to deactivate. These will not be run on new Logs.
+**provider_api_keys:** `typing.Optional[ProviderApiKeysParams]` — API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+
+
+
+
+-
+
+**include_trace_children:** `typing.Optional[bool]` — If true, populate `trace_children` for the returned Agent Log. Defaults to false.
@@ -6728,8 +10039,7 @@ client.evaluators.update_monitoring(
-## Flows
-client.flows.log(...)
+client.agents.list(...)
-
@@ -6741,13 +10051,7 @@ client.evaluators.update_monitoring(
-
-Log to a Flow.
-
-You can use query parameters `version_id`, or `environment`, to target
-an existing version of the Flow. Otherwise, the default deployed version will be chosen.
-
-If you create the Flow Log with a `log_status` of `incomplete`, you should later update it to `complete`
-in order to trigger Evaluators.
+Get a list of all Agents.
@@ -6762,41 +10066,12 @@ in order to trigger Evaluators.
-
```python
-import datetime
-
from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.log(
- id="fl_6o701g4jmcanPVHxdqD0O",
- flow={
- "attributes": {
- "prompt": {
- "template": "You are a helpful assistant helping with medical anamnesis",
- "model": "gpt-4o",
- "temperature": 0.8,
- },
- "tool": {
- "name": "retrieval_tool_v3",
- "description": "Retrieval tool for MedQA.",
- "source_code": "def retrieval_tool(question: str) -> str:\n pass\n",
- },
- }
- },
- inputs={
- "question": "Patient with a history of diabetes and hypertension presents with chest pain and shortness of breath."
- },
- output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.",
- log_status="incomplete",
- start_time=datetime.datetime.fromisoformat(
- "2024-07-08 19:40:35+00:00",
- ),
- end_time=datetime.datetime.fromisoformat(
- "2024-07-08 19:40:39+00:00",
- ),
-)
+client.agents.list()
```
@@ -6812,7 +10087,7 @@ client.flows.log(
-
-**version_id:** `typing.Optional[str]` — A specific Version ID of the Flow to log to.
+**page:** `typing.Optional[int]` — Page number for pagination.
@@ -6820,7 +10095,7 @@ client.flows.log(
-
-**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
+**size:** `typing.Optional[int]` — Page size for pagination. Number of Agents to fetch.
@@ -6828,7 +10103,7 @@ client.flows.log(
-
-**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — List of chat messages that were used as an input to the Flow.
+**name:** `typing.Optional[str]` — Case-insensitive filter for Agent name.
@@ -6836,7 +10111,7 @@ client.flows.log(
-
-**output_message:** `typing.Optional[ChatMessageParams]` — The output message returned by this Flow.
+**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users.
@@ -6844,7 +10119,7 @@ client.flows.log(
-
-**run_id:** `typing.Optional[str]` — Unique identifier for the Run to associate the Log to.
+**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Agents by.
@@ -6852,7 +10127,7 @@ client.flows.log(
-
-**path:** `typing.Optional[str]` — Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+**order:** `typing.Optional[SortOrder]` — Direction to sort by.
@@ -6860,71 +10135,76 @@ client.flows.log(
-
-**id:** `typing.Optional[str]` — ID for an existing Flow.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**start_time:** `typing.Optional[dt.datetime]` — The start time of the Trace. Will be updated if a child Log with an earlier start time is added.
-
+
+client.agents.upsert(...)
-
-**end_time:** `typing.Optional[dt.datetime]` — The end time of the Trace. Will be updated if a child Log with a later end time is added.
-
-
-
+#### 📝 Description
-
-**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
-
-
-
-
-
-**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
-
+Create an Agent or update it with a new version if it already exists.
+
+Agents are identified by the `ID` or their `path`. The parameters (e.g. the template, temperature, model) and
+tools determine the versions of the Agent.
+
+You can provide `version_name` and `version_description` to identify and describe your versions.
+Version names must be unique within an Agent - attempting to create a version with a name
+that already exists will result in a 409 Conflict error.
+
+
+#### 🔌 Usage
+
-
-**error:** `typing.Optional[str]` — Error message if the log is an error.
-
-
-
-
-
-**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
-
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.agents.upsert(
+ model="model",
+)
+
+```
+
+
+#### ⚙️ Parameters
+
-
-**stdout:** `typing.Optional[str]` — Captured log and debug statements.
-
-
-
-
-
-**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider.
+**model:** `str` — The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
@@ -6932,7 +10212,7 @@ client.flows.log(
-
-**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received the provider.
+**path:** `typing.Optional[str]` — Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
@@ -6940,7 +10220,7 @@ client.flows.log(
-
-**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
+**id:** `typing.Optional[str]` — ID for an existing Agent.
@@ -6948,7 +10228,7 @@ client.flows.log(
-
-**source:** `typing.Optional[str]` — Identifies where the model was called from.
+**endpoint:** `typing.Optional[ModelEndpoints]` — The provider model endpoint used.
@@ -6956,7 +10236,14 @@ client.flows.log(
-
-**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
+**template:** `typing.Optional[AgentRequestTemplateParams]`
+
+The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+For completion models, provide a prompt template as a string.
+
+Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
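+
+For illustration, a minimal chat template with one input variable might be provided like this (the path and message contents are illustrative, not taken from the reference):
+
+```python
+client.agents.upsert(
+    model="gpt-4o",
+    path="Assistants/Support Agent",
+    template=[
+        {"role": "system", "content": "You are a helpful assistant."},
+        {"role": "user", "content": "{{question}}"},
+    ],
+)
+```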
@@ -6964,7 +10251,7 @@ client.flows.log(
-
-**log_status:** `typing.Optional[LogStatus]` — Status of the Flow Log. When a Log is updated from `incomplete` to `complete`, it becomes available to Monitoring Evaluators. Flow Logs cannot have an unspecified status: they must start as `incomplete` to allow children to be added. Provide `complete` if you don't intend to add children to the trace.
+**template_language:** `typing.Optional[TemplateLanguage]` — The template language to use for rendering the template.
@@ -6972,7 +10259,7 @@ client.flows.log(
-
-**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+**provider:** `typing.Optional[ModelProviders]` — The company providing the underlying model service.
@@ -6980,7 +10267,7 @@ client.flows.log(
-
-**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
+**max_tokens:** `typing.Optional[int]` — The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt.
@@ -6988,7 +10275,7 @@ client.flows.log(
-
-**user:** `typing.Optional[str]` — End-user ID related to the Log.
+**temperature:** `typing.Optional[float]` — What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
@@ -6996,7 +10283,7 @@ client.flows.log(
-
-**flow_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to.
+**top_p:** `typing.Optional[float]` — An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
@@ -7004,7 +10291,7 @@ client.flows.log(
-
-**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
+**stop:** `typing.Optional[AgentRequestStopParams]` — The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
@@ -7012,7 +10299,7 @@ client.flows.log(
-
-**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+**presence_penalty:** `typing.Optional[float]` — Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
@@ -7020,7 +10307,7 @@ client.flows.log(
-
-**flow:** `typing.Optional[FlowKernelRequestParams]` — Flow used to generate the Trace.
+**frequency_penalty:** `typing.Optional[float]` — Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
@@ -7028,79 +10315,47 @@ client.flows.log(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**other:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Other parameter values to be passed to the provider call.
-
-
+
+-
+**seed:** `typing.Optional[int]` — If specified, model will make a best effort to sample deterministically, but it is not guaranteed.
+
-
-client.flows.update_log(...)
-
-#### 📝 Description
-
-
--
+**response_format:** `typing.Optional[ResponseFormatParams]` — The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+
+
+
-
-Update the status, inputs, output of a Flow Log.
-
-Marking a Flow Log as complete will trigger any monitoring Evaluators to run.
-Inputs and output (or error) must be provided in order to mark it as complete.
-
-The end_time log attribute will be set to match the time the log is marked as complete.
-
-
+**reasoning_effort:** `typing.Optional[AgentRequestReasoningEffortParams]` — Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
+
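+For example, a sketch of both conventions (the model names are illustrative):
+
+```python
+# OpenAI reasoning models take an effort level
+client.agents.upsert(model="o3-mini", reasoning_effort="medium")
+# Anthropic reasoning models take a maximum token budget (an integer)
+client.agents.upsert(model="claude-3-7-sonnet", reasoning_effort=1024)
+```
+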
-#### 🔌 Usage
-
-
--
-
-
-```python
-from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.flows.update_log(
- log_id="medqa_experiment_0001",
- inputs={
- "question": "Patient with a history of diabetes and normal tension presents with chest pain and shortness of breath."
- },
- output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.",
- log_status="complete",
-)
-
-```
-
-
+**tools:** `typing.Optional[typing.Sequence[AgentRequestToolsItemParams]]`
+
-#### ⚙️ Parameters
-
-
--
-
-
-**log_id:** `str` — Unique identifier of the Flow Log.
+**attributes:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used.
@@ -7108,7 +10363,7 @@ client.flows.update_log(
-
-**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — List of chat messages that were used as an input to the Flow.
+**max_iterations:** `typing.Optional[int]` — The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
@@ -7116,7 +10371,7 @@ client.flows.update_log(
-
-**output_message:** `typing.Optional[ChatMessageParams]` — The output message returned by this Flow.
+**version_name:** `typing.Optional[str]` — Unique name for the Agent version. Each Agent can only have one version with a given name.
@@ -7124,7 +10379,7 @@ client.flows.update_log(
-
-**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the Flow Log.
+**version_description:** `typing.Optional[str]` — Description of the Version.
@@ -7132,7 +10387,7 @@ client.flows.update_log(
-
-**output:** `typing.Optional[str]` — The output of the Flow Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`.
+**description:** `typing.Optional[str]` — Description of the Agent.
@@ -7140,7 +10395,7 @@ client.flows.update_log(
-
-**error:** `typing.Optional[str]` — The error message of the Flow Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`.
+**tags:** `typing.Optional[typing.Sequence[str]]` — List of tags associated with this Agent.
@@ -7148,7 +10403,7 @@ client.flows.update_log(
-
-**log_status:** `typing.Optional[LogStatus]` — Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs.
+**readme:** `typing.Optional[str]` — Long description of the Agent.
@@ -7168,7 +10423,7 @@ client.flows.update_log(
-client.flows.get(...)
+client.agents.delete_agent_version(...)
-
@@ -7180,10 +10435,7 @@ client.flows.update_log(
-
-Retrieve the Flow with the given ID.
-
-By default, the deployed version of the Flow is returned. Use the query parameters
-`version_id` or `environment` to target a specific version of the Flow.
+Delete a version of the Agent.
@@ -7203,8 +10455,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.get(
- id="fl_6o701g4jmcanPVHxdqD0O",
+client.agents.delete_agent_version(
+ id="id",
+ version_id="version_id",
)
```
@@ -7221,15 +10474,7 @@ client.flows.get(
-
-**id:** `str` — Unique identifier for Flow.
-
-
-
-
-
--
-
-**version_id:** `typing.Optional[str]` — A specific Version ID of the Flow to retrieve.
+**id:** `str` — Unique identifier for Agent.
@@ -7237,7 +10482,7 @@ client.flows.get(
-
-**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
+**version_id:** `str` — Unique identifier for the specific version of the Agent.
@@ -7257,7 +10502,7 @@ client.flows.get(
-client.flows.delete(...)
+client.agents.patch_agent_version(...)
-
@@ -7269,7 +10514,7 @@ client.flows.get(
-
-Delete the Flow with the given ID.
+Update the name or description of the Agent version.
@@ -7289,8 +10534,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.delete(
- id="fl_6o701g4jmcanPVHxdqD0O",
+client.agents.patch_agent_version(
+ id="id",
+ version_id="version_id",
)
```
@@ -7307,7 +10553,31 @@ client.flows.delete(
-
-**id:** `str` — Unique identifier for Flow.
+**id:** `str` — Unique identifier for Agent.
+
+
+
+
+
+-
+
+**version_id:** `str` — Unique identifier for the specific version of the Agent.
+
+
+
+
+
+-
+
+**name:** `typing.Optional[str]` — Name of the version.
+
+
+
+
+
+-
+
+**description:** `typing.Optional[str]` — Description of the version.
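+
+For example, renaming a version might look like this (the identifiers and name are illustrative):
+
+```python
+client.agents.patch_agent_version(
+    id="id",
+    version_id="version_id",
+    name="v2-improved-template",
+)
+```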
@@ -7327,7 +10597,7 @@ client.flows.delete(
-client.flows.move(...)
+client.agents.get(...)
-
@@ -7339,7 +10609,10 @@ client.flows.delete(
-
-Move the Flow to a different path or change the name.
+Retrieve the Agent with the given ID.
+
+By default, the deployed version of the Agent is returned. Use the query parameters
+`version_id` or `environment` to target a specific version of the Agent.
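+
+For example, targeting the version deployed to a specific Environment might look like this (the identifiers are illustrative):
+
+```python
+client.agents.get(
+    id="id",
+    environment="production",
+)
+```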
@@ -7359,9 +10632,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.move(
- id="fl_6o701g4jmcanPVHxdqD0O",
- path="new directory/new name",
+client.agents.get(
+ id="id",
)
```
@@ -7378,15 +10650,7 @@ client.flows.move(
-
-**id:** `str` — Unique identifier for Flow.
-
-
-
-
-
--
-
-**path:** `typing.Optional[str]` — Path of the Flow including the Flow name, which is used as a unique identifier.
+**id:** `str` — Unique identifier for Agent.
@@ -7394,7 +10658,7 @@ client.flows.move(
-
-**name:** `typing.Optional[str]` — Name of the Flow.
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Agent to retrieve.
@@ -7402,7 +10666,7 @@ client.flows.move(
-
-**directory_id:** `typing.Optional[str]` — Unique identifier for the Directory to move Flow to. Starts with `dir_`.
+**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
@@ -7422,7 +10686,7 @@ client.flows.move(
-client.flows.list(...)
+client.agents.delete(...)
-
@@ -7434,7 +10698,7 @@ client.flows.move(
-
-Get a list of Flows.
+Delete the Agent with the given ID.
@@ -7454,14 +10718,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-response = client.flows.list(
- size=1,
+client.agents.delete(
+ id="id",
)
-for item in response:
- yield item
-# alternatively, you can paginate page-by-page
-for page in response.iter_pages():
- yield page
```
@@ -7477,47 +10736,7 @@ for page in response.iter_pages():
-
-**page:** `typing.Optional[int]` — Page number for pagination.
-
-
-
-
-
--
-
-**size:** `typing.Optional[int]` — Page size for pagination. Number of Flows to fetch.
-
-
-
-
-
--
-
-**name:** `typing.Optional[str]` — Case-insensitive filter for Flow name.
-
-
-
-
-
--
-
-**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Flow. This filter matches against both email address and name of users.
-
-
-
-
-
--
-
-**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Flows by
-
-
-
-
-
--
-
-**order:** `typing.Optional[SortOrder]` — Direction to sort by.
+**id:** `str` — Unique identifier for Agent.
@@ -7537,7 +10756,7 @@ for page in response.iter_pages():
-client.flows.upsert(...)
+client.agents.move(...)
-
@@ -7549,13 +10768,7 @@ for page in response.iter_pages():
-
-Create or update a Flow.
-
-Flows can also be identified by the `ID` or their `path`.
-
-You can provide `version_name` and `version_description` to identify and describe your versions.
-Version names must be unique within a Flow - attempting to create a version with a name
-that already exists will result in a 409 Conflict error.
+Move the Agent to a different path or change the name.
@@ -7575,22 +10788,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.upsert(
- path="Personal Projects/MedQA Flow",
- attributes={
- "prompt": {
- "template": "You are a helpful medical assistant helping with medical anamnesis. Answer {{question}}",
- "model": "gpt-4o",
- "temperature": 0.8,
- },
- "tool": {
- "name": "retrieval_tool_v3",
- "description": "Retrieval tool for MedQA.",
- "source_code": "def retrieval_tool(question: str) -> str:\n pass\n",
- },
- "version_name": "medqa-flow-v1",
- "version_description": "Initial version",
- },
+client.agents.move(
+ id="id",
)
```
@@ -7607,15 +10806,7 @@ client.flows.upsert(
-
-**attributes:** `typing.Dict[str, typing.Optional[typing.Any]]` — A key-value object identifying the Flow Version.
-
-
-
-
-
--
-
-**path:** `typing.Optional[str]` — Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+**id:** `str` — Unique identifier for Agent.
@@ -7623,7 +10814,7 @@ client.flows.upsert(
-
-**id:** `typing.Optional[str]` — ID for an existing Flow.
+**path:** `typing.Optional[str]` — Path of the Agent including the Agent name, which is used as a unique identifier.
@@ -7631,7 +10822,7 @@ client.flows.upsert(
-
-**version_name:** `typing.Optional[str]` — Unique name for the Flow version. Version names must be unique for a given Flow.
+**name:** `typing.Optional[str]` — Name of the Agent.
@@ -7639,7 +10830,7 @@ client.flows.upsert(
-
-**version_description:** `typing.Optional[str]` — Description of the version, e.g., the changes made in this version.
+**directory_id:** `typing.Optional[str]` — Unique identifier for the Directory to move the Agent to. Starts with `dir_`.
@@ -7659,7 +10850,7 @@ client.flows.upsert(
-client.flows.list_versions(...)
+client.agents.list_versions(...)
-
@@ -7671,7 +10862,7 @@ client.flows.upsert(
-
-Get a list of all the versions of a Flow.
+Get a list of all the versions of an Agent.
@@ -7691,8 +10882,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.list_versions(
- id="fl_6o701g4jmcanPVHxdqD0O",
+client.agents.list_versions(
+ id="id",
)
```
@@ -7709,7 +10900,7 @@ client.flows.list_versions(
-
-**id:** `str` — Unique identifier for Flow.
+**id:** `str` — Unique identifier for Agent.
@@ -7737,7 +10928,7 @@ client.flows.list_versions(
-client.flows.delete_flow_version(...)
+client.agents.set_deployment(...)
-
@@ -7749,7 +10940,10 @@ client.flows.list_versions(
-
-Delete a version of the Flow.
+Deploy Agent to an Environment.
+
+Set the deployed version for the specified Environment. This Agent
+will be used for calls made to the Agent in this Environment.
@@ -7769,8 +10963,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.delete_flow_version(
+client.agents.set_deployment(
id="id",
+ environment_id="environment_id",
version_id="version_id",
)
@@ -7788,7 +10983,7 @@ client.flows.delete_flow_version(
-
-**id:** `str` — Unique identifier for Flow.
+**id:** `str` — Unique identifier for Agent.
@@ -7796,7 +10991,15 @@ client.flows.delete_flow_version(
-
-**version_id:** `str` — Unique identifier for the specific version of the Flow.
+**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
+
+
+
+
+
+-
+
+**version_id:** `str` — Unique identifier for the specific version of the Agent.
@@ -7816,7 +11019,7 @@ client.flows.delete_flow_version(
-client.flows.update_flow_version(...)
+client.agents.remove_deployment(...)
-
@@ -7828,7 +11031,10 @@ client.flows.delete_flow_version(
-
-Update the name or description of the Flow version.
+Remove deployed Agent from the Environment.
+
+Remove the deployed version for the specified Environment. This Agent
+will no longer be used for calls made to the Agent in this Environment.
@@ -7848,9 +11054,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.update_flow_version(
+client.agents.remove_deployment(
id="id",
- version_id="version_id",
+ environment_id="environment_id",
)
```
@@ -7867,23 +11073,7 @@ client.flows.update_flow_version(
-
-**id:** `str` — Unique identifier for Flow.
-
-
-
-
-
--
-
-**version_id:** `str` — Unique identifier for the specific version of the Flow.
-
-
-
-
-
--
-
-**name:** `typing.Optional[str]` — Name of the version.
+**id:** `str` — Unique identifier for Agent.
@@ -7891,7 +11081,7 @@ client.flows.update_flow_version(
-
-**description:** `typing.Optional[str]` — Description of the version.
+**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
@@ -7911,7 +11101,7 @@ client.flows.update_flow_version(
-client.flows.set_deployment(...)
+client.agents.list_environments(...)
-
@@ -7923,10 +11113,7 @@ client.flows.update_flow_version(
-
-Deploy Flow to an Environment.
-
-Set the deployed version for the specified Environment. This Flow
-will be used for calls made to the Flow in this Environment.
+List all Environments and their deployed versions for the Agent.
@@ -7946,10 +11133,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.set_deployment(
- id="fl_6o701g4jmcanPVHxdqD0O",
- environment_id="staging",
- version_id="flv_6o701g4jmcanPVHxdqD0O",
+client.agents.list_environments(
+ id="id",
)
```
@@ -7966,23 +11151,7 @@ client.flows.set_deployment(
-
-**id:** `str` — Unique identifier for Flow.
-
-
-
-
-
--
-
-**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
-
-
-
-
-
--
-
-**version_id:** `str` — Unique identifier for the specific version of the Flow.
+**id:** `str` — Unique identifier for Agent.
@@ -8002,7 +11171,7 @@ client.flows.set_deployment(
-client.flows.remove_deployment(...)
+client.agents.update_monitoring(...)
-
@@ -8014,10 +11183,10 @@ client.flows.set_deployment(
-
-Remove deployed Flow from the Environment.
+Activate and deactivate Evaluators for monitoring the Agent.
-Remove the deployed version for the specified Environment. This Flow
-will no longer be used for calls made to the Flow in this Environment.
+An activated Evaluator will automatically be run on all new Logs
+within the Agent for monitoring purposes.
@@ -8037,9 +11206,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.remove_deployment(
- id="fl_6o701g4jmcanPVHxdqD0O",
- environment_id="staging",
+client.agents.update_monitoring(
+ id="id",
)
```
@@ -8056,7 +11224,7 @@ client.flows.remove_deployment(
-
-**id:** `str` — Unique identifier for Flow.
+**id:** `str`
@@ -8064,7 +11232,19 @@ client.flows.remove_deployment(
-
-**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
+**activate:** `typing.Optional[
+ typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]
+]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+
+
+
+
+
+-
+
+**deactivate:** `typing.Optional[
+ typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]
+]` — Evaluators to deactivate. These will not be run on new Logs.
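+
+For example, activating a monitoring Evaluator follows the same shape as for other Files (the Evaluator version ID is illustrative):
+
+```python
+client.agents.update_monitoring(
+    id="id",
+    activate=[{"evaluator_version_id": "evaluator_version_id"}],
+)
+```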
@@ -8084,7 +11264,7 @@ client.flows.remove_deployment(
-client.flows.list_environments(...)
+client.agents.serialize(...)
-
@@ -8096,7 +11276,13 @@ client.flows.remove_deployment(
-
-List all Environments and their deployed versions for the Flow.
+Serialize an Agent to the .agent file format.
+
+Useful for storing the Agent with your code in a version control system,
+or for editing with an AI tool.
+
+By default, the deployed version of the Agent is returned. Use the query parameters
+`version_id` or `environment` to target a specific version of the Agent.
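+
+A minimal sketch of storing the result with your code, assuming the call returns the serialized .agent file contents as a string:
+
+```python
+serialized = client.agents.serialize(
+    id="id",
+)
+# write the serialized Agent to a file for version control
+with open("my_agent.agent", "w") as f:
+    f.write(serialized)
+```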
@@ -8116,8 +11302,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.list_environments(
- id="fl_6o701g4jmcanPVHxdqD0O",
+client.agents.serialize(
+ id="id",
)
```
@@ -8134,7 +11320,23 @@ client.flows.list_environments(
-
-**id:** `str` — Unique identifier for Flow.
+**id:** `str` — Unique identifier for Agent.
+
+
+
+
+
+-
+
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Agent to retrieve.
+
+
+
+
+
+-
+
+**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
@@ -8154,7 +11356,7 @@ client.flows.list_environments(
-client.flows.update_monitoring(...)
+client.agents.deserialize(...)
-
@@ -8166,10 +11368,10 @@ client.flows.list_environments(
-
-Activate and deactivate Evaluators for monitoring the Flow.
+Deserialize an Agent from the .agent file format.
-An activated Evaluator will automatically be run on all new "completed" Logs
-within the Flow for monitoring purposes.
+This returns a subset of the attributes required by an Agent.
+This subset defines the Agent version (e.g. `model`, `temperature`, etc.).
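+
+A sketch of the corresponding round trip, assuming a previously serialized .agent file on disk:
+
+```python
+# read a .agent file previously produced by serialize
+with open("my_agent.agent") as f:
+    agent_file = f.read()
+agent_version = client.agents.deserialize(
+    agent=agent_file,
+)
+```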
@@ -8189,9 +11391,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.update_monitoring(
- id="fl_6o701g4jmcanPVHxdqD0O",
- activate=[{"evaluator_version_id": "evv_1abc4308abd"}],
+client.agents.deserialize(
+ agent="agent",
)
```
@@ -8208,27 +11409,7 @@ client.flows.update_monitoring(
-
-**id:** `str`
-
-
-
-
-
--
-
-**activate:** `typing.Optional[
- typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]
-]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
-
-
-
-
-
--
-
-**deactivate:** `typing.Optional[
- typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]
-]` — Evaluators to deactivate. These will not be run on new Logs.
+**agent:** `str`
@@ -8742,6 +11923,14 @@ client.files.list_files()
-
+**include_content:** `typing.Optional[bool]` — Whether to include the serialized file content in the response. Currently only supported for Agents and Prompts.
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -10190,7 +13379,7 @@ for page in response.iter_pages():
-
-**include_trace_children:** `typing.Optional[bool]` — If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow Logs.
+**include_trace_children:** `typing.Optional[bool]` — If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow or Agent Logs.
diff --git a/src/humanloop/__init__.py b/src/humanloop/__init__.py
index 0c431892..2ad9d39e 100644
--- a/src/humanloop/__init__.py
+++ b/src/humanloop/__init__.py
@@ -1,16 +1,45 @@
# This file was auto-generated by Fern from our API Definition.
from .types import (
+ AgentCallResponse,
+ AgentCallResponseToolChoice,
+ AgentCallStreamResponse,
+ AgentCallStreamResponsePayload,
AgentConfigResponse,
+ AgentContinueResponse,
+ AgentContinueResponseToolChoice,
+ AgentContinueStreamResponse,
+ AgentContinueStreamResponsePayload,
+ AgentInlineTool,
+ AgentKernelRequest,
+ AgentKernelRequestReasoningEffort,
+ AgentKernelRequestStop,
+ AgentKernelRequestTemplate,
+ AgentKernelRequestToolsItem,
+ AgentLinkedFileRequest,
+ AgentLinkedFileResponse,
+ AgentLinkedFileResponseFile,
+ AgentLogResponse,
+ AgentLogResponseToolChoice,
+ AgentLogStreamResponse,
+ AgentResponse,
+ AgentResponseReasoningEffort,
+ AgentResponseStop,
+ AgentResponseTemplate,
+ AgentResponseToolsItem,
+ AnthropicRedactedThinkingContent,
+ AnthropicThinkingContent,
BaseModelsUserResponse,
BooleanEvaluatorStatsResponse,
ChatMessage,
ChatMessageContent,
ChatMessageContentItem,
+ ChatMessageThinkingItem,
ChatRole,
ChatToolType,
CodeEvaluatorRequest,
ConfigToolResponse,
+ CreateAgentLogResponse,
CreateDatapointRequest,
CreateDatapointRequestTargetValue,
CreateEvaluatorLogResponse,
@@ -55,10 +84,12 @@
EvaluatorReturnTypeEnum,
EvaluatorVersionId,
EvaluatorsRequest,
+ EventType,
ExternalEvaluatorRequest,
FeedbackType,
FileEnvironmentResponse,
FileEnvironmentResponseFile,
+ FileEnvironmentVariableRequest,
FileId,
FilePath,
FileRequest,
@@ -76,7 +107,9 @@
ImageUrl,
ImageUrlDetail,
InputResponse,
+ LinkedFileRequest,
LinkedToolResponse,
+ ListAgents,
ListDatasets,
ListEvaluators,
ListFlows,
@@ -85,6 +118,7 @@
LlmEvaluatorRequest,
LogResponse,
LogStatus,
+ LogStreamResponse,
ModelEndpoints,
ModelProviders,
MonitoringEvaluatorEnvironmentRequest,
@@ -93,15 +127,18 @@
MonitoringEvaluatorVersionRequest,
NumericEvaluatorStatsResponse,
ObservabilityStatus,
+ OnAgentCallEnum,
+ OpenAiReasoningEffort,
OverallStats,
+ PaginatedDataAgentResponse,
PaginatedDataEvaluationLogResponse,
PaginatedDataEvaluatorResponse,
PaginatedDataFlowResponse,
PaginatedDataLogResponse,
PaginatedDataPromptResponse,
PaginatedDataToolResponse,
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse,
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem,
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse,
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem,
PaginatedDatapointResponse,
PaginatedDatasetResponse,
PaginatedEvaluationResponse,
@@ -110,6 +147,7 @@
PlatformAccessEnum,
PopulateTemplateResponse,
PopulateTemplateResponsePopulatedTemplate,
+ PopulateTemplateResponseReasoningEffort,
PopulateTemplateResponseStop,
PopulateTemplateResponseTemplate,
ProjectSortBy,
@@ -118,15 +156,16 @@
PromptCallResponseToolChoice,
PromptCallStreamResponse,
PromptKernelRequest,
+ PromptKernelRequestReasoningEffort,
PromptKernelRequestStop,
PromptKernelRequestTemplate,
PromptLogResponse,
PromptLogResponseToolChoice,
PromptResponse,
+ PromptResponseReasoningEffort,
PromptResponseStop,
PromptResponseTemplate,
ProviderApiKeys,
- ReasoningEffort,
ResponseFormat,
ResponseFormatType,
RunStatsResponse,
@@ -139,6 +178,7 @@
TextEvaluatorStatsResponse,
TimeUnit,
ToolCall,
+ ToolCallResponse,
ToolChoice,
ToolFunction,
ToolKernelRequest,
@@ -162,7 +202,29 @@
VersionStatus,
)
from .errors import UnprocessableEntityError
-from . import datasets, directories, evaluations, evaluators, files, flows, logs, prompts, tools
+from . import agents, datasets, directories, evaluations, evaluators, files, flows, logs, prompts, tools
+from .agents import (
+ AgentLogRequestAgent,
+ AgentLogRequestAgentParams,
+ AgentLogRequestToolChoice,
+ AgentLogRequestToolChoiceParams,
+ AgentRequestReasoningEffort,
+ AgentRequestReasoningEffortParams,
+ AgentRequestStop,
+ AgentRequestStopParams,
+ AgentRequestTemplate,
+ AgentRequestTemplateParams,
+ AgentRequestToolsItem,
+ AgentRequestToolsItemParams,
+ AgentsCallRequestAgent,
+ AgentsCallRequestAgentParams,
+ AgentsCallRequestToolChoice,
+ AgentsCallRequestToolChoiceParams,
+ AgentsCallStreamRequestAgent,
+ AgentsCallStreamRequestAgentParams,
+ AgentsCallStreamRequestToolChoice,
+ AgentsCallStreamRequestToolChoiceParams,
+)
from .client import AsyncHumanloop, Humanloop
from .datasets import ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints
from .environment import HumanloopEnvironment
@@ -186,26 +248,63 @@
)
from .files import RetrieveByPathFilesRetrieveByPathPostResponse, RetrieveByPathFilesRetrieveByPathPostResponseParams
from .prompts import (
+ PromptLogRequestPrompt,
+ PromptLogRequestPromptParams,
PromptLogRequestToolChoice,
PromptLogRequestToolChoiceParams,
PromptLogUpdateRequestToolChoice,
PromptLogUpdateRequestToolChoiceParams,
+ PromptRequestReasoningEffort,
+ PromptRequestReasoningEffortParams,
PromptRequestStop,
PromptRequestStopParams,
PromptRequestTemplate,
PromptRequestTemplateParams,
+ PromptsCallRequestPrompt,
+ PromptsCallRequestPromptParams,
PromptsCallRequestToolChoice,
PromptsCallRequestToolChoiceParams,
+ PromptsCallStreamRequestPrompt,
+ PromptsCallStreamRequestPromptParams,
PromptsCallStreamRequestToolChoice,
PromptsCallStreamRequestToolChoiceParams,
)
from .requests import (
+ AgentCallResponseParams,
+ AgentCallResponseToolChoiceParams,
+ AgentCallStreamResponseParams,
+ AgentCallStreamResponsePayloadParams,
AgentConfigResponseParams,
+ AgentContinueResponseParams,
+ AgentContinueResponseToolChoiceParams,
+ AgentContinueStreamResponseParams,
+ AgentContinueStreamResponsePayloadParams,
+ AgentInlineToolParams,
+ AgentKernelRequestParams,
+ AgentKernelRequestReasoningEffortParams,
+ AgentKernelRequestStopParams,
+ AgentKernelRequestTemplateParams,
+ AgentKernelRequestToolsItemParams,
+ AgentLinkedFileRequestParams,
+ AgentLinkedFileResponseFileParams,
+ AgentLinkedFileResponseParams,
+ AgentLogResponseParams,
+ AgentLogResponseToolChoiceParams,
+ AgentLogStreamResponseParams,
+ AgentResponseParams,
+ AgentResponseReasoningEffortParams,
+ AgentResponseStopParams,
+ AgentResponseTemplateParams,
+ AgentResponseToolsItemParams,
+ AnthropicRedactedThinkingContentParams,
+ AnthropicThinkingContentParams,
BooleanEvaluatorStatsResponseParams,
ChatMessageContentItemParams,
ChatMessageContentParams,
ChatMessageParams,
+ ChatMessageThinkingItemParams,
CodeEvaluatorRequestParams,
+ CreateAgentLogResponseParams,
CreateDatapointRequestParams,
CreateDatapointRequestTargetValueParams,
CreateEvaluatorLogResponseParams,
@@ -245,6 +344,7 @@
ExternalEvaluatorRequestParams,
FileEnvironmentResponseFileParams,
FileEnvironmentResponseParams,
+ FileEnvironmentVariableRequestParams,
FileIdParams,
FilePathParams,
FileRequestParams,
@@ -258,7 +358,9 @@
ImageChatContentParams,
ImageUrlParams,
InputResponseParams,
+ LinkedFileRequestParams,
LinkedToolResponseParams,
+ ListAgentsParams,
ListDatasetsParams,
ListEvaluatorsParams,
ListFlowsParams,
@@ -266,24 +368,27 @@
ListToolsParams,
LlmEvaluatorRequestParams,
LogResponseParams,
+ LogStreamResponseParams,
MonitoringEvaluatorEnvironmentRequestParams,
MonitoringEvaluatorResponseParams,
MonitoringEvaluatorVersionRequestParams,
NumericEvaluatorStatsResponseParams,
OverallStatsParams,
+ PaginatedDataAgentResponseParams,
PaginatedDataEvaluationLogResponseParams,
PaginatedDataEvaluatorResponseParams,
PaginatedDataFlowResponseParams,
PaginatedDataLogResponseParams,
PaginatedDataPromptResponseParams,
PaginatedDataToolResponseParams,
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseParams,
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams,
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseParams,
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams,
PaginatedDatapointResponseParams,
PaginatedDatasetResponseParams,
PaginatedEvaluationResponseParams,
PopulateTemplateResponseParams,
PopulateTemplateResponsePopulatedTemplateParams,
+ PopulateTemplateResponseReasoningEffortParams,
PopulateTemplateResponseStopParams,
PopulateTemplateResponseTemplateParams,
PromptCallLogResponseParams,
@@ -291,11 +396,13 @@
PromptCallResponseToolChoiceParams,
PromptCallStreamResponseParams,
PromptKernelRequestParams,
+ PromptKernelRequestReasoningEffortParams,
PromptKernelRequestStopParams,
PromptKernelRequestTemplateParams,
PromptLogResponseParams,
PromptLogResponseToolChoiceParams,
PromptResponseParams,
+ PromptResponseReasoningEffortParams,
PromptResponseStopParams,
PromptResponseTemplateParams,
ProviderApiKeysParams,
@@ -307,6 +414,7 @@
TextChatContentParams,
TextEvaluatorStatsResponseParams,
ToolCallParams,
+ ToolCallResponseParams,
ToolChoiceParams,
ToolFunctionParams,
ToolKernelRequestParams,
@@ -329,8 +437,82 @@
__all__ = [
"AddEvaluatorsRequestEvaluatorsItem",
"AddEvaluatorsRequestEvaluatorsItemParams",
+ "AgentCallResponse",
+ "AgentCallResponseParams",
+ "AgentCallResponseToolChoice",
+ "AgentCallResponseToolChoiceParams",
+ "AgentCallStreamResponse",
+ "AgentCallStreamResponseParams",
+ "AgentCallStreamResponsePayload",
+ "AgentCallStreamResponsePayloadParams",
"AgentConfigResponse",
"AgentConfigResponseParams",
+ "AgentContinueResponse",
+ "AgentContinueResponseParams",
+ "AgentContinueResponseToolChoice",
+ "AgentContinueResponseToolChoiceParams",
+ "AgentContinueStreamResponse",
+ "AgentContinueStreamResponseParams",
+ "AgentContinueStreamResponsePayload",
+ "AgentContinueStreamResponsePayloadParams",
+ "AgentInlineTool",
+ "AgentInlineToolParams",
+ "AgentKernelRequest",
+ "AgentKernelRequestParams",
+ "AgentKernelRequestReasoningEffort",
+ "AgentKernelRequestReasoningEffortParams",
+ "AgentKernelRequestStop",
+ "AgentKernelRequestStopParams",
+ "AgentKernelRequestTemplate",
+ "AgentKernelRequestTemplateParams",
+ "AgentKernelRequestToolsItem",
+ "AgentKernelRequestToolsItemParams",
+ "AgentLinkedFileRequest",
+ "AgentLinkedFileRequestParams",
+ "AgentLinkedFileResponse",
+ "AgentLinkedFileResponseFile",
+ "AgentLinkedFileResponseFileParams",
+ "AgentLinkedFileResponseParams",
+ "AgentLogRequestAgent",
+ "AgentLogRequestAgentParams",
+ "AgentLogRequestToolChoice",
+ "AgentLogRequestToolChoiceParams",
+ "AgentLogResponse",
+ "AgentLogResponseParams",
+ "AgentLogResponseToolChoice",
+ "AgentLogResponseToolChoiceParams",
+ "AgentLogStreamResponse",
+ "AgentLogStreamResponseParams",
+ "AgentRequestReasoningEffort",
+ "AgentRequestReasoningEffortParams",
+ "AgentRequestStop",
+ "AgentRequestStopParams",
+ "AgentRequestTemplate",
+ "AgentRequestTemplateParams",
+ "AgentRequestToolsItem",
+ "AgentRequestToolsItemParams",
+ "AgentResponse",
+ "AgentResponseParams",
+ "AgentResponseReasoningEffort",
+ "AgentResponseReasoningEffortParams",
+ "AgentResponseStop",
+ "AgentResponseStopParams",
+ "AgentResponseTemplate",
+ "AgentResponseTemplateParams",
+ "AgentResponseToolsItem",
+ "AgentResponseToolsItemParams",
+ "AgentsCallRequestAgent",
+ "AgentsCallRequestAgentParams",
+ "AgentsCallRequestToolChoice",
+ "AgentsCallRequestToolChoiceParams",
+ "AgentsCallStreamRequestAgent",
+ "AgentsCallStreamRequestAgentParams",
+ "AgentsCallStreamRequestToolChoice",
+ "AgentsCallStreamRequestToolChoiceParams",
+ "AnthropicRedactedThinkingContent",
+ "AnthropicRedactedThinkingContentParams",
+ "AnthropicThinkingContent",
+ "AnthropicThinkingContentParams",
"AsyncHumanloop",
"BaseModelsUserResponse",
"BooleanEvaluatorStatsResponse",
@@ -341,11 +523,15 @@
"ChatMessageContentItemParams",
"ChatMessageContentParams",
"ChatMessageParams",
+ "ChatMessageThinkingItem",
+ "ChatMessageThinkingItemParams",
"ChatRole",
"ChatToolType",
"CodeEvaluatorRequest",
"CodeEvaluatorRequestParams",
"ConfigToolResponse",
+ "CreateAgentLogResponse",
+ "CreateAgentLogResponseParams",
"CreateDatapointRequest",
"CreateDatapointRequestParams",
"CreateDatapointRequestTargetValue",
@@ -438,6 +624,7 @@
"EvaluatorVersionId",
"EvaluatorVersionIdParams",
"EvaluatorsRequest",
+ "EventType",
"ExternalEvaluatorRequest",
"ExternalEvaluatorRequestParams",
"FeedbackType",
@@ -445,6 +632,8 @@
"FileEnvironmentResponseFile",
"FileEnvironmentResponseFileParams",
"FileEnvironmentResponseParams",
+ "FileEnvironmentVariableRequest",
+ "FileEnvironmentVariableRequestParams",
"FileId",
"FileIdParams",
"FilePath",
@@ -477,8 +666,12 @@
"ImageUrlParams",
"InputResponse",
"InputResponseParams",
+ "LinkedFileRequest",
+ "LinkedFileRequestParams",
"LinkedToolResponse",
"LinkedToolResponseParams",
+ "ListAgents",
+ "ListAgentsParams",
"ListDatasets",
"ListDatasetsParams",
"ListEvaluators",
@@ -495,6 +688,8 @@
"LogResponse",
"LogResponseParams",
"LogStatus",
+ "LogStreamResponse",
+ "LogStreamResponseParams",
"ModelEndpoints",
"ModelProviders",
"MonitoringEvaluatorEnvironmentRequest",
@@ -507,8 +702,12 @@
"NumericEvaluatorStatsResponse",
"NumericEvaluatorStatsResponseParams",
"ObservabilityStatus",
+ "OnAgentCallEnum",
+ "OpenAiReasoningEffort",
"OverallStats",
"OverallStatsParams",
+ "PaginatedDataAgentResponse",
+ "PaginatedDataAgentResponseParams",
"PaginatedDataEvaluationLogResponse",
"PaginatedDataEvaluationLogResponseParams",
"PaginatedDataEvaluatorResponse",
@@ -521,10 +720,10 @@
"PaginatedDataPromptResponseParams",
"PaginatedDataToolResponse",
"PaginatedDataToolResponseParams",
- "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse",
- "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseParams",
- "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem",
- "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams",
+ "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse",
+ "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseParams",
+ "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem",
+ "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams",
"PaginatedDatapointResponse",
"PaginatedDatapointResponseParams",
"PaginatedDatasetResponse",
@@ -538,6 +737,8 @@
"PopulateTemplateResponseParams",
"PopulateTemplateResponsePopulatedTemplate",
"PopulateTemplateResponsePopulatedTemplateParams",
+ "PopulateTemplateResponseReasoningEffort",
+ "PopulateTemplateResponseReasoningEffortParams",
"PopulateTemplateResponseStop",
"PopulateTemplateResponseStopParams",
"PopulateTemplateResponseTemplate",
@@ -553,10 +754,14 @@
"PromptCallStreamResponseParams",
"PromptKernelRequest",
"PromptKernelRequestParams",
+ "PromptKernelRequestReasoningEffort",
+ "PromptKernelRequestReasoningEffortParams",
"PromptKernelRequestStop",
"PromptKernelRequestStopParams",
"PromptKernelRequestTemplate",
"PromptKernelRequestTemplateParams",
+ "PromptLogRequestPrompt",
+ "PromptLogRequestPromptParams",
"PromptLogRequestToolChoice",
"PromptLogRequestToolChoiceParams",
"PromptLogResponse",
@@ -565,23 +770,30 @@
"PromptLogResponseToolChoiceParams",
"PromptLogUpdateRequestToolChoice",
"PromptLogUpdateRequestToolChoiceParams",
+ "PromptRequestReasoningEffort",
+ "PromptRequestReasoningEffortParams",
"PromptRequestStop",
"PromptRequestStopParams",
"PromptRequestTemplate",
"PromptRequestTemplateParams",
"PromptResponse",
"PromptResponseParams",
+ "PromptResponseReasoningEffort",
+ "PromptResponseReasoningEffortParams",
"PromptResponseStop",
"PromptResponseStopParams",
"PromptResponseTemplate",
"PromptResponseTemplateParams",
+ "PromptsCallRequestPrompt",
+ "PromptsCallRequestPromptParams",
"PromptsCallRequestToolChoice",
"PromptsCallRequestToolChoiceParams",
+ "PromptsCallStreamRequestPrompt",
+ "PromptsCallStreamRequestPromptParams",
"PromptsCallStreamRequestToolChoice",
"PromptsCallStreamRequestToolChoiceParams",
"ProviderApiKeys",
"ProviderApiKeysParams",
- "ReasoningEffort",
"ResponseFormat",
"ResponseFormatParams",
"ResponseFormatType",
@@ -604,6 +816,8 @@
"TimeUnit",
"ToolCall",
"ToolCallParams",
+ "ToolCallResponse",
+ "ToolCallResponseParams",
"ToolChoice",
"ToolChoiceParams",
"ToolFunction",
@@ -643,6 +857,7 @@
"VersionStatsResponseParams",
"VersionStatus",
"__version__",
+ "agents",
"datasets",
"directories",
"evaluations",
diff --git a/src/humanloop/agents/__init__.py b/src/humanloop/agents/__init__.py
new file mode 100644
index 00000000..ab2a2f9e
--- /dev/null
+++ b/src/humanloop/agents/__init__.py
@@ -0,0 +1,49 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from .types import (
+ AgentLogRequestAgent,
+ AgentLogRequestToolChoice,
+ AgentRequestReasoningEffort,
+ AgentRequestStop,
+ AgentRequestTemplate,
+ AgentRequestToolsItem,
+ AgentsCallRequestAgent,
+ AgentsCallRequestToolChoice,
+ AgentsCallStreamRequestAgent,
+ AgentsCallStreamRequestToolChoice,
+)
+from .requests import (
+ AgentLogRequestAgentParams,
+ AgentLogRequestToolChoiceParams,
+ AgentRequestReasoningEffortParams,
+ AgentRequestStopParams,
+ AgentRequestTemplateParams,
+ AgentRequestToolsItemParams,
+ AgentsCallRequestAgentParams,
+ AgentsCallRequestToolChoiceParams,
+ AgentsCallStreamRequestAgentParams,
+ AgentsCallStreamRequestToolChoiceParams,
+)
+
+__all__ = [
+ "AgentLogRequestAgent",
+ "AgentLogRequestAgentParams",
+ "AgentLogRequestToolChoice",
+ "AgentLogRequestToolChoiceParams",
+ "AgentRequestReasoningEffort",
+ "AgentRequestReasoningEffortParams",
+ "AgentRequestStop",
+ "AgentRequestStopParams",
+ "AgentRequestTemplate",
+ "AgentRequestTemplateParams",
+ "AgentRequestToolsItem",
+ "AgentRequestToolsItemParams",
+ "AgentsCallRequestAgent",
+ "AgentsCallRequestAgentParams",
+ "AgentsCallRequestToolChoice",
+ "AgentsCallRequestToolChoiceParams",
+ "AgentsCallStreamRequestAgent",
+ "AgentsCallStreamRequestAgentParams",
+ "AgentsCallStreamRequestToolChoice",
+ "AgentsCallStreamRequestToolChoiceParams",
+]
diff --git a/src/humanloop/agents/client.py b/src/humanloop/agents/client.py
new file mode 100644
index 00000000..b43fd7be
--- /dev/null
+++ b/src/humanloop/agents/client.py
@@ -0,0 +1,3208 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ..core.client_wrapper import SyncClientWrapper
+from .raw_client import RawAgentsClient
+from ..requests.chat_message import ChatMessageParams
+from .requests.agent_log_request_tool_choice import AgentLogRequestToolChoiceParams
+from .requests.agent_log_request_agent import AgentLogRequestAgentParams
+import datetime as dt
+from ..types.log_status import LogStatus
+from ..core.request_options import RequestOptions
+from ..types.create_agent_log_response import CreateAgentLogResponse
+from ..types.log_response import LogResponse
+from .requests.agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoiceParams
+from .requests.agents_call_stream_request_agent import AgentsCallStreamRequestAgentParams
+from ..requests.provider_api_keys import ProviderApiKeysParams
+from ..types.agent_call_stream_response import AgentCallStreamResponse
+from .requests.agents_call_request_tool_choice import AgentsCallRequestToolChoiceParams
+from .requests.agents_call_request_agent import AgentsCallRequestAgentParams
+from ..types.agent_call_response import AgentCallResponse
+from ..types.agent_continue_stream_response import AgentContinueStreamResponse
+from ..types.agent_continue_response import AgentContinueResponse
+from ..types.project_sort_by import ProjectSortBy
+from ..types.sort_order import SortOrder
+from ..types.paginated_data_agent_response import PaginatedDataAgentResponse
+from ..types.model_endpoints import ModelEndpoints
+from .requests.agent_request_template import AgentRequestTemplateParams
+from ..types.template_language import TemplateLanguage
+from ..types.model_providers import ModelProviders
+from .requests.agent_request_stop import AgentRequestStopParams
+from ..requests.response_format import ResponseFormatParams
+from .requests.agent_request_reasoning_effort import AgentRequestReasoningEffortParams
+from .requests.agent_request_tools_item import AgentRequestToolsItemParams
+from ..types.agent_response import AgentResponse
+from ..types.list_agents import ListAgents
+from ..types.file_environment_response import FileEnvironmentResponse
+from ..requests.evaluator_activation_deactivation_request_activate_item import (
+ EvaluatorActivationDeactivationRequestActivateItemParams,
+)
+from ..requests.evaluator_activation_deactivation_request_deactivate_item import (
+ EvaluatorActivationDeactivationRequestDeactivateItemParams,
+)
+from ..types.agent_kernel_request import AgentKernelRequest
+from ..core.client_wrapper import AsyncClientWrapper
+from .raw_client import AsyncRawAgentsClient
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class AgentsClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._raw_client = RawAgentsClient(client_wrapper=client_wrapper)
+
+ @property
+ def with_raw_response(self) -> RawAgentsClient:
+ """
+ Retrieves a raw implementation of this client that returns raw responses.
+
+ Returns
+ -------
+ RawAgentsClient
+ """
+ return self._raw_client
+
+ def log(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ run_id: typing.Optional[str] = OMIT,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ output_message: typing.Optional[ChatMessageParams] = OMIT,
+ prompt_tokens: typing.Optional[int] = OMIT,
+ reasoning_tokens: typing.Optional[int] = OMIT,
+ output_tokens: typing.Optional[int] = OMIT,
+ prompt_cost: typing.Optional[float] = OMIT,
+ output_cost: typing.Optional[float] = OMIT,
+ finish_reason: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentLogRequestAgentParams] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ output: typing.Optional[str] = OMIT,
+ created_at: typing.Optional[dt.datetime] = OMIT,
+ error: typing.Optional[str] = OMIT,
+ provider_latency: typing.Optional[float] = OMIT,
+ stdout: typing.Optional[str] = OMIT,
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agent_log_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> CreateAgentLogResponse:
+ """
+ Create an Agent Log.
+
+        You can use query parameters `version_id` or `environment` to target
+ an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+ If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete`
+ in order to trigger Evaluators.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ run_id : typing.Optional[str]
+ Unique identifier for the Run to associate the Log to.
+
+ path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ output_message : typing.Optional[ChatMessageParams]
+ The message returned by the provider.
+
+ prompt_tokens : typing.Optional[int]
+ Number of tokens in the prompt used to generate the output.
+
+ reasoning_tokens : typing.Optional[int]
+ Number of reasoning tokens used to generate the output.
+
+ output_tokens : typing.Optional[int]
+ Number of tokens in the output generated by the model.
+
+ prompt_cost : typing.Optional[float]
+ Cost in dollars associated to the tokens in the prompt.
+
+ output_cost : typing.Optional[float]
+ Cost in dollars associated to the tokens in the output.
+
+ finish_reason : typing.Optional[str]
+ Reason the generation finished.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentLogRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+            - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Agent.
+            - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Agent.
+            - `'required'` means the model must call one or more of the provided tools.
+            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentLogRequestAgentParams]
+ The agent configuration to use. Two formats are supported:
+ - An `AgentKernelRequest` object containing the agent configuration
+ - A string containing a serialized .agent file
+ A new Agent version will be created if the provided details are new.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ output : typing.Optional[str]
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+ created_at : typing.Optional[dt.datetime]
+ User defined timestamp for when the log was created.
+
+ error : typing.Optional[str]
+ Error message if the log is an error.
+
+ provider_latency : typing.Optional[float]
+ Duration of the logged event in seconds.
+
+ stdout : typing.Optional[str]
+ Captured log and debug statements.
+
+ provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw request sent to provider.
+
+ provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw response received from the provider.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agent_log_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ CreateAgentLogResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.log()
+ """
+ response = self._raw_client.log(
+ version_id=version_id,
+ environment=environment,
+ run_id=run_id,
+ path=path,
+ id=id,
+ output_message=output_message,
+ prompt_tokens=prompt_tokens,
+ reasoning_tokens=reasoning_tokens,
+ output_tokens=output_tokens,
+ prompt_cost=prompt_cost,
+ output_cost=output_cost,
+ finish_reason=finish_reason,
+ messages=messages,
+ tool_choice=tool_choice,
+ agent=agent,
+ start_time=start_time,
+ end_time=end_time,
+ output=output,
+ created_at=created_at,
+ error=error,
+ provider_latency=provider_latency,
+ stdout=stdout,
+ provider_request=provider_request,
+ provider_response=provider_response,
+ inputs=inputs,
+ source=source,
+ metadata=metadata,
+ log_status=log_status,
+ source_datapoint_id=source_datapoint_id,
+ trace_parent_id=trace_parent_id,
+ user=user,
+ agent_log_request_environment=agent_log_request_environment,
+ save=save,
+ log_id=log_id,
+ request_options=request_options,
+ )
+ return response.data
+
+ def update_log(
+ self,
+ id: str,
+ log_id: str,
+ *,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ output_message: typing.Optional[ChatMessageParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ output: typing.Optional[str] = OMIT,
+ error: typing.Optional[str] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> LogResponse:
+ """
+ Update a Log.
+
+ Update the details of a Log with the given ID.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ log_id : str
+ Unique identifier for the Log.
+
+        messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            List of chat messages that were used as an input to the Agent.
+
+        output_message : typing.Optional[ChatMessageParams]
+            The output message returned by this Agent.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the Agent Log.
+
+        output : typing.Optional[str]
+            The output of the Agent Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`.
+
+        error : typing.Optional[str]
+            The error message of the Agent Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`.
+
+        log_status : typing.Optional[LogStatus]
+            Status of the Agent Log. When an Agent Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ LogResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.update_log(
+ id="id",
+ log_id="log_id",
+ )
+ """
+ response = self._raw_client.update_log(
+ id,
+ log_id,
+ messages=messages,
+ output_message=output_message,
+ inputs=inputs,
+ output=output,
+ error=error,
+ log_status=log_status,
+ request_options=request_options,
+ )
+ return response.data
+
+ def call_stream(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentsCallStreamRequestAgentParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agents_call_stream_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ return_inputs: typing.Optional[bool] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.Iterator[AgentCallStreamResponse]:
+ """
+ Call an Agent.
+
+ Calling an Agent invokes the model provider before logging
+ the request, responses, and metadata to Humanloop.
+
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can pass in
+ Agent details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+ when you are storing or deriving your Agent details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentsCallStreamRequestAgentParams]
+ The agent configuration to use. Two formats are supported:
+ - An `AgentKernelRequest` object containing the agent configuration
+ - A string containing a serialized .agent file
+ A new Agent version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agents_call_stream_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Yields
+ ------
+ typing.Iterator[AgentCallStreamResponse]
+
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ response = client.agents.call_stream()
+ for chunk in response:
+ print(chunk)
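+
+ In practice you would usually target an Agent and provide the user messages
+ before consuming the stream; the path and message below are illustrative:
+
+ response = client.agents.call_stream(
+     path="my-folder/my-agent",
+     messages=[{"role": "user", "content": "What can you do?"}],
+ )
+ for chunk in response:
+     print(chunk)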
+ """
+ with self._raw_client.call_stream(
+ version_id=version_id,
+ environment=environment,
+ path=path,
+ id=id,
+ messages=messages,
+ tool_choice=tool_choice,
+ agent=agent,
+ inputs=inputs,
+ source=source,
+ metadata=metadata,
+ start_time=start_time,
+ end_time=end_time,
+ log_status=log_status,
+ source_datapoint_id=source_datapoint_id,
+ trace_parent_id=trace_parent_id,
+ user=user,
+ agents_call_stream_request_environment=agents_call_stream_request_environment,
+ save=save,
+ log_id=log_id,
+ provider_api_keys=provider_api_keys,
+ return_inputs=return_inputs,
+ include_trace_children=include_trace_children,
+ request_options=request_options,
+ ) as r:
+ yield from r.data
+
+ def call(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentsCallRequestAgentParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agents_call_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ return_inputs: typing.Optional[bool] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentCallResponse:
+ """
+ Call an Agent.
+
+ Calling an Agent invokes the model provider before logging
+ the request, responses, and metadata to Humanloop.
+
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can pass in
+ Agent details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+ when you are storing or deriving your Agent details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentsCallRequestAgentParams]
+ The agent configuration to use. Two formats are supported:
+ - An `AgentKernelRequest` object containing the agent configuration
+ - A string containing a serialized .agent file
+ A new Agent version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agents_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentCallResponse
+
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.call()
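+
+ A typical call targets the Agent by path and supplies the conversation so far;
+ the path and message below are illustrative:
+
+ log = client.agents.call(
+     path="my-folder/my-agent",
+     messages=[{"role": "user", "content": "Hello"}],
+ )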
+ """
+ response = self._raw_client.call(
+ version_id=version_id,
+ environment=environment,
+ path=path,
+ id=id,
+ messages=messages,
+ tool_choice=tool_choice,
+ agent=agent,
+ inputs=inputs,
+ source=source,
+ metadata=metadata,
+ start_time=start_time,
+ end_time=end_time,
+ log_status=log_status,
+ source_datapoint_id=source_datapoint_id,
+ trace_parent_id=trace_parent_id,
+ user=user,
+ agents_call_request_environment=agents_call_request_environment,
+ save=save,
+ log_id=log_id,
+ provider_api_keys=provider_api_keys,
+ return_inputs=return_inputs,
+ include_trace_children=include_trace_children,
+ request_options=request_options,
+ )
+ return response.data
+
+ def continue_stream(
+ self,
+ *,
+ log_id: str,
+ messages: typing.Sequence[ChatMessageParams],
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.Iterator[AgentContinueStreamResponse]:
+ """
+ Continue an incomplete Agent call.
+
+ This endpoint allows continuing an existing incomplete Agent call, using the context
+ from the previous interaction. The Agent will resume processing from where it left off.
+
+ The original log must be in an incomplete state to be continued.
+
+ The messages in the request will be appended
+ to the original messages in the log.
+
+ Parameters
+ ----------
+ log_id : str
+ This identifies the Agent Log to continue.
+
+ messages : typing.Sequence[ChatMessageParams]
+ The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Yields
+ ------
+ typing.Iterator[AgentContinueStreamResponse]
+
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ response = client.agents.continue_stream(
+ log_id="log_id",
+ messages=[{"role": "user"}],
+ )
+ for chunk in response:
+ print(chunk)
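+
+ As noted above, the continuation often starts with Tool results for the
+ previous Assistant turn. A sketch, assuming that turn made a tool call and
+ that the message shape accepts `tool_call_id` (IDs and values are illustrative):
+
+ response = client.agents.continue_stream(
+     log_id="log_123",
+     messages=[
+         {"role": "tool", "tool_call_id": "call_abc", "content": "42"}
+     ],
+ )
+ for chunk in response:
+     print(chunk)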
+ """
+ with self._raw_client.continue_stream(
+ log_id=log_id,
+ messages=messages,
+ provider_api_keys=provider_api_keys,
+ include_trace_children=include_trace_children,
+ request_options=request_options,
+ ) as r:
+ yield from r.data
+
+ def continue_(
+ self,
+ *,
+ log_id: str,
+ messages: typing.Sequence[ChatMessageParams],
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentContinueResponse:
+ """
+ Continue an incomplete Agent call.
+
+ This endpoint allows continuing an existing incomplete Agent call, using the context
+ from the previous interaction. The Agent will resume processing from where it left off.
+
+ The original log must be in an incomplete state to be continued.
+
+ The messages in the request will be appended
+ to the original messages in the log.
+
+ Parameters
+ ----------
+ log_id : str
+ This identifies the Agent Log to continue.
+
+ messages : typing.Sequence[ChatMessageParams]
+ The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentContinueResponse
+
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.continue_(
+ log_id="log_id",
+ messages=[{"role": "user"}],
+ )
+ """
+ response = self._raw_client.continue_(
+ log_id=log_id,
+ messages=messages,
+ provider_api_keys=provider_api_keys,
+ include_trace_children=include_trace_children,
+ request_options=request_options,
+ )
+ return response.data
+
+ def list(
+ self,
+ *,
+ page: typing.Optional[int] = None,
+ size: typing.Optional[int] = None,
+ name: typing.Optional[str] = None,
+ user_filter: typing.Optional[str] = None,
+ sort_by: typing.Optional[ProjectSortBy] = None,
+ order: typing.Optional[SortOrder] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> PaginatedDataAgentResponse:
+ """
+ Get a list of all Agents.
+
+ Parameters
+ ----------
+ page : typing.Optional[int]
+ Page number for pagination.
+
+ size : typing.Optional[int]
+ Page size for pagination. Number of Agents to fetch.
+
+ name : typing.Optional[str]
+ Case-insensitive filter for Agent name.
+
+ user_filter : typing.Optional[str]
+ Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users.
+
+ sort_by : typing.Optional[ProjectSortBy]
+ Field to sort Agents by.
+
+ order : typing.Optional[SortOrder]
+ Direction to sort by.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ PaginatedDataAgentResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.list()
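+
+ To filter and page through Agents (parameter values are illustrative; this
+ assumes the paginated response exposes its items under `records`):
+
+ page = client.agents.list(page=1, size=10, name="support")
+ for agent in page.records:
+     print(agent.id)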
+ """
+ response = self._raw_client.list(
+ page=page,
+ size=size,
+ name=name,
+ user_filter=user_filter,
+ sort_by=sort_by,
+ order=order,
+ request_options=request_options,
+ )
+ return response.data
+
+ def upsert(
+ self,
+ *,
+ model: str,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ endpoint: typing.Optional[ModelEndpoints] = OMIT,
+ template: typing.Optional[AgentRequestTemplateParams] = OMIT,
+ template_language: typing.Optional[TemplateLanguage] = OMIT,
+ provider: typing.Optional[ModelProviders] = OMIT,
+ max_tokens: typing.Optional[int] = OMIT,
+ temperature: typing.Optional[float] = OMIT,
+ top_p: typing.Optional[float] = OMIT,
+ stop: typing.Optional[AgentRequestStopParams] = OMIT,
+ presence_penalty: typing.Optional[float] = OMIT,
+ frequency_penalty: typing.Optional[float] = OMIT,
+ other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ seed: typing.Optional[int] = OMIT,
+ response_format: typing.Optional[ResponseFormatParams] = OMIT,
+ reasoning_effort: typing.Optional[AgentRequestReasoningEffortParams] = OMIT,
+ tools: typing.Optional[typing.Sequence[AgentRequestToolsItemParams]] = OMIT,
+ attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ max_iterations: typing.Optional[int] = OMIT,
+ version_name: typing.Optional[str] = OMIT,
+ version_description: typing.Optional[str] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ tags: typing.Optional[typing.Sequence[str]] = OMIT,
+ readme: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Create an Agent or update it with a new version if it already exists.
+
+ Agents are identified by their `id` or `path`. The parameters (i.e. the template, temperature, model, etc.) and
+ tools determine the versions of the Agent.
+
+ You can provide `version_name` and `version_description` to identify and describe your versions.
+ Version names must be unique within an Agent - attempting to create a version with a name
+ that already exists will result in a 409 Conflict error.
+
+ Parameters
+ ----------
+ model : str
+ The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ endpoint : typing.Optional[ModelEndpoints]
+ The provider model endpoint used.
+
+ template : typing.Optional[AgentRequestTemplateParams]
+ The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+ For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+ For completion models, provide a prompt template as a string.
+
+ Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+
+ template_language : typing.Optional[TemplateLanguage]
+ The template language to use for rendering the template.
+
+ provider : typing.Optional[ModelProviders]
+ The company providing the underlying model service.
+
+ max_tokens : typing.Optional[int]
+ The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate, given the length of the prompt.
+
+ temperature : typing.Optional[float]
+ What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+
+ top_p : typing.Optional[float]
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+
+ stop : typing.Optional[AgentRequestStopParams]
+ The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+
+ presence_penalty : typing.Optional[float]
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+
+ frequency_penalty : typing.Optional[float]
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+
+ other : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Other parameter values to be passed to the provider call.
+
+ seed : typing.Optional[int]
+ If specified, the model will make a best effort to sample deterministically, but this is not guaranteed.
+
+ response_format : typing.Optional[ResponseFormatParams]
+ The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+
+ reasoning_effort : typing.Optional[AgentRequestReasoningEffortParams]
+ Guidance on how many reasoning tokens it should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
+
+ tools : typing.Optional[typing.Sequence[AgentRequestToolsItemParams]]
+ The tools available to the Agent.
+
+ attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used.
+
+ max_iterations : typing.Optional[int]
+ The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
+
+ version_name : typing.Optional[str]
+ Unique name for the Agent version. Each Agent can only have one version with a given name.
+
+ version_description : typing.Optional[str]
+ Description of the Version.
+
+ description : typing.Optional[str]
+ Description of the Agent.
+
+ tags : typing.Optional[typing.Sequence[str]]
+ List of tags associated with this Agent.
+
+ readme : typing.Optional[str]
+ Long description of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.upsert(
+ model="model",
+ )
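+
+ A fuller upsert might pin down the template and sampling parameters. All
+ values below are illustrative placeholders:
+
+ client.agents.upsert(
+     path="my-folder/my-agent",
+     model="gpt-4o",
+     template=[
+         {"role": "system", "content": "You are a helpful assistant."}
+     ],
+     temperature=0.7,
+     max_iterations=5,
+     version_name="v1",
+ )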
+ """
+ response = self._raw_client.upsert(
+ model=model,
+ path=path,
+ id=id,
+ endpoint=endpoint,
+ template=template,
+ template_language=template_language,
+ provider=provider,
+ max_tokens=max_tokens,
+ temperature=temperature,
+ top_p=top_p,
+ stop=stop,
+ presence_penalty=presence_penalty,
+ frequency_penalty=frequency_penalty,
+ other=other,
+ seed=seed,
+ response_format=response_format,
+ reasoning_effort=reasoning_effort,
+ tools=tools,
+ attributes=attributes,
+ max_iterations=max_iterations,
+ version_name=version_name,
+ version_description=version_description,
+ description=description,
+ tags=tags,
+ readme=readme,
+ request_options=request_options,
+ )
+ return response.data
+
+ def delete_agent_version(
+ self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> None:
+ """
+ Delete a version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.delete_agent_version(
+ id="id",
+ version_id="version_id",
+ )
+ """
+ response = self._raw_client.delete_agent_version(id, version_id, request_options=request_options)
+ return response.data
+
+ def patch_agent_version(
+ self,
+ id: str,
+ version_id: str,
+ *,
+ name: typing.Optional[str] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Update the name or description of the Agent version.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ name : typing.Optional[str]
+ Name of the version.
+
+ description : typing.Optional[str]
+ Description of the version.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.patch_agent_version(
+ id="id",
+ version_id="version_id",
+ )
+ """
+ response = self._raw_client.patch_agent_version(
+ id, version_id, name=name, description=description, request_options=request_options
+ )
+ return response.data
+
+ def get(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Retrieve the Agent with the given ID.
+
+ By default, the deployed version of the Agent is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.get(
+ id="id",
+ )
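+
+ To fetch the version deployed to a particular Environment instead (the ID
+ and Environment name are illustrative):
+
+ client.agents.get(
+     id="ag_123",
+     environment="production",
+ )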
+ """
+ response = self._raw_client.get(
+ id, version_id=version_id, environment=environment, request_options=request_options
+ )
+ return response.data
+
+ def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
+ """
+ Delete the Agent with the given ID.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.delete(
+ id="id",
+ )
+ """
+ response = self._raw_client.delete(id, request_options=request_options)
+ return response.data
+
+ def move(
+ self,
+ id: str,
+ *,
+ path: typing.Optional[str] = OMIT,
+ name: typing.Optional[str] = OMIT,
+ directory_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Move the Agent to a different path or change the name.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ path : typing.Optional[str]
+ Path of the Agent including the Agent name, which is used as a unique identifier.
+
+ name : typing.Optional[str]
+ Name of the Agent.
+
+ directory_id : typing.Optional[str]
+ Unique identifier for the Directory to move the Agent to. Starts with `dir_`.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.move(
+ id="id",
+ )
+ """
+ response = self._raw_client.move(
+ id, path=path, name=name, directory_id=directory_id, request_options=request_options
+ )
+ return response.data
+
+ def list_versions(
+ self,
+ id: str,
+ *,
+ evaluator_aggregates: typing.Optional[bool] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> ListAgents:
+ """
+ Get a list of all the versions of an Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ evaluator_aggregates : typing.Optional[bool]
+ Whether to include Evaluator aggregate results for the versions in the response.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ ListAgents
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.list_versions(
+ id="id",
+ )
+ """
+ response = self._raw_client.list_versions(
+ id, evaluator_aggregates=evaluator_aggregates, request_options=request_options
+ )
+ return response.data
+
+ def set_deployment(
+ self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> AgentResponse:
+ """
+ Deploy Agent to an Environment.
+
+ Set the deployed version for the specified Environment. This version
+ will be used for calls made to the Agent in this Environment.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ environment_id : str
+ Unique identifier for the Environment to deploy the Version to.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.set_deployment(
+ id="id",
+ environment_id="environment_id",
+ version_id="version_id",
+ )
+ """
+ response = self._raw_client.set_deployment(
+ id, environment_id, version_id=version_id, request_options=request_options
+ )
+ return response.data
+
+ def remove_deployment(
+ self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> None:
+ """
+ Remove deployed Agent from the Environment.
+
+ Remove the deployed version for the specified Environment. The removed version
+ will no longer be used for calls made to the Agent in this Environment.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ environment_id : str
+ Unique identifier for the Environment to remove the deployment from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.remove_deployment(
+ id="id",
+ environment_id="environment_id",
+ )
+ """
+ response = self._raw_client.remove_deployment(id, environment_id, request_options=request_options)
+ return response.data
+
+ def list_environments(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> typing.List[FileEnvironmentResponse]:
+ """
+ List all Environments and their deployed versions for the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.List[FileEnvironmentResponse]
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.list_environments(
+ id="id",
+ )
+ """
+ response = self._raw_client.list_environments(id, request_options=request_options)
+ return response.data
+
+ def update_monitoring(
+ self,
+ id: str,
+ *,
+ activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT,
+ deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Activate and deactivate Evaluators for monitoring the Agent.
+
+ An activated Evaluator will automatically be run on all new Logs
+ within the Agent for monitoring purposes.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]]
+ Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+
+ deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]]
+ Evaluators to deactivate. These will not be run on new Logs.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.update_monitoring(
+ id="id",
+ )
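+
+ A sketch of activating an Evaluator for monitoring, assuming the
+ `{"evaluator_version_id": ...}` item shape is accepted (the IDs are
+ illustrative placeholders):
+
+ client.agents.update_monitoring(
+     id="ag_123",
+     activate=[{"evaluator_version_id": "evv_456"}],
+ )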
+ """
+ response = self._raw_client.update_monitoring(
+ id, activate=activate, deactivate=deactivate, request_options=request_options
+ )
+ return response.data
+
+ def serialize(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> None:
+ """
+ Serialize an Agent to the .agent file format.
+
+ Useful for storing the Agent with your code in a version control system,
+ or for editing with an AI tool.
+
+ By default, the deployed version of the Agent is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.serialize(
+ id="id",
+ )
+ """
+ response = self._raw_client.serialize(
+ id, version_id=version_id, environment=environment, request_options=request_options
+ )
+ return response.data
+
+ def deserialize(self, *, agent: str, request_options: typing.Optional[RequestOptions] = None) -> AgentKernelRequest:
+ """
+ Deserialize an Agent from the .agent file format.
+
+ This returns a subset of the attributes required by an Agent.
+ This subset defines the Agent version (e.g. `model`, `temperature`, etc.).
+
+ Parameters
+ ----------
+ agent : str
+ The contents of a serialized .agent file to deserialize.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentKernelRequest
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.deserialize(
+ agent="agent",
+ )
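+
+ Together with `serialize`, this enables a file-based round trip; the .agent
+ file path here is an illustrative placeholder:
+
+ with open("my_agent.agent") as f:
+     kernel = client.agents.deserialize(agent=f.read())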
+ """
+ response = self._raw_client.deserialize(agent=agent, request_options=request_options)
+ return response.data
+
+
+class AsyncAgentsClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._raw_client = AsyncRawAgentsClient(client_wrapper=client_wrapper)
+
+ @property
+ def with_raw_response(self) -> AsyncRawAgentsClient:
+ """
+ Retrieves a raw implementation of this client that returns raw responses.
+
+ Returns
+ -------
+ AsyncRawAgentsClient
+ """
+ return self._raw_client
+
+ async def log(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ run_id: typing.Optional[str] = OMIT,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ output_message: typing.Optional[ChatMessageParams] = OMIT,
+ prompt_tokens: typing.Optional[int] = OMIT,
+ reasoning_tokens: typing.Optional[int] = OMIT,
+ output_tokens: typing.Optional[int] = OMIT,
+ prompt_cost: typing.Optional[float] = OMIT,
+ output_cost: typing.Optional[float] = OMIT,
+ finish_reason: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentLogRequestAgentParams] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ output: typing.Optional[str] = OMIT,
+ created_at: typing.Optional[dt.datetime] = OMIT,
+ error: typing.Optional[str] = OMIT,
+ provider_latency: typing.Optional[float] = OMIT,
+ stdout: typing.Optional[str] = OMIT,
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agent_log_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> CreateAgentLogResponse:
+ """
+ Create an Agent Log.
+
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+ If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete`
+ in order to trigger Evaluators.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ run_id : typing.Optional[str]
+ Unique identifier for the Run to associate the Log to.
+
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ output_message : typing.Optional[ChatMessageParams]
+ The message returned by the provider.
+
+ prompt_tokens : typing.Optional[int]
+ Number of tokens in the prompt used to generate the output.
+
+ reasoning_tokens : typing.Optional[int]
+ Number of reasoning tokens used to generate the output.
+
+ output_tokens : typing.Optional[int]
+ Number of tokens in the output generated by the model.
+
+ prompt_cost : typing.Optional[float]
+ Cost in dollars associated to the tokens in the prompt.
+
+ output_cost : typing.Optional[float]
+ Cost in dollars associated to the tokens in the output.
+
+ finish_reason : typing.Optional[str]
+ Reason the generation finished.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentLogRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentLogRequestAgentParams]
+ The agent configuration to use. Two formats are supported:
+ - An `AgentKernelRequest` object containing the agent configuration
+ - A string containing a serialized .agent file
+ A new Agent version will be created if the provided details are new.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ output : typing.Optional[str]
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+ created_at : typing.Optional[dt.datetime]
+ User defined timestamp for when the log was created.
+
+ error : typing.Optional[str]
+ Error message if the log is an error.
+
+ provider_latency : typing.Optional[float]
+ Duration of the logged event in seconds.
+
+ stdout : typing.Optional[str]
+ Captured log and debug statements.
+
+ provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw request sent to provider.
+
+ provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw response received from the provider.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agent_log_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ CreateAgentLogResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.log()
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.log(
+ version_id=version_id,
+ environment=environment,
+ run_id=run_id,
+ path=path,
+ id=id,
+ output_message=output_message,
+ prompt_tokens=prompt_tokens,
+ reasoning_tokens=reasoning_tokens,
+ output_tokens=output_tokens,
+ prompt_cost=prompt_cost,
+ output_cost=output_cost,
+ finish_reason=finish_reason,
+ messages=messages,
+ tool_choice=tool_choice,
+ agent=agent,
+ start_time=start_time,
+ end_time=end_time,
+ output=output,
+ created_at=created_at,
+ error=error,
+ provider_latency=provider_latency,
+ stdout=stdout,
+ provider_request=provider_request,
+ provider_response=provider_response,
+ inputs=inputs,
+ source=source,
+ metadata=metadata,
+ log_status=log_status,
+ source_datapoint_id=source_datapoint_id,
+ trace_parent_id=trace_parent_id,
+ user=user,
+ agent_log_request_environment=agent_log_request_environment,
+ save=save,
+ log_id=log_id,
+ request_options=request_options,
+ )
+ return response.data
+
+ async def update_log(
+ self,
+ id: str,
+ log_id: str,
+ *,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ output_message: typing.Optional[ChatMessageParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ output: typing.Optional[str] = OMIT,
+ error: typing.Optional[str] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> LogResponse:
+ """
+ Update a Log.
+
+ Update the details of a Log with the given ID.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ log_id : str
+ Unique identifier for the Log.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ List of chat messages that were used as an input to the Agent.
+
+ output_message : typing.Optional[ChatMessageParams]
+ The output message returned by this Agent.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the Agent Log.
+
+ output : typing.Optional[str]
+ The output of the Agent Log. Provide None to unset the existing `output` value. Provide either this, `output_message`, or `error`.
+
+ error : typing.Optional[str]
+ The error message of the Agent Log. Provide None to unset the existing `error` value. Provide either this, `output_message`, or `output`.
+
+ log_status : typing.Optional[LogStatus]
+ Status of the Agent Log. When an Agent Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ LogResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.update_log(
+ id="id",
+ log_id="log_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.update_log(
+ id,
+ log_id,
+ messages=messages,
+ output_message=output_message,
+ inputs=inputs,
+ output=output,
+ error=error,
+ log_status=log_status,
+ request_options=request_options,
+ )
+ return response.data
+
+ async def call_stream(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentsCallStreamRequestAgentParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agents_call_stream_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ return_inputs: typing.Optional[bool] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.AsyncIterator[AgentCallStreamResponse]:
+ """
+ Call an Agent.
+
+ Calling an Agent invokes the model provider before logging
+ the request, responses, and metadata to Humanloop.
+
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can pass in
+ Agent details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+ when you are storing or deriving your Agent details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentsCallStreamRequestAgentParams]
+ The agent configuration to use. Two formats are supported:
+ - An `AgentKernelRequest` object containing the agent configuration
+ - A string containing a serialized .agent file
+ A new Agent version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agents_call_stream_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Yields
+ ------
+ typing.AsyncIterator[AgentCallStreamResponse]
+
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ response = await client.agents.call_stream()
+ async for chunk in response:
+ print(chunk)
+
+
+ asyncio.run(main())
+ """
+ async with self._raw_client.call_stream(
+ version_id=version_id,
+ environment=environment,
+ path=path,
+ id=id,
+ messages=messages,
+ tool_choice=tool_choice,
+ agent=agent,
+ inputs=inputs,
+ source=source,
+ metadata=metadata,
+ start_time=start_time,
+ end_time=end_time,
+ log_status=log_status,
+ source_datapoint_id=source_datapoint_id,
+ trace_parent_id=trace_parent_id,
+ user=user,
+ agents_call_stream_request_environment=agents_call_stream_request_environment,
+ save=save,
+ log_id=log_id,
+ provider_api_keys=provider_api_keys,
+ return_inputs=return_inputs,
+ include_trace_children=include_trace_children,
+ request_options=request_options,
+ ) as r:
+ async for data in r.data:
+ yield data
+
+ async def call(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentsCallRequestAgentParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agents_call_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ return_inputs: typing.Optional[bool] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentCallResponse:
+ """
+ Call an Agent.
+
+ Calling an Agent calls the model provider before logging
+ the request, responses and metadata to Humanloop.
+
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can pass in
+ Agent details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Agent. If they do not, we will create a new version. This is
+ helpful when you are storing or deriving your Agent details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentsCallRequestAgentParams]
+ The agent configuration to use. Two formats are supported:
+ - An `AgentKernelRequest` object containing the agent configuration
+ - A string containing a serialized .agent file
+ A new Agent version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agents_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentCallResponse
+
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.call()
+
+
+ asyncio.run(main())
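+
+ Targeting an Agent by path and passing messages is more typical; the
+ values below are illustrative placeholders:
+
+ async def call_by_path() -> None:
+     response = await client.agents.call(
+         path="example/agent",
+         messages=[{"role": "user", "content": "Hello"}],
+     )
+     print(response)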
+ """
+ response = await self._raw_client.call(
+ version_id=version_id,
+ environment=environment,
+ path=path,
+ id=id,
+ messages=messages,
+ tool_choice=tool_choice,
+ agent=agent,
+ inputs=inputs,
+ source=source,
+ metadata=metadata,
+ start_time=start_time,
+ end_time=end_time,
+ log_status=log_status,
+ source_datapoint_id=source_datapoint_id,
+ trace_parent_id=trace_parent_id,
+ user=user,
+ agents_call_request_environment=agents_call_request_environment,
+ save=save,
+ log_id=log_id,
+ provider_api_keys=provider_api_keys,
+ return_inputs=return_inputs,
+ include_trace_children=include_trace_children,
+ request_options=request_options,
+ )
+ return response.data
+
+ async def continue_stream(
+ self,
+ *,
+ log_id: str,
+ messages: typing.Sequence[ChatMessageParams],
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.AsyncIterator[AgentContinueStreamResponse]:
+ """
+ Continue an incomplete Agent call.
+
+ This endpoint allows continuing an existing incomplete Agent call, using the context
+ from the previous interaction. The Agent will resume processing from where it left off.
+
+ The original log must be in an incomplete state to be continued.
+
+ The messages in the request will be appended
+ to the original messages in the log.
+
+ Parameters
+ ----------
+ log_id : str
+ This identifies the Agent Log to continue.
+
+ messages : typing.Sequence[ChatMessageParams]
+ The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Yields
+ ------
+ typing.AsyncIterator[AgentContinueStreamResponse]
+
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+     response = client.agents.continue_stream(
+         log_id="log_id",
+         messages=[{"role": "user"}],
+     )
+     async for chunk in response:
+         print(chunk)
+
+
+ asyncio.run(main())
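+
+ When continuing after tool calls, the new messages typically carry the
+ tool results. The sketch below assumes the ChatMessage schema accepts a
+ `tool_call_id` for tool results; all values are illustrative:
+
+ async def continue_with_tool_result() -> None:
+     stream = client.agents.continue_stream(
+         log_id="log_id",
+         messages=[
+             {"role": "tool", "tool_call_id": "call_id", "content": "42"}
+         ],
+     )
+     async for chunk in stream:
+         print(chunk)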
+ """
+ async with self._raw_client.continue_stream(
+ log_id=log_id,
+ messages=messages,
+ provider_api_keys=provider_api_keys,
+ include_trace_children=include_trace_children,
+ request_options=request_options,
+ ) as r:
+ async for data in r.data:
+ yield data
+
+ async def continue_(
+ self,
+ *,
+ log_id: str,
+ messages: typing.Sequence[ChatMessageParams],
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentContinueResponse:
+ """
+ Continue an incomplete Agent call.
+
+ This endpoint allows continuing an existing incomplete Agent call, using the context
+ from the previous interaction. The Agent will resume processing from where it left off.
+
+ The original log must be in an incomplete state to be continued.
+
+ The messages in the request will be appended
+ to the original messages in the log.
+
+ Parameters
+ ----------
+ log_id : str
+ This identifies the Agent Log to continue.
+
+ messages : typing.Sequence[ChatMessageParams]
+ The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentContinueResponse
+
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.continue_(
+ log_id="log_id",
+ messages=[{"role": "user"}],
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.continue_(
+ log_id=log_id,
+ messages=messages,
+ provider_api_keys=provider_api_keys,
+ include_trace_children=include_trace_children,
+ request_options=request_options,
+ )
+ return response.data
+
+ async def list(
+ self,
+ *,
+ page: typing.Optional[int] = None,
+ size: typing.Optional[int] = None,
+ name: typing.Optional[str] = None,
+ user_filter: typing.Optional[str] = None,
+ sort_by: typing.Optional[ProjectSortBy] = None,
+ order: typing.Optional[SortOrder] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> PaginatedDataAgentResponse:
+ """
+ Get a list of all Agents.
+
+ Parameters
+ ----------
+ page : typing.Optional[int]
+ Page number for pagination.
+
+ size : typing.Optional[int]
+ Page size for pagination. Number of Agents to fetch.
+
+ name : typing.Optional[str]
+ Case-insensitive filter for Agent name.
+
+ user_filter : typing.Optional[str]
+ Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users.
+
+ sort_by : typing.Optional[ProjectSortBy]
+ Field to sort Agents by.
+
+ order : typing.Optional[SortOrder]
+ Direction to sort by.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ PaginatedDataAgentResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.list()
+
+
+ asyncio.run(main())
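+
+ Filters and pagination can be combined. This sketch assumes the
+ paginated response exposes its items under `records`; the filter
+ values are illustrative:
+
+ async def list_filtered() -> None:
+     page = await client.agents.list(
+         page=1,
+         size=10,
+         name="support",
+     )
+     for agent in page.records:
+         print(agent.id)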
+ """
+ response = await self._raw_client.list(
+ page=page,
+ size=size,
+ name=name,
+ user_filter=user_filter,
+ sort_by=sort_by,
+ order=order,
+ request_options=request_options,
+ )
+ return response.data
+
+ async def upsert(
+ self,
+ *,
+ model: str,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ endpoint: typing.Optional[ModelEndpoints] = OMIT,
+ template: typing.Optional[AgentRequestTemplateParams] = OMIT,
+ template_language: typing.Optional[TemplateLanguage] = OMIT,
+ provider: typing.Optional[ModelProviders] = OMIT,
+ max_tokens: typing.Optional[int] = OMIT,
+ temperature: typing.Optional[float] = OMIT,
+ top_p: typing.Optional[float] = OMIT,
+ stop: typing.Optional[AgentRequestStopParams] = OMIT,
+ presence_penalty: typing.Optional[float] = OMIT,
+ frequency_penalty: typing.Optional[float] = OMIT,
+ other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ seed: typing.Optional[int] = OMIT,
+ response_format: typing.Optional[ResponseFormatParams] = OMIT,
+ reasoning_effort: typing.Optional[AgentRequestReasoningEffortParams] = OMIT,
+ tools: typing.Optional[typing.Sequence[AgentRequestToolsItemParams]] = OMIT,
+ attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ max_iterations: typing.Optional[int] = OMIT,
+ version_name: typing.Optional[str] = OMIT,
+ version_description: typing.Optional[str] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ tags: typing.Optional[typing.Sequence[str]] = OMIT,
+ readme: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Create an Agent or update it with a new version if it already exists.
+
+ Agents are identified by the `ID` or their `path`. The parameters (i.e. the template, temperature, model etc.) and
+ tools determine the versions of the Agent.
+
+ You can provide `version_name` and `version_description` to identify and describe your versions.
+ Version names must be unique within an Agent - attempting to create a version with a name
+ that already exists will result in a 409 Conflict error.
+
+ Parameters
+ ----------
+ model : str
+ The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ endpoint : typing.Optional[ModelEndpoints]
+ The provider model endpoint used.
+
+ template : typing.Optional[AgentRequestTemplateParams]
+ The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+ For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+ For completion models, provide a prompt template as a string.
+
+ Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+
+ template_language : typing.Optional[TemplateLanguage]
+ The template language to use for rendering the template.
+
+ provider : typing.Optional[ModelProviders]
+ The company providing the underlying model service.
+
+ max_tokens : typing.Optional[int]
+ The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt
+
+ temperature : typing.Optional[float]
+ What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+
+ top_p : typing.Optional[float]
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+
+ stop : typing.Optional[AgentRequestStopParams]
+ The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+
+ presence_penalty : typing.Optional[float]
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+
+ frequency_penalty : typing.Optional[float]
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+
+ other : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Other parameter values to be passed to the provider call.
+
+ seed : typing.Optional[int]
+ If specified, model will make a best effort to sample deterministically, but it is not guaranteed.
+
+ response_format : typing.Optional[ResponseFormatParams]
+ The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+
+ reasoning_effort : typing.Optional[AgentRequestReasoningEffortParams]
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
+
+ tools : typing.Optional[typing.Sequence[AgentRequestToolsItemParams]]
+     The tools available to the Agent.
+
+ attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used.
+
+ max_iterations : typing.Optional[int]
+ The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
+
+ version_name : typing.Optional[str]
+ Unique name for the Agent version. Each Agent can only have one version with a given name.
+
+ version_description : typing.Optional[str]
+ Description of the Version.
+
+ description : typing.Optional[str]
+ Description of the Agent.
+
+ tags : typing.Optional[typing.Sequence[str]]
+ List of tags associated with this Agent.
+
+ readme : typing.Optional[str]
+ Long description of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.upsert(
+ model="model",
+ )
+
+
+ asyncio.run(main())
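+
+ A fuller upsert might pin a chat template and name the version. The
+ sketch assumes the response exposes `version_id`; all values are
+ illustrative:
+
+ async def upsert_versioned() -> None:
+     agent = await client.agents.upsert(
+         path="example/agent",
+         model="gpt-4o",
+         template=[
+             {"role": "system", "content": "Answer questions about {{topic}}."}
+         ],
+         max_iterations=5,
+         version_name="v1",
+     )
+     print(agent.version_id)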
+ """
+ response = await self._raw_client.upsert(
+ model=model,
+ path=path,
+ id=id,
+ endpoint=endpoint,
+ template=template,
+ template_language=template_language,
+ provider=provider,
+ max_tokens=max_tokens,
+ temperature=temperature,
+ top_p=top_p,
+ stop=stop,
+ presence_penalty=presence_penalty,
+ frequency_penalty=frequency_penalty,
+ other=other,
+ seed=seed,
+ response_format=response_format,
+ reasoning_effort=reasoning_effort,
+ tools=tools,
+ attributes=attributes,
+ max_iterations=max_iterations,
+ version_name=version_name,
+ version_description=version_description,
+ description=description,
+ tags=tags,
+ readme=readme,
+ request_options=request_options,
+ )
+ return response.data
+
+ async def delete_agent_version(
+ self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> None:
+ """
+ Delete a version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.delete_agent_version(
+ id="id",
+ version_id="version_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.delete_agent_version(id, version_id, request_options=request_options)
+ return response.data
+
+ async def patch_agent_version(
+ self,
+ id: str,
+ version_id: str,
+ *,
+ name: typing.Optional[str] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Update the name or description of the Agent version.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ name : typing.Optional[str]
+ Name of the version.
+
+ description : typing.Optional[str]
+ Description of the version.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.patch_agent_version(
+ id="id",
+ version_id="version_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.patch_agent_version(
+ id, version_id, name=name, description=description, request_options=request_options
+ )
+ return response.data
+
+ async def get(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Retrieve the Agent with the given ID.
+
+ By default, the deployed version of the Agent is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.get(
+ id="id",
+ )
+
+
+ asyncio.run(main())
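+
+ To fetch the version deployed to a specific Environment (the name is
+ illustrative):
+
+ async def get_deployed() -> None:
+     agent = await client.agents.get(
+         id="id",
+         environment="production",
+     )
+     print(agent)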
+ """
+ response = await self._raw_client.get(
+ id, version_id=version_id, environment=environment, request_options=request_options
+ )
+ return response.data
+
+ async def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
+ """
+ Delete the Agent with the given ID.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.delete(
+ id="id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.delete(id, request_options=request_options)
+ return response.data
+
+ async def move(
+ self,
+ id: str,
+ *,
+ path: typing.Optional[str] = OMIT,
+ name: typing.Optional[str] = OMIT,
+ directory_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Move the Agent to a different path or change the name.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ path : typing.Optional[str]
+ Path of the Agent including the Agent name, which is used as a unique identifier.
+
+ name : typing.Optional[str]
+ Name of the Agent.
+
+ directory_id : typing.Optional[str]
+ Unique identifier for the Directory to move the Agent to. Starts with `dir_`.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.move(
+ id="id",
+ )
+
+
+ asyncio.run(main())
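+
+ To rename the Agent or move it into another folder (paths are
+ illustrative):
+
+ async def move_agent() -> None:
+     await client.agents.move(
+         id="id",
+         path="new-folder/new-name",
+     )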
+ """
+ response = await self._raw_client.move(
+ id, path=path, name=name, directory_id=directory_id, request_options=request_options
+ )
+ return response.data
+
+ async def list_versions(
+ self,
+ id: str,
+ *,
+ evaluator_aggregates: typing.Optional[bool] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> ListAgents:
+ """
+ Get a list of all the versions of an Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ evaluator_aggregates : typing.Optional[bool]
+ Whether to include Evaluator aggregate results for the versions in the response.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ ListAgents
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.list_versions(
+ id="id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.list_versions(
+ id, evaluator_aggregates=evaluator_aggregates, request_options=request_options
+ )
+ return response.data
+
+ async def set_deployment(
+ self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> AgentResponse:
+ """
+ Deploy Agent to an Environment.
+
+ Set the deployed version for the specified Environment. This version of the Agent
+ will be used for calls made to the Agent in this Environment.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ environment_id : str
+ Unique identifier for the Environment to deploy the Version to.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.set_deployment(
+ id="id",
+ environment_id="environment_id",
+ version_id="version_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.set_deployment(
+ id, environment_id, version_id=version_id, request_options=request_options
+ )
+ return response.data
+
+ async def remove_deployment(
+ self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> None:
+ """
+ Remove deployed Agent from the Environment.
+
+ Remove the deployed version for the specified Environment. The Agent
+ will no longer be used for calls made to it in this Environment.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ environment_id : str
+ Unique identifier for the Environment to remove the deployment from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.remove_deployment(
+ id="id",
+ environment_id="environment_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.remove_deployment(id, environment_id, request_options=request_options)
+ return response.data
+
+ async def list_environments(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> typing.List[FileEnvironmentResponse]:
+ """
+ List all Environments and their deployed versions for the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.List[FileEnvironmentResponse]
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.list_environments(
+ id="id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.list_environments(id, request_options=request_options)
+ return response.data
+
+ async def update_monitoring(
+ self,
+ id: str,
+ *,
+ activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT,
+ deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Activate and deactivate Evaluators for monitoring the Agent.
+
+ An activated Evaluator will automatically be run on all new Logs
+ within the Agent for monitoring purposes.
+
+ Parameters
+ ----------
+ id : str
+     Unique identifier for Agent.
+
+ activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]]
+ Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+
+ deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]]
+ Evaluators to deactivate. These will not be run on new Logs.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.update_monitoring(
+ id="id",
+ )
+
+
+ asyncio.run(main())
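+
+ To attach a monitoring Evaluator, pass activate items. The key name
+ below is an assumption about the activate item schema, and the IDs are
+ illustrative:
+
+ async def activate_evaluator() -> None:
+     await client.agents.update_monitoring(
+         id="id",
+         activate=[{"evaluator_version_id": "version_id"}],
+     )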
+ """
+ response = await self._raw_client.update_monitoring(
+ id, activate=activate, deactivate=deactivate, request_options=request_options
+ )
+ return response.data
+
+ async def serialize(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> None:
+ """
+ Serialize an Agent to the .agent file format.
+
+ Useful for storing the Agent with your code in a version control system,
+ or for editing with an AI tool.
+
+ By default, the deployed version of the Agent is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.serialize(
+ id="id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.serialize(
+ id, version_id=version_id, environment=environment, request_options=request_options
+ )
+ return response.data
+
+ async def deserialize(
+ self, *, agent: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> AgentKernelRequest:
+ """
+ Deserialize an Agent from the .agent file format.
+
+ This returns the subset of an Agent's attributes that defines the
+ Agent version (e.g. `model`, `temperature`, etc.).
+
+ Parameters
+ ----------
+ agent : str
+     The serialized Agent in the .agent file format.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentKernelRequest
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.deserialize(
+ agent="agent",
+ )
+
+
+ asyncio.run(main())
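+
+ A typical round trip pairs this with `serialize`: store the serialized
+ Agent in version control, then deserialize it to recover the version
+ attributes. This sketch assumes the returned kernel exposes `model`:
+
+ async def round_trip(agent_file: str) -> None:
+     kernel = await client.agents.deserialize(
+         agent=agent_file,
+     )
+     print(kernel.model)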
+ """
+ response = await self._raw_client.deserialize(agent=agent, request_options=request_options)
+ return response.data
diff --git a/src/humanloop/agents/raw_client.py b/src/humanloop/agents/raw_client.py
new file mode 100644
index 00000000..08e04bab
--- /dev/null
+++ b/src/humanloop/agents/raw_client.py
@@ -0,0 +1,3889 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ..core.client_wrapper import SyncClientWrapper
+from ..requests.chat_message import ChatMessageParams
+from .requests.agent_log_request_tool_choice import AgentLogRequestToolChoiceParams
+from .requests.agent_log_request_agent import AgentLogRequestAgentParams
+import datetime as dt
+from ..types.log_status import LogStatus
+from ..core.request_options import RequestOptions
+from ..core.http_response import HttpResponse
+from ..types.create_agent_log_response import CreateAgentLogResponse
+from ..core.serialization import convert_and_respect_annotation_metadata
+from ..core.unchecked_base_model import construct_type
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.http_validation_error import HttpValidationError
+import json
+from json.decoder import JSONDecodeError
+from ..core.api_error import ApiError
+from ..types.log_response import LogResponse
+from ..core.jsonable_encoder import jsonable_encoder
+from .requests.agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoiceParams
+from .requests.agents_call_stream_request_agent import AgentsCallStreamRequestAgentParams
+from ..requests.provider_api_keys import ProviderApiKeysParams
+from ..types.agent_call_stream_response import AgentCallStreamResponse
+import httpx_sse
+import contextlib
+from .requests.agents_call_request_tool_choice import AgentsCallRequestToolChoiceParams
+from .requests.agents_call_request_agent import AgentsCallRequestAgentParams
+from ..types.agent_call_response import AgentCallResponse
+from ..types.agent_continue_stream_response import AgentContinueStreamResponse
+from ..types.agent_continue_response import AgentContinueResponse
+from ..types.project_sort_by import ProjectSortBy
+from ..types.sort_order import SortOrder
+from ..types.paginated_data_agent_response import PaginatedDataAgentResponse
+from ..types.model_endpoints import ModelEndpoints
+from .requests.agent_request_template import AgentRequestTemplateParams
+from ..types.template_language import TemplateLanguage
+from ..types.model_providers import ModelProviders
+from .requests.agent_request_stop import AgentRequestStopParams
+from ..requests.response_format import ResponseFormatParams
+from .requests.agent_request_reasoning_effort import AgentRequestReasoningEffortParams
+from .requests.agent_request_tools_item import AgentRequestToolsItemParams
+from ..types.agent_response import AgentResponse
+from ..types.list_agents import ListAgents
+from ..types.file_environment_response import FileEnvironmentResponse
+from ..requests.evaluator_activation_deactivation_request_activate_item import (
+ EvaluatorActivationDeactivationRequestActivateItemParams,
+)
+from ..requests.evaluator_activation_deactivation_request_deactivate_item import (
+ EvaluatorActivationDeactivationRequestDeactivateItemParams,
+)
+from ..types.agent_kernel_request import AgentKernelRequest
+from ..core.client_wrapper import AsyncClientWrapper
+from ..core.http_response import AsyncHttpResponse
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class RawAgentsClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ def log(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ run_id: typing.Optional[str] = OMIT,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ output_message: typing.Optional[ChatMessageParams] = OMIT,
+ prompt_tokens: typing.Optional[int] = OMIT,
+ reasoning_tokens: typing.Optional[int] = OMIT,
+ output_tokens: typing.Optional[int] = OMIT,
+ prompt_cost: typing.Optional[float] = OMIT,
+ output_cost: typing.Optional[float] = OMIT,
+ finish_reason: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentLogRequestAgentParams] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ output: typing.Optional[str] = OMIT,
+ created_at: typing.Optional[dt.datetime] = OMIT,
+ error: typing.Optional[str] = OMIT,
+ provider_latency: typing.Optional[float] = OMIT,
+ stdout: typing.Optional[str] = OMIT,
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agent_log_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[CreateAgentLogResponse]:
+ """
+ Create an Agent Log.
+
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+ If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete`
+ in order to trigger Evaluators.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ run_id : typing.Optional[str]
+ Unique identifier for the Run to associate the Log to.
+
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ output_message : typing.Optional[ChatMessageParams]
+ The message returned by the provider.
+
+ prompt_tokens : typing.Optional[int]
+ Number of tokens in the prompt used to generate the output.
+
+ reasoning_tokens : typing.Optional[int]
+ Number of reasoning tokens used to generate the output.
+
+ output_tokens : typing.Optional[int]
+ Number of tokens in the output generated by the model.
+
+ prompt_cost : typing.Optional[float]
+ Cost in dollars associated to the tokens in the prompt.
+
+ output_cost : typing.Optional[float]
+ Cost in dollars associated to the tokens in the output.
+
+ finish_reason : typing.Optional[str]
+ Reason the generation finished.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentLogRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentLogRequestAgentParams]
+ The agent configuration to use. Two formats are supported:
+ - An `AgentKernelRequest` object containing the agent configuration
+ - A string containing a serialized .agent file
+ A new Agent version will be created if the provided details are new.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ output : typing.Optional[str]
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+ created_at : typing.Optional[dt.datetime]
+ User defined timestamp for when the log was created.
+
+ error : typing.Optional[str]
+ Error message if the log is an error.
+
+ provider_latency : typing.Optional[float]
+ Duration of the logged event in seconds.
+
+ stdout : typing.Optional[str]
+ Captured log and debug statements.
+
+ provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw request sent to provider.
+
+ provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw response received from the provider.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agent_log_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[CreateAgentLogResponse]
+ Successful Response
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "agents/log",
+ method="POST",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ json={
+ "run_id": run_id,
+ "path": path,
+ "id": id,
+ "output_message": convert_and_respect_annotation_metadata(
+ object_=output_message, annotation=ChatMessageParams, direction="write"
+ ),
+ "prompt_tokens": prompt_tokens,
+ "reasoning_tokens": reasoning_tokens,
+ "output_tokens": output_tokens,
+ "prompt_cost": prompt_cost,
+ "output_cost": output_cost,
+ "finish_reason": finish_reason,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "tool_choice": convert_and_respect_annotation_metadata(
+ object_=tool_choice, annotation=AgentLogRequestToolChoiceParams, direction="write"
+ ),
+ "agent": convert_and_respect_annotation_metadata(
+ object_=agent, annotation=AgentLogRequestAgentParams, direction="write"
+ ),
+ "start_time": start_time,
+ "end_time": end_time,
+ "output": output,
+ "created_at": created_at,
+ "error": error,
+ "provider_latency": provider_latency,
+ "stdout": stdout,
+ "provider_request": provider_request,
+ "provider_response": provider_response,
+ "inputs": inputs,
+ "source": source,
+ "metadata": metadata,
+ "log_status": log_status,
+ "source_datapoint_id": source_datapoint_id,
+ "trace_parent_id": trace_parent_id,
+ "user": user,
+ "environment": agent_log_request_environment,
+ "save": save,
+ "log_id": log_id,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ CreateAgentLogResponse,
+ construct_type(
+ type_=CreateAgentLogResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def update_log(
+ self,
+ id: str,
+ log_id: str,
+ *,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ output_message: typing.Optional[ChatMessageParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ output: typing.Optional[str] = OMIT,
+ error: typing.Optional[str] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[LogResponse]:
+ """
+ Update a Log.
+
+ Update the details of a Log with the given ID.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ log_id : str
+ Unique identifier for the Log.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ List of chat messages that were used as an input to the Agent.
+
+ output_message : typing.Optional[ChatMessageParams]
+ The output message returned by this Agent.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the Agent Log.
+
+ output : typing.Optional[str]
+ The output of the Agent Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`.
+
+ error : typing.Optional[str]
+ The error message of the Agent Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`.
+
+ log_status : typing.Optional[LogStatus]
+ Status of the Agent Log. When an Agent Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[LogResponse]
+ Successful Response
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/log/{jsonable_encoder(log_id)}",
+ method="PATCH",
+ json={
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "output_message": convert_and_respect_annotation_metadata(
+ object_=output_message, annotation=ChatMessageParams, direction="write"
+ ),
+ "inputs": inputs,
+ "output": output,
+ "error": error,
+ "log_status": log_status,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ LogResponse,
+ construct_type(
+ type_=LogResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ @contextlib.contextmanager
+ def call_stream(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentsCallStreamRequestAgentParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agents_call_stream_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ return_inputs: typing.Optional[bool] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.Iterator[HttpResponse[typing.Iterator[AgentCallStreamResponse]]]:
+ """
+ Call an Agent.
+
+ Calling an Agent calls the model provider before logging
+ the request, responses and metadata to Humanloop.
+
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can pass in
+ Agent details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Agent. If they do not, we will create a new version. This is
+ helpful when you are storing or deriving your Agent details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentsCallStreamRequestAgentParams]
+ The agent configuration to use. Two formats are supported:
+ - An `AgentKernelRequest` object containing the agent configuration
+ - A string containing a serialized .agent file
+ A new Agent version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agents_call_stream_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Yields
+ ------
+ typing.Iterator[HttpResponse[typing.Iterator[AgentCallStreamResponse]]]
+
+ """
+ with self._client_wrapper.httpx_client.stream(
+ "agents/call",
+ method="POST",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ json={
+ "path": path,
+ "id": id,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "tool_choice": convert_and_respect_annotation_metadata(
+ object_=tool_choice, annotation=AgentsCallStreamRequestToolChoiceParams, direction="write"
+ ),
+ "agent": convert_and_respect_annotation_metadata(
+ object_=agent, annotation=AgentsCallStreamRequestAgentParams, direction="write"
+ ),
+ "inputs": inputs,
+ "source": source,
+ "metadata": metadata,
+ "start_time": start_time,
+ "end_time": end_time,
+ "log_status": log_status,
+ "source_datapoint_id": source_datapoint_id,
+ "trace_parent_id": trace_parent_id,
+ "user": user,
+ "environment": agents_call_stream_request_environment,
+ "save": save,
+ "log_id": log_id,
+ "provider_api_keys": convert_and_respect_annotation_metadata(
+ object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+ ),
+ "return_inputs": return_inputs,
+ "include_trace_children": include_trace_children,
+ "stream": True,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ ) as _response:
+
+ def stream() -> HttpResponse[typing.Iterator[AgentCallStreamResponse]]:
+ try:
+ if 200 <= _response.status_code < 300:
+
+ def _iter():
+ _event_source = httpx_sse.EventSource(_response)
+ for _sse in _event_source.iter_sse():
+ if _sse.data is None:
+     return
+ try:
+     yield typing.cast(
+         AgentCallStreamResponse,
+         construct_type(
+             type_=AgentCallStreamResponse,  # type: ignore
+             object_=json.loads(_sse.data),
+         ),
+     )
+ except Exception:
+     pass
+ return
+
+ return HttpResponse(response=_response, data=_iter())
+ _response.read()
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ yield stream()
+
+ def call(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentsCallRequestAgentParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agents_call_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ return_inputs: typing.Optional[bool] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[AgentCallResponse]:
+ """
+ Call an Agent.
+
+ Calling an Agent calls the model provider before logging
+ the request, responses and metadata to Humanloop.
+
+        You can use the query parameters `version_id` or `environment` to target
+        an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+        Instead of targeting an existing version explicitly, you can pass Agent
+        details in the request body. We will then check whether those details correspond
+        to an existing version of the Agent and, if they do not, create a new version. This is helpful
+        when you are storing or deriving your Agent details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentsCallRequestAgentParams]
+ The agent configuration to use. Two formats are supported:
+ - An `AgentKernelRequest` object containing the agent configuration
+ - A string containing a serialized .agent file
+ A new Agent version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agents_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[AgentCallResponse]
+
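+        Examples
+        --------
+        Illustrative sketch only; assumes the generated
+        `client.agents.with_raw_response` accessor, with a placeholder Agent
+        `path` and message.
+
+        from humanloop import Humanloop
+
+        client = Humanloop(api_key="YOUR_API_KEY")
+        response = client.agents.with_raw_response.call(
+            path="my-agent",
+            messages=[{"role": "user", "content": "Hello"}],
+        )
+        print(response.data)
+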
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "agents/call",
+ method="POST",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ json={
+ "path": path,
+ "id": id,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "tool_choice": convert_and_respect_annotation_metadata(
+ object_=tool_choice, annotation=AgentsCallRequestToolChoiceParams, direction="write"
+ ),
+ "agent": convert_and_respect_annotation_metadata(
+ object_=agent, annotation=AgentsCallRequestAgentParams, direction="write"
+ ),
+ "inputs": inputs,
+ "source": source,
+ "metadata": metadata,
+ "start_time": start_time,
+ "end_time": end_time,
+ "log_status": log_status,
+ "source_datapoint_id": source_datapoint_id,
+ "trace_parent_id": trace_parent_id,
+ "user": user,
+ "environment": agents_call_request_environment,
+ "save": save,
+ "log_id": log_id,
+ "provider_api_keys": convert_and_respect_annotation_metadata(
+ object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+ ),
+ "return_inputs": return_inputs,
+ "include_trace_children": include_trace_children,
+ "stream": False,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentCallResponse,
+ construct_type(
+ type_=AgentCallResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ @contextlib.contextmanager
+ def continue_stream(
+ self,
+ *,
+ log_id: str,
+ messages: typing.Sequence[ChatMessageParams],
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.Iterator[HttpResponse[typing.Iterator[AgentContinueStreamResponse]]]:
+ """
+ Continue an incomplete Agent call.
+
+ This endpoint allows continuing an existing incomplete Agent call, using the context
+ from the previous interaction. The Agent will resume processing from where it left off.
+
+ The original log must be in an incomplete state to be continued.
+
+ The messages in the request will be appended
+ to the original messages in the log.
+
+ Parameters
+ ----------
+ log_id : str
+ This identifies the Agent Log to continue.
+
+ messages : typing.Sequence[ChatMessageParams]
+ The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Yields
+ ------
+ typing.Iterator[HttpResponse[typing.Iterator[AgentContinueStreamResponse]]]
+
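+        Examples
+        --------
+        Illustrative sketch only; the `log_id` and the tool-result message are
+        placeholders, and the `with_raw_response` accessor is assumed.
+
+        from humanloop import Humanloop
+
+        client = Humanloop(api_key="YOUR_API_KEY")
+        with client.agents.with_raw_response.continue_stream(
+            log_id="log_...",
+            messages=[{"role": "tool", "content": "42", "tool_call_id": "call_..."}],
+        ) as response:
+            for chunk in response.data:
+                print(chunk)
+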
+ """
+ with self._client_wrapper.httpx_client.stream(
+ "agents/continue",
+ method="POST",
+ json={
+ "log_id": log_id,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "provider_api_keys": convert_and_respect_annotation_metadata(
+ object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+ ),
+ "include_trace_children": include_trace_children,
+ "stream": True,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ ) as _response:
+
+ def stream() -> HttpResponse[typing.Iterator[AgentContinueStreamResponse]]:
+ try:
+ if 200 <= _response.status_code < 300:
+
+ def _iter():
+ _event_source = httpx_sse.EventSource(_response)
+ for _sse in _event_source.iter_sse():
+                            if _sse.data is None:
+                                return
+                            try:
+                                # Parse each SSE payload into the typed stream
+                                # response; assumes `json` is imported at module
+                                # scope alongside the other imports.
+                                yield typing.cast(
+                                    AgentContinueStreamResponse,
+                                    construct_type(
+                                        type_=AgentContinueStreamResponse,  # type: ignore
+                                        object_=json.loads(_sse.data),
+                                    ),
+                                )
+                            except Exception:
+                                pass
+ return
+
+ return HttpResponse(response=_response, data=_iter())
+ _response.read()
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ yield stream()
+
+ def continue_(
+ self,
+ *,
+ log_id: str,
+ messages: typing.Sequence[ChatMessageParams],
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[AgentContinueResponse]:
+ """
+ Continue an incomplete Agent call.
+
+ This endpoint allows continuing an existing incomplete Agent call, using the context
+ from the previous interaction. The Agent will resume processing from where it left off.
+
+ The original log must be in an incomplete state to be continued.
+
+ The messages in the request will be appended
+ to the original messages in the log.
+
+ Parameters
+ ----------
+ log_id : str
+ This identifies the Agent Log to continue.
+
+ messages : typing.Sequence[ChatMessageParams]
+ The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[AgentContinueResponse]
+
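+        Examples
+        --------
+        Illustrative sketch only; the `log_id` and the tool-result message are
+        placeholders, and the `with_raw_response` accessor is assumed.
+
+        from humanloop import Humanloop
+
+        client = Humanloop(api_key="YOUR_API_KEY")
+        response = client.agents.with_raw_response.continue_(
+            log_id="log_...",
+            messages=[{"role": "tool", "content": "42", "tool_call_id": "call_..."}],
+        )
+        print(response.data)
+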
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "agents/continue",
+ method="POST",
+ json={
+ "log_id": log_id,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "provider_api_keys": convert_and_respect_annotation_metadata(
+ object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+ ),
+ "include_trace_children": include_trace_children,
+ "stream": False,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentContinueResponse,
+ construct_type(
+ type_=AgentContinueResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def list(
+ self,
+ *,
+ page: typing.Optional[int] = None,
+ size: typing.Optional[int] = None,
+ name: typing.Optional[str] = None,
+ user_filter: typing.Optional[str] = None,
+ sort_by: typing.Optional[ProjectSortBy] = None,
+ order: typing.Optional[SortOrder] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[PaginatedDataAgentResponse]:
+ """
+ Get a list of all Agents.
+
+ Parameters
+ ----------
+ page : typing.Optional[int]
+ Page number for pagination.
+
+ size : typing.Optional[int]
+ Page size for pagination. Number of Agents to fetch.
+
+ name : typing.Optional[str]
+ Case-insensitive filter for Agent name.
+
+ user_filter : typing.Optional[str]
+ Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users.
+
+ sort_by : typing.Optional[ProjectSortBy]
+            Field to sort Agents by.
+
+ order : typing.Optional[SortOrder]
+ Direction to sort by.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[PaginatedDataAgentResponse]
+ Successful Response
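+
+        Examples
+        --------
+        Illustrative sketch only; it assumes the `with_raw_response` accessor
+        and that the paginated payload exposes a `records` list.
+
+        from humanloop import Humanloop
+
+        client = Humanloop(api_key="YOUR_API_KEY")
+        response = client.agents.with_raw_response.list(size=10)
+        for agent in response.data.records:
+            print(agent.id)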
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "agents",
+ method="GET",
+ params={
+ "page": page,
+ "size": size,
+ "name": name,
+ "user_filter": user_filter,
+ "sort_by": sort_by,
+ "order": order,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ PaginatedDataAgentResponse,
+ construct_type(
+ type_=PaginatedDataAgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def upsert(
+ self,
+ *,
+ model: str,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ endpoint: typing.Optional[ModelEndpoints] = OMIT,
+ template: typing.Optional[AgentRequestTemplateParams] = OMIT,
+ template_language: typing.Optional[TemplateLanguage] = OMIT,
+ provider: typing.Optional[ModelProviders] = OMIT,
+ max_tokens: typing.Optional[int] = OMIT,
+ temperature: typing.Optional[float] = OMIT,
+ top_p: typing.Optional[float] = OMIT,
+ stop: typing.Optional[AgentRequestStopParams] = OMIT,
+ presence_penalty: typing.Optional[float] = OMIT,
+ frequency_penalty: typing.Optional[float] = OMIT,
+ other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ seed: typing.Optional[int] = OMIT,
+ response_format: typing.Optional[ResponseFormatParams] = OMIT,
+ reasoning_effort: typing.Optional[AgentRequestReasoningEffortParams] = OMIT,
+ tools: typing.Optional[typing.Sequence[AgentRequestToolsItemParams]] = OMIT,
+ attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ max_iterations: typing.Optional[int] = OMIT,
+ version_name: typing.Optional[str] = OMIT,
+ version_description: typing.Optional[str] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ tags: typing.Optional[typing.Sequence[str]] = OMIT,
+ readme: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[AgentResponse]:
+ """
+ Create an Agent or update it with a new version if it already exists.
+
+        Agents are identified by the `ID` or their `path`. The parameters (i.e. the template, temperature, model, etc.) and
+        tools determine the versions of the Agent.
+
+ You can provide `version_name` and `version_description` to identify and describe your versions.
+ Version names must be unique within an Agent - attempting to create a version with a name
+ that already exists will result in a 409 Conflict error.
+
+ Parameters
+ ----------
+ model : str
+ The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+
+ path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ endpoint : typing.Optional[ModelEndpoints]
+ The provider model endpoint used.
+
+ template : typing.Optional[AgentRequestTemplateParams]
+ The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+ For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+ For completion models, provide a prompt template as a string.
+
+ Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+
+ template_language : typing.Optional[TemplateLanguage]
+ The template language to use for rendering the template.
+
+ provider : typing.Optional[ModelProviders]
+ The company providing the underlying model service.
+
+ max_tokens : typing.Optional[int]
+            The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt.
+
+ temperature : typing.Optional[float]
+            What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+
+ top_p : typing.Optional[float]
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+
+ stop : typing.Optional[AgentRequestStopParams]
+ The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+
+ presence_penalty : typing.Optional[float]
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+
+ frequency_penalty : typing.Optional[float]
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+
+ other : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Other parameter values to be passed to the provider call.
+
+ seed : typing.Optional[int]
+ If specified, model will make a best effort to sample deterministically, but it is not guaranteed.
+
+ response_format : typing.Optional[ResponseFormatParams]
+ The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+
+ reasoning_effort : typing.Optional[AgentRequestReasoningEffortParams]
+            Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
+
+ tools : typing.Optional[typing.Sequence[AgentRequestToolsItemParams]]
+            The tools that the Agent can call.
+
+ attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used.
+
+ max_iterations : typing.Optional[int]
+ The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
+
+ version_name : typing.Optional[str]
+            Unique name for the Agent version. Each Agent can only have one version with a given name.
+
+ version_description : typing.Optional[str]
+ Description of the Version.
+
+ description : typing.Optional[str]
+            Description of the Agent.
+
+ tags : typing.Optional[typing.Sequence[str]]
+            List of tags associated with this Agent.
+
+ readme : typing.Optional[str]
+            Long description of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[AgentResponse]
+ Successful Response
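+
+        Examples
+        --------
+        Illustrative sketch only; the `path`, `model` and template are
+        placeholders, and the `with_raw_response` accessor is assumed.
+
+        from humanloop import Humanloop
+
+        client = Humanloop(api_key="YOUR_API_KEY")
+        response = client.agents.with_raw_response.upsert(
+            path="my-agent",
+            model="gpt-4o",
+            template=[{"role": "system", "content": "You are a helpful assistant."}],
+            max_iterations=5,
+        )
+        print(response.data)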
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "agents",
+ method="POST",
+ json={
+ "path": path,
+ "id": id,
+ "model": model,
+ "endpoint": endpoint,
+ "template": convert_and_respect_annotation_metadata(
+ object_=template, annotation=AgentRequestTemplateParams, direction="write"
+ ),
+ "template_language": template_language,
+ "provider": provider,
+ "max_tokens": max_tokens,
+ "temperature": temperature,
+ "top_p": top_p,
+ "stop": convert_and_respect_annotation_metadata(
+ object_=stop, annotation=AgentRequestStopParams, direction="write"
+ ),
+ "presence_penalty": presence_penalty,
+ "frequency_penalty": frequency_penalty,
+ "other": other,
+ "seed": seed,
+ "response_format": convert_and_respect_annotation_metadata(
+ object_=response_format, annotation=ResponseFormatParams, direction="write"
+ ),
+ "reasoning_effort": convert_and_respect_annotation_metadata(
+ object_=reasoning_effort, annotation=AgentRequestReasoningEffortParams, direction="write"
+ ),
+ "tools": convert_and_respect_annotation_metadata(
+ object_=tools, annotation=typing.Sequence[AgentRequestToolsItemParams], direction="write"
+ ),
+ "attributes": attributes,
+ "max_iterations": max_iterations,
+ "version_name": version_name,
+ "version_description": version_description,
+ "description": description,
+ "tags": tags,
+ "readme": readme,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def delete_agent_version(
+ self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> HttpResponse[None]:
+ """
+ Delete a version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[None]
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return HttpResponse(response=_response, data=None)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def patch_agent_version(
+ self,
+ id: str,
+ version_id: str,
+ *,
+ name: typing.Optional[str] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[AgentResponse]:
+ """
+ Update the name or description of the Agent version.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ name : typing.Optional[str]
+ Name of the version.
+
+ description : typing.Optional[str]
+ Description of the version.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[AgentResponse]
+ Successful Response
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}",
+ method="PATCH",
+ json={
+ "name": name,
+ "description": description,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def get(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[AgentResponse]:
+ """
+ Retrieve the Agent with the given ID.
+
+ By default, the deployed version of the Agent is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[AgentResponse]
+ Successful Response
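+
+        Examples
+        --------
+        Illustrative sketch only; the Agent ID is a placeholder and the
+        `with_raw_response` accessor is assumed.
+
+        from humanloop import Humanloop
+
+        client = Humanloop(api_key="YOUR_API_KEY")
+        response = client.agents.with_raw_response.get(id="ag_...")
+        print(response.data)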
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}",
+ method="GET",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> HttpResponse[None]:
+ """
+ Delete the Agent with the given ID.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[None]
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return HttpResponse(response=_response, data=None)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def move(
+ self,
+ id: str,
+ *,
+ path: typing.Optional[str] = OMIT,
+ name: typing.Optional[str] = OMIT,
+ directory_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[AgentResponse]:
+ """
+ Move the Agent to a different path or change the name.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ path : typing.Optional[str]
+            Path of the Agent including the Agent name, which is used as a unique identifier.
+
+ name : typing.Optional[str]
+            Name of the Agent.
+
+ directory_id : typing.Optional[str]
+            Unique identifier for the Directory to move the Agent to. Starts with `dir_`.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[AgentResponse]
+ Successful Response
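+
+        Examples
+        --------
+        Illustrative sketch only; the ID and new `path` are placeholders and the
+        `with_raw_response` accessor is assumed.
+
+        from humanloop import Humanloop
+
+        client = Humanloop(api_key="YOUR_API_KEY")
+        response = client.agents.with_raw_response.move(
+            id="ag_...",
+            path="new-folder/new-name",
+        )
+        print(response.data)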
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}",
+ method="PATCH",
+ json={
+ "path": path,
+ "name": name,
+ "directory_id": directory_id,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def list_versions(
+ self,
+ id: str,
+ *,
+ evaluator_aggregates: typing.Optional[bool] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[ListAgents]:
+ """
+        Get a list of all the versions of an Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ evaluator_aggregates : typing.Optional[bool]
+            Whether to include Evaluator aggregate results for the versions in the response.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[ListAgents]
+ Successful Response
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/versions",
+ method="GET",
+ params={
+ "evaluator_aggregates": evaluator_aggregates,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ ListAgents,
+ construct_type(
+ type_=ListAgents, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def set_deployment(
+ self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> HttpResponse[AgentResponse]:
+ """
+ Deploy Agent to an Environment.
+
+        Set the deployed version for the specified Environment. This version
+        will be used for calls made to the Agent in this Environment.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ environment_id : str
+ Unique identifier for the Environment to deploy the Version to.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[AgentResponse]
+ Successful Response
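+
+        Examples
+        --------
+        Illustrative sketch only; all IDs are placeholders and the
+        `with_raw_response` accessor is assumed.
+
+        from humanloop import Humanloop
+
+        client = Humanloop(api_key="YOUR_API_KEY")
+        response = client.agents.with_raw_response.set_deployment(
+            id="ag_...",
+            environment_id="env_...",
+            version_id="agv_...",
+        )
+        print(response.data)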
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}",
+ method="POST",
+ params={
+ "version_id": version_id,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def remove_deployment(
+ self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> HttpResponse[None]:
+ """
+ Remove deployed Agent from the Environment.
+
+        Remove the deployed version for the specified Environment. This version
+        will no longer be used for calls made to the Agent in this Environment.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ environment_id : str
+ Unique identifier for the Environment to remove the deployment from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[None]
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return HttpResponse(response=_response, data=None)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def list_environments(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> HttpResponse[typing.List[FileEnvironmentResponse]]:
+ """
+ List all Environments and their deployed versions for the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[typing.List[FileEnvironmentResponse]]
+ Successful Response
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/environments",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ typing.List[FileEnvironmentResponse],
+ construct_type(
+ type_=typing.List[FileEnvironmentResponse], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def update_monitoring(
+ self,
+ id: str,
+ *,
+ activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT,
+ deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[AgentResponse]:
+ """
+ Activate and deactivate Evaluators for monitoring the Agent.
+
+ An activated Evaluator will automatically be run on all new Logs
+ within the Agent for monitoring purposes.
+
+ Parameters
+ ----------
+ id : str
+
+ activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]]
+ Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+
+ deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]]
+ Evaluators to deactivate. These will not be run on new Logs.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[AgentResponse]
+ Successful Response
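+
+        Examples
+        --------
+        Illustrative sketch only; the IDs are placeholders, the
+        `with_raw_response` accessor is assumed, and the shape of the activate
+        item (`evaluator_version_id`) is an assumption.
+
+        from humanloop import Humanloop
+
+        client = Humanloop(api_key="YOUR_API_KEY")
+        response = client.agents.with_raw_response.update_monitoring(
+            id="ag_...",
+            activate=[{"evaluator_version_id": "evv_..."}],
+        )
+        print(response.data)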
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/evaluators",
+ method="POST",
+ json={
+ "activate": convert_and_respect_annotation_metadata(
+ object_=activate,
+ annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams],
+ direction="write",
+ ),
+ "deactivate": convert_and_respect_annotation_metadata(
+ object_=deactivate,
+ annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams],
+ direction="write",
+ ),
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def serialize(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[None]:
+ """
+ Serialize an Agent to the .agent file format.
+
+ Useful for storing the Agent with your code in a version control system,
+ or for editing with an AI tool.
+
+ By default, the deployed version of the Agent is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[None]
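+
+        Examples
+        --------
+        Illustrative sketch only; the Agent ID is a placeholder and the
+        `with_raw_response` accessor is assumed.
+
+        from humanloop import Humanloop
+
+        client = Humanloop(api_key="YOUR_API_KEY")
+        response = client.agents.with_raw_response.serialize(id="ag_...")
+        # The serialized .agent file is returned in the HTTP response body;
+        # `data` is None for this endpoint.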
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/serialize",
+ method="GET",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return HttpResponse(response=_response, data=None)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def deserialize(
+ self, *, agent: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> HttpResponse[AgentKernelRequest]:
+ """
+ Deserialize an Agent from the .agent file format.
+
+        This returns the subset of the Agent's attributes that defines the
+        Agent version (e.g. `model`, `temperature`, etc.).
+
+ Parameters
+ ----------
+ agent : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[AgentKernelRequest]
+ Successful Response
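+
+        Examples
+        --------
+        Illustrative sketch only; the file name is a placeholder and the
+        `with_raw_response` accessor is assumed.
+
+        from humanloop import Humanloop
+
+        client = Humanloop(api_key="YOUR_API_KEY")
+        with open("my-agent.agent") as f:
+            response = client.agents.with_raw_response.deserialize(agent=f.read())
+        print(response.data)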
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "agents/deserialize",
+ method="POST",
+ json={
+ "agent": agent,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentKernelRequest,
+ construct_type(
+ type_=AgentKernelRequest, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncRawAgentsClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def log(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ run_id: typing.Optional[str] = OMIT,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ output_message: typing.Optional[ChatMessageParams] = OMIT,
+ prompt_tokens: typing.Optional[int] = OMIT,
+ reasoning_tokens: typing.Optional[int] = OMIT,
+ output_tokens: typing.Optional[int] = OMIT,
+ prompt_cost: typing.Optional[float] = OMIT,
+ output_cost: typing.Optional[float] = OMIT,
+ finish_reason: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentLogRequestAgentParams] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ output: typing.Optional[str] = OMIT,
+ created_at: typing.Optional[dt.datetime] = OMIT,
+ error: typing.Optional[str] = OMIT,
+ provider_latency: typing.Optional[float] = OMIT,
+ stdout: typing.Optional[str] = OMIT,
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agent_log_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[CreateAgentLogResponse]:
+ """
+ Create an Agent Log.
+
+        You can use the query parameters `version_id` or `environment` to target
+        an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+ If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete`
+ in order to trigger Evaluators.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ run_id : typing.Optional[str]
+ Unique identifier for the Run to associate the Log to.
+
+ path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ output_message : typing.Optional[ChatMessageParams]
+ The message returned by the provider.
+
+ prompt_tokens : typing.Optional[int]
+ Number of tokens in the prompt used to generate the output.
+
+ reasoning_tokens : typing.Optional[int]
+ Number of reasoning tokens used to generate the output.
+
+ output_tokens : typing.Optional[int]
+ Number of tokens in the output generated by the model.
+
+ prompt_cost : typing.Optional[float]
+ Cost in dollars associated to the tokens in the prompt.
+
+ output_cost : typing.Optional[float]
+ Cost in dollars associated to the tokens in the output.
+
+ finish_reason : typing.Optional[str]
+ Reason the generation finished.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentLogRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentLogRequestAgentParams]
+ The agent configuration to use. Two formats are supported:
+ - An `AgentKernelRequest` object containing the agent configuration
+ - A string containing a serialized .agent file
+ A new Agent version will be created if the provided details are new.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ output : typing.Optional[str]
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+ created_at : typing.Optional[dt.datetime]
+            User-defined timestamp for when the log was created.
+
+ error : typing.Optional[str]
+ Error message if the log is an error.
+
+ provider_latency : typing.Optional[float]
+ Duration of the logged event in seconds.
+
+ stdout : typing.Optional[str]
+ Captured log and debug statements.
+
+ provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw request sent to provider.
+
+ provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw response received from the provider.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agent_log_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[CreateAgentLogResponse]
+ Successful Response
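+
+        Examples
+        --------
+        Illustrative sketch only; assumes the generated `AsyncHumanloop` client
+        and `with_raw_response` accessor, with placeholder values.
+
+        import asyncio
+
+        from humanloop import AsyncHumanloop
+
+        client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+        async def main() -> None:
+            response = await client.agents.with_raw_response.log(
+                path="my-agent",
+                messages=[{"role": "user", "content": "Hello"}],
+                output="Hi there!",
+            )
+            print(response.data)
+
+        asyncio.run(main())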
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "agents/log",
+ method="POST",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ json={
+ "run_id": run_id,
+ "path": path,
+ "id": id,
+ "output_message": convert_and_respect_annotation_metadata(
+ object_=output_message, annotation=ChatMessageParams, direction="write"
+ ),
+ "prompt_tokens": prompt_tokens,
+ "reasoning_tokens": reasoning_tokens,
+ "output_tokens": output_tokens,
+ "prompt_cost": prompt_cost,
+ "output_cost": output_cost,
+ "finish_reason": finish_reason,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "tool_choice": convert_and_respect_annotation_metadata(
+ object_=tool_choice, annotation=AgentLogRequestToolChoiceParams, direction="write"
+ ),
+ "agent": convert_and_respect_annotation_metadata(
+ object_=agent, annotation=AgentLogRequestAgentParams, direction="write"
+ ),
+ "start_time": start_time,
+ "end_time": end_time,
+ "output": output,
+ "created_at": created_at,
+ "error": error,
+ "provider_latency": provider_latency,
+ "stdout": stdout,
+ "provider_request": provider_request,
+ "provider_response": provider_response,
+ "inputs": inputs,
+ "source": source,
+ "metadata": metadata,
+ "log_status": log_status,
+ "source_datapoint_id": source_datapoint_id,
+ "trace_parent_id": trace_parent_id,
+ "user": user,
+ "environment": agent_log_request_environment,
+ "save": save,
+ "log_id": log_id,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ CreateAgentLogResponse,
+ construct_type(
+ type_=CreateAgentLogResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def update_log(
+ self,
+ id: str,
+ log_id: str,
+ *,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ output_message: typing.Optional[ChatMessageParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ output: typing.Optional[str] = OMIT,
+ error: typing.Optional[str] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[LogResponse]:
+ """
+ Update a Log.
+
+ Update the details of a Log with the given ID.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ log_id : str
+ Unique identifier for the Log.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            List of chat messages that were used as an input to the Agent.
+
+        output_message : typing.Optional[ChatMessageParams]
+            The output message returned by this Agent.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the Agent Log.
+
+        output : typing.Optional[str]
+            The output of the Agent Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`.
+
+        error : typing.Optional[str]
+            The error message of the Agent Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`.
+
+        log_status : typing.Optional[LogStatus]
+            Status of the Agent Log. When an Agent Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[LogResponse]
+ Successful Response
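+
+        Examples
+        --------
+        Illustrative sketch only; the IDs are placeholders and the
+        `with_raw_response` accessor is assumed.
+
+        import asyncio
+
+        from humanloop import AsyncHumanloop
+
+        client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+        async def main() -> None:
+            response = await client.agents.with_raw_response.update_log(
+                id="ag_...",
+                log_id="log_...",
+                log_status="complete",
+            )
+            print(response.data)
+
+        asyncio.run(main())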
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/log/{jsonable_encoder(log_id)}",
+ method="PATCH",
+ json={
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "output_message": convert_and_respect_annotation_metadata(
+ object_=output_message, annotation=ChatMessageParams, direction="write"
+ ),
+ "inputs": inputs,
+ "output": output,
+ "error": error,
+ "log_status": log_status,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ LogResponse,
+ construct_type(
+ type_=LogResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ @contextlib.asynccontextmanager
+ async def call_stream(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentsCallStreamRequestAgentParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agents_call_stream_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ return_inputs: typing.Optional[bool] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AgentCallStreamResponse]]]:
+ """
+ Call an Agent.
+
+ Calling an Agent calls the model provider before logging
+ the request, responses and metadata to Humanloop.
+
+        You can use the query parameters `version_id` or `environment` to target
+        an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+        Instead of targeting an existing version explicitly, you can pass Agent
+        details in the request body. We will then check whether those details correspond
+        to an existing version of the Agent and, if they do not, create a new version. This is helpful
+        when you are storing or deriving your Agent details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentsCallStreamRequestAgentParams]
+ The agent configuration to use. Two formats are supported:
+ - An `AgentKernelRequest` object containing the agent configuration
+ - A string containing a serialized .agent file
+ A new Agent version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agents_call_stream_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated with.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Yields
+ ------
+ typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AgentCallStreamResponse]]]
+
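+ Examples
+ --------
+ A hedged usage sketch, not from the generated docs: it assumes the
+ `AsyncHumanloop` entrypoint and that this raw client is exposed as
+ `client.agents.with_raw_response`; the Agent path and message are placeholders.
+
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+ async def main() -> None:
+     async with client.agents.with_raw_response.call_stream(
+         path="my-agent",
+         messages=[{"role": "user", "content": "Hello"}],
+     ) as response:
+         async for chunk in response.data:
+             print(chunk)
+
+ asyncio.run(main())
+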
+ """
+ async with self._client_wrapper.httpx_client.stream(
+ "agents/call",
+ method="POST",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ json={
+ "path": path,
+ "id": id,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "tool_choice": convert_and_respect_annotation_metadata(
+ object_=tool_choice, annotation=AgentsCallStreamRequestToolChoiceParams, direction="write"
+ ),
+ "agent": convert_and_respect_annotation_metadata(
+ object_=agent, annotation=AgentsCallStreamRequestAgentParams, direction="write"
+ ),
+ "inputs": inputs,
+ "source": source,
+ "metadata": metadata,
+ "start_time": start_time,
+ "end_time": end_time,
+ "log_status": log_status,
+ "source_datapoint_id": source_datapoint_id,
+ "trace_parent_id": trace_parent_id,
+ "user": user,
+ "environment": agents_call_stream_request_environment,
+ "save": save,
+ "log_id": log_id,
+ "provider_api_keys": convert_and_respect_annotation_metadata(
+ object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+ ),
+ "return_inputs": return_inputs,
+ "include_trace_children": include_trace_children,
+ "stream": True,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ ) as _response:
+
+ async def stream() -> AsyncHttpResponse[typing.AsyncIterator[AgentCallStreamResponse]]:
+ try:
+ if 200 <= _response.status_code < 300:
+
+ async def _iter():
+ _event_source = httpx_sse.EventSource(_response)
+ async for _sse in _event_source.aiter_sse():
+ if _sse.data is None:
+ return
+ try:
+ # parse the raw SSE payload into the typed stream chunk
+ # (assumes `json` is imported at module top, per this client's conventions)
+ yield typing.cast(
+ AgentCallStreamResponse,
+ construct_type(type_=AgentCallStreamResponse, object_=json.loads(_sse.data)),  # type: ignore
+ )
+ except Exception:
+ pass
+ return
+
+ return AsyncHttpResponse(response=_response, data=_iter())
+ await _response.aread()
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ yield await stream()
+
+ async def call(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentsCallRequestAgentParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agents_call_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ return_inputs: typing.Optional[bool] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[AgentCallResponse]:
+ """
+ Call an Agent.
+
+ Calling an Agent calls the model provider before logging
+ the request, responses and metadata to Humanloop.
+
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can pass
+ Agent details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+ when you are storing or deriving your Agent details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentsCallRequestAgentParams]
+ The agent configuration to use. Two formats are supported:
+ - An `AgentKernelRequest` object containing the agent configuration
+ - A string containing a serialized .agent file
+ A new Agent version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agents_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated with.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[AgentCallResponse]
+
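+ Examples
+ --------
+ A hedged sketch (assumes the `AsyncHumanloop` entrypoint and the
+ `with_raw_response` accessor; the Agent path and message are placeholders).
+
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+ async def main() -> None:
+     response = await client.agents.with_raw_response.call(
+         path="my-agent",
+         messages=[{"role": "user", "content": "Hello"}],
+     )
+     print(response.data)
+
+ asyncio.run(main())
+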
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "agents/call",
+ method="POST",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ json={
+ "path": path,
+ "id": id,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "tool_choice": convert_and_respect_annotation_metadata(
+ object_=tool_choice, annotation=AgentsCallRequestToolChoiceParams, direction="write"
+ ),
+ "agent": convert_and_respect_annotation_metadata(
+ object_=agent, annotation=AgentsCallRequestAgentParams, direction="write"
+ ),
+ "inputs": inputs,
+ "source": source,
+ "metadata": metadata,
+ "start_time": start_time,
+ "end_time": end_time,
+ "log_status": log_status,
+ "source_datapoint_id": source_datapoint_id,
+ "trace_parent_id": trace_parent_id,
+ "user": user,
+ "environment": agents_call_request_environment,
+ "save": save,
+ "log_id": log_id,
+ "provider_api_keys": convert_and_respect_annotation_metadata(
+ object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+ ),
+ "return_inputs": return_inputs,
+ "include_trace_children": include_trace_children,
+ "stream": False,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentCallResponse,
+ construct_type(
+ type_=AgentCallResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ @contextlib.asynccontextmanager
+ async def continue_stream(
+ self,
+ *,
+ log_id: str,
+ messages: typing.Sequence[ChatMessageParams],
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AgentContinueStreamResponse]]]:
+ """
+ Continue an incomplete Agent call.
+
+ This endpoint allows continuing an existing incomplete Agent call, using the context
+ from the previous interaction. The Agent will resume processing from where it left off.
+
+ The original log must be in an incomplete state to be continued.
+
+ The messages in the request will be appended
+ to the original messages in the log.
+
+ Parameters
+ ----------
+ log_id : str
+ This identifies the Agent Log to continue.
+
+ messages : typing.Sequence[ChatMessageParams]
+ The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Yields
+ ------
+ typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AgentContinueStreamResponse]]]
+
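+ Examples
+ --------
+ A hedged sketch (assumes the `AsyncHumanloop` entrypoint and the
+ `with_raw_response` accessor; the Log ID and tool result are placeholders).
+
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+ async def main() -> None:
+     async with client.agents.with_raw_response.continue_stream(
+         log_id="log_...",
+         messages=[{"role": "tool", "content": "..."}],
+     ) as response:
+         async for chunk in response.data:
+             print(chunk)
+
+ asyncio.run(main())
+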
+ """
+ async with self._client_wrapper.httpx_client.stream(
+ "agents/continue",
+ method="POST",
+ json={
+ "log_id": log_id,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "provider_api_keys": convert_and_respect_annotation_metadata(
+ object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+ ),
+ "include_trace_children": include_trace_children,
+ "stream": True,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ ) as _response:
+
+ async def stream() -> AsyncHttpResponse[typing.AsyncIterator[AgentContinueStreamResponse]]:
+ try:
+ if 200 <= _response.status_code < 300:
+
+ async def _iter():
+ _event_source = httpx_sse.EventSource(_response)
+ async for _sse in _event_source.aiter_sse():
+ if _sse.data is None:
+ return
+ try:
+ # parse the raw SSE payload into the typed stream chunk
+ # (assumes `json` is imported at module top, per this client's conventions)
+ yield typing.cast(
+ AgentContinueStreamResponse,
+ construct_type(type_=AgentContinueStreamResponse, object_=json.loads(_sse.data)),  # type: ignore
+ )
+ except Exception:
+ pass
+ return
+
+ return AsyncHttpResponse(response=_response, data=_iter())
+ await _response.aread()
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ yield await stream()
+
+ async def continue_(
+ self,
+ *,
+ log_id: str,
+ messages: typing.Sequence[ChatMessageParams],
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[AgentContinueResponse]:
+ """
+ Continue an incomplete Agent call.
+
+ This endpoint allows continuing an existing incomplete Agent call, using the context
+ from the previous interaction. The Agent will resume processing from where it left off.
+
+ The original log must be in an incomplete state to be continued.
+
+ The messages in the request will be appended
+ to the original messages in the log.
+
+ Parameters
+ ----------
+ log_id : str
+ This identifies the Agent Log to continue.
+
+ messages : typing.Sequence[ChatMessageParams]
+ The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[AgentContinueResponse]
+
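+ Examples
+ --------
+ A hedged sketch (assumes the `AsyncHumanloop` entrypoint and the
+ `with_raw_response` accessor; the Log ID and tool result are placeholders).
+
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+ async def main() -> None:
+     response = await client.agents.with_raw_response.continue_(
+         log_id="log_...",
+         messages=[{"role": "tool", "content": "..."}],
+     )
+     print(response.data)
+
+ asyncio.run(main())
+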
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "agents/continue",
+ method="POST",
+ json={
+ "log_id": log_id,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "provider_api_keys": convert_and_respect_annotation_metadata(
+ object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+ ),
+ "include_trace_children": include_trace_children,
+ "stream": False,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentContinueResponse,
+ construct_type(
+ type_=AgentContinueResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def list(
+ self,
+ *,
+ page: typing.Optional[int] = None,
+ size: typing.Optional[int] = None,
+ name: typing.Optional[str] = None,
+ user_filter: typing.Optional[str] = None,
+ sort_by: typing.Optional[ProjectSortBy] = None,
+ order: typing.Optional[SortOrder] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[PaginatedDataAgentResponse]:
+ """
+ Get a list of all Agents.
+
+ Parameters
+ ----------
+ page : typing.Optional[int]
+ Page number for pagination.
+
+ size : typing.Optional[int]
+ Page size for pagination. Number of Agents to fetch.
+
+ name : typing.Optional[str]
+ Case-insensitive filter for Agent name.
+
+ user_filter : typing.Optional[str]
+ Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users.
+
+ sort_by : typing.Optional[ProjectSortBy]
+ Field to sort Agents by.
+
+ order : typing.Optional[SortOrder]
+ Direction to sort by.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[PaginatedDataAgentResponse]
+ Successful Response
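+
+ Examples
+ --------
+ A hedged sketch (assumes the `AsyncHumanloop` entrypoint and the
+ `with_raw_response` accessor).
+
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+ async def main() -> None:
+     response = await client.agents.with_raw_response.list(page=1, size=10)
+     print(response.data)
+
+ asyncio.run(main())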
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "agents",
+ method="GET",
+ params={
+ "page": page,
+ "size": size,
+ "name": name,
+ "user_filter": user_filter,
+ "sort_by": sort_by,
+ "order": order,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ PaginatedDataAgentResponse,
+ construct_type(
+ type_=PaginatedDataAgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def upsert(
+ self,
+ *,
+ model: str,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ endpoint: typing.Optional[ModelEndpoints] = OMIT,
+ template: typing.Optional[AgentRequestTemplateParams] = OMIT,
+ template_language: typing.Optional[TemplateLanguage] = OMIT,
+ provider: typing.Optional[ModelProviders] = OMIT,
+ max_tokens: typing.Optional[int] = OMIT,
+ temperature: typing.Optional[float] = OMIT,
+ top_p: typing.Optional[float] = OMIT,
+ stop: typing.Optional[AgentRequestStopParams] = OMIT,
+ presence_penalty: typing.Optional[float] = OMIT,
+ frequency_penalty: typing.Optional[float] = OMIT,
+ other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ seed: typing.Optional[int] = OMIT,
+ response_format: typing.Optional[ResponseFormatParams] = OMIT,
+ reasoning_effort: typing.Optional[AgentRequestReasoningEffortParams] = OMIT,
+ tools: typing.Optional[typing.Sequence[AgentRequestToolsItemParams]] = OMIT,
+ attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ max_iterations: typing.Optional[int] = OMIT,
+ version_name: typing.Optional[str] = OMIT,
+ version_description: typing.Optional[str] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ tags: typing.Optional[typing.Sequence[str]] = OMIT,
+ readme: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[AgentResponse]:
+ """
+ Create an Agent or update it with a new version if it already exists.
+
+ Agents are identified by the `ID` or their `path`. The parameters (i.e. the template, temperature, model etc.) and
+ tools determine the versions of the Agent.
+
+ You can provide `version_name` and `version_description` to identify and describe your versions.
+ Version names must be unique within an Agent; attempting to create a version with a name
+ that already exists will result in a 409 Conflict error.
+
+ Parameters
+ ----------
+ model : str
+ The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ endpoint : typing.Optional[ModelEndpoints]
+ The provider model endpoint used.
+
+ template : typing.Optional[AgentRequestTemplateParams]
+ The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+ For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+ For completion models, provide a prompt template as a string.
+
+ Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+
+ template_language : typing.Optional[TemplateLanguage]
+ The template language to use for rendering the template.
+
+ provider : typing.Optional[ModelProviders]
+ The company providing the underlying model service.
+
+ max_tokens : typing.Optional[int]
+ The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt.
+
+ temperature : typing.Optional[float]
+ What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+
+ top_p : typing.Optional[float]
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+
+ stop : typing.Optional[AgentRequestStopParams]
+ The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+
+ presence_penalty : typing.Optional[float]
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+
+ frequency_penalty : typing.Optional[float]
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+
+ other : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Other parameter values to be passed to the provider call.
+
+ seed : typing.Optional[int]
+ If specified, the model will make a best effort to sample deterministically, but it is not guaranteed.
+
+ response_format : typing.Optional[ResponseFormatParams]
+ The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+
+ reasoning_effort : typing.Optional[AgentRequestReasoningEffortParams]
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an `OpenAIReasoningEffort` enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
+
+ tools : typing.Optional[typing.Sequence[AgentRequestToolsItemParams]]
+
+ attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used.
+
+ max_iterations : typing.Optional[int]
+ The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
+
+ version_name : typing.Optional[str]
+ Unique name for the Agent version. Each Agent can only have one version with a given name.
+
+ version_description : typing.Optional[str]
+ Description of the Version.
+
+ description : typing.Optional[str]
+ Description of the Agent.
+
+ tags : typing.Optional[typing.Sequence[str]]
+ List of tags associated with this Agent.
+
+ readme : typing.Optional[str]
+ Long description of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[AgentResponse]
+ Successful Response
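+
+ Examples
+ --------
+ A hedged sketch (assumes the `AsyncHumanloop` entrypoint and the
+ `with_raw_response` accessor; the path, model name, and template are placeholders).
+
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+ async def main() -> None:
+     response = await client.agents.with_raw_response.upsert(
+         path="my-agent",
+         model="gpt-4o",
+         template=[{"role": "system", "content": "You are a helpful assistant."}],
+         max_iterations=5,
+     )
+     print(response.data)
+
+ asyncio.run(main())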
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "agents",
+ method="POST",
+ json={
+ "path": path,
+ "id": id,
+ "model": model,
+ "endpoint": endpoint,
+ "template": convert_and_respect_annotation_metadata(
+ object_=template, annotation=AgentRequestTemplateParams, direction="write"
+ ),
+ "template_language": template_language,
+ "provider": provider,
+ "max_tokens": max_tokens,
+ "temperature": temperature,
+ "top_p": top_p,
+ "stop": convert_and_respect_annotation_metadata(
+ object_=stop, annotation=AgentRequestStopParams, direction="write"
+ ),
+ "presence_penalty": presence_penalty,
+ "frequency_penalty": frequency_penalty,
+ "other": other,
+ "seed": seed,
+ "response_format": convert_and_respect_annotation_metadata(
+ object_=response_format, annotation=ResponseFormatParams, direction="write"
+ ),
+ "reasoning_effort": convert_and_respect_annotation_metadata(
+ object_=reasoning_effort, annotation=AgentRequestReasoningEffortParams, direction="write"
+ ),
+ "tools": convert_and_respect_annotation_metadata(
+ object_=tools, annotation=typing.Sequence[AgentRequestToolsItemParams], direction="write"
+ ),
+ "attributes": attributes,
+ "max_iterations": max_iterations,
+ "version_name": version_name,
+ "version_description": version_description,
+ "description": description,
+ "tags": tags,
+ "readme": readme,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def delete_agent_version(
+ self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncHttpResponse[None]:
+ """
+ Delete a version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[None]
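+
+ Examples
+ --------
+ A hedged sketch (assumes the `AsyncHumanloop` entrypoint and the
+ `with_raw_response` accessor; both IDs are placeholders).
+
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+ async def main() -> None:
+     await client.agents.with_raw_response.delete_agent_version(
+         id="ag_...",
+         version_id="agv_...",
+     )
+
+ asyncio.run(main())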
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return AsyncHttpResponse(response=_response, data=None)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def patch_agent_version(
+ self,
+ id: str,
+ version_id: str,
+ *,
+ name: typing.Optional[str] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[AgentResponse]:
+ """
+ Update the name or description of the Agent version.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ name : typing.Optional[str]
+ Name of the version.
+
+ description : typing.Optional[str]
+ Description of the version.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[AgentResponse]
+ Successful Response
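+
+ Examples
+ --------
+ A hedged sketch (assumes the `AsyncHumanloop` entrypoint and the
+ `with_raw_response` accessor; the IDs are placeholders).
+
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+ async def main() -> None:
+     response = await client.agents.with_raw_response.patch_agent_version(
+         id="ag_...",
+         version_id="agv_...",
+         name="v2",
+     )
+     print(response.data)
+
+ asyncio.run(main())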
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}",
+ method="PATCH",
+ json={
+ "name": name,
+ "description": description,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def get(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[AgentResponse]:
+ """
+ Retrieve the Agent with the given ID.
+
+ By default, the deployed version of the Agent is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[AgentResponse]
+ Successful Response
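+
+ Examples
+ --------
+ A hedged sketch (assumes the `AsyncHumanloop` entrypoint and the
+ `with_raw_response` accessor; the ID is a placeholder).
+
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+ async def main() -> None:
+     response = await client.agents.with_raw_response.get(id="ag_...")
+     print(response.data)
+
+ asyncio.run(main())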
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}",
+ method="GET",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def delete(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncHttpResponse[None]:
+ """
+ Delete the Agent with the given ID.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[None]
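+
+ Examples
+ --------
+ A hedged sketch (assumes the `AsyncHumanloop` entrypoint and the
+ `with_raw_response` accessor; the ID is a placeholder).
+
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+ async def main() -> None:
+     await client.agents.with_raw_response.delete(id="ag_...")
+
+ asyncio.run(main())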
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return AsyncHttpResponse(response=_response, data=None)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def move(
+ self,
+ id: str,
+ *,
+ path: typing.Optional[str] = OMIT,
+ name: typing.Optional[str] = OMIT,
+ directory_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[AgentResponse]:
+ """
+ Move the Agent to a different path or change the name.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ path : typing.Optional[str]
+ Path of the Agent including the Agent name, which is used as a unique identifier.
+
+ name : typing.Optional[str]
+ Name of the Agent.
+
+ directory_id : typing.Optional[str]
+ Unique identifier for the Directory to move the Agent to. Starts with `dir_`.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[AgentResponse]
+ Successful Response
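+
+ Examples
+ --------
+ A hedged sketch (assumes the `AsyncHumanloop` entrypoint and the
+ `with_raw_response` accessor; the ID and path are placeholders).
+
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+ async def main() -> None:
+     response = await client.agents.with_raw_response.move(
+         id="ag_...",
+         path="new-directory/my-agent",
+     )
+     print(response.data)
+
+ asyncio.run(main())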
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}",
+ method="PATCH",
+ json={
+ "path": path,
+ "name": name,
+ "directory_id": directory_id,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def list_versions(
+ self,
+ id: str,
+ *,
+ evaluator_aggregates: typing.Optional[bool] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[ListAgents]:
+ """
+ Get a list of all the versions of an Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ evaluator_aggregates : typing.Optional[bool]
+ Whether to include Evaluator aggregate results for the versions in the response.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[ListAgents]
+ Successful Response
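+
+ Examples
+ --------
+ A hedged sketch (assumes the `AsyncHumanloop` entrypoint and the
+ `with_raw_response` accessor; the ID is a placeholder).
+
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+ async def main() -> None:
+     response = await client.agents.with_raw_response.list_versions(
+         id="ag_...",
+         evaluator_aggregates=True,
+     )
+     print(response.data)
+
+ asyncio.run(main())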
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/versions",
+ method="GET",
+ params={
+ "evaluator_aggregates": evaluator_aggregates,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ ListAgents,
+ construct_type(
+ type_=ListAgents, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def set_deployment(
+ self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncHttpResponse[AgentResponse]:
+ """
+ Deploy Agent to an Environment.
+
+ Set the deployed version for the specified Environment. This version of the Agent
+ will be used for calls made to the Agent in this Environment.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ environment_id : str
+ Unique identifier for the Environment to deploy the Version to.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[AgentResponse]
+ Successful Response
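+
+ Examples
+ --------
+ A hedged sketch (assumes the `AsyncHumanloop` entrypoint and the
+ `with_raw_response` accessor; all IDs are placeholders).
+
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+ async def main() -> None:
+     response = await client.agents.with_raw_response.set_deployment(
+         id="ag_...",
+         environment_id="env_...",
+         version_id="agv_...",
+     )
+     print(response.data)
+
+ asyncio.run(main())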
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}",
+ method="POST",
+ params={
+ "version_id": version_id,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def remove_deployment(
+ self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncHttpResponse[None]:
+ """
+ Remove deployed Agent from the Environment.
+
+ Remove the deployed version for the specified Environment. This version of the Agent
+ will no longer be used for calls made to the Agent in this Environment.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ environment_id : str
+ Unique identifier for the Environment to remove the deployment from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[None]
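+
+ Examples
+ --------
+ A hedged sketch (assumes the `AsyncHumanloop` entrypoint and the
+ `with_raw_response` accessor; both IDs are placeholders).
+
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+ async def main() -> None:
+     await client.agents.with_raw_response.remove_deployment(
+         id="ag_...",
+         environment_id="env_...",
+     )
+
+ asyncio.run(main())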
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return AsyncHttpResponse(response=_response, data=None)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def list_environments(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncHttpResponse[typing.List[FileEnvironmentResponse]]:
+ """
+ List all Environments and their deployed versions for the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[typing.List[FileEnvironmentResponse]]
+ Successful Response
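+
+ Examples
+ --------
+ A hedged sketch (assumes the `AsyncHumanloop` entrypoint and the
+ `with_raw_response` accessor; the ID is a placeholder).
+
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+ async def main() -> None:
+     response = await client.agents.with_raw_response.list_environments(id="ag_...")
+     print(response.data)
+
+ asyncio.run(main())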
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/environments",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ typing.List[FileEnvironmentResponse],
+ construct_type(
+ type_=typing.List[FileEnvironmentResponse], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def update_monitoring(
+ self,
+ id: str,
+ *,
+ activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT,
+ deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[AgentResponse]:
+ """
+ Activate and deactivate Evaluators for monitoring the Agent.
+
+ An activated Evaluator will automatically be run on all new Logs
+ within the Agent for monitoring purposes.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]]
+ Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+
+ deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]]
+ Evaluators to deactivate. These will not be run on new Logs.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[AgentResponse]
+ Successful Response
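+
+ Examples
+ --------
+ A hedged sketch (assumes the `AsyncHumanloop` entrypoint and the
+ `with_raw_response` accessor; the IDs are placeholders, and the
+ `evaluator_version_id` field name is an assumption drawn from the
+ `EvaluatorActivationDeactivationRequestActivateItemParams` union).
+
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+ async def main() -> None:
+     response = await client.agents.with_raw_response.update_monitoring(
+         id="ag_...",
+         activate=[{"evaluator_version_id": "evv_..."}],
+     )
+     print(response.data)
+
+ asyncio.run(main())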
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/evaluators",
+ method="POST",
+ json={
+ "activate": convert_and_respect_annotation_metadata(
+ object_=activate,
+ annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams],
+ direction="write",
+ ),
+ "deactivate": convert_and_respect_annotation_metadata(
+ object_=deactivate,
+ annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams],
+ direction="write",
+ ),
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def serialize(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[None]:
+ """
+ Serialize an Agent to the .agent file format.
+
+ Useful for storing the Agent with your code in a version control system,
+ or for editing with an AI tool.
+
+ By default, the deployed version of the Agent is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[None]
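+
+ Examples
+ --------
+ A hedged sketch (assumes the `AsyncHumanloop` entrypoint and the
+ `with_raw_response` accessor; the ID is a placeholder).
+
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+ async def main() -> None:
+     response = await client.agents.with_raw_response.serialize(id="ag_...")
+     # `data` is typed None here; the serialized .agent file is in the raw HTTP body
+
+ asyncio.run(main())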
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/serialize",
+ method="GET",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return AsyncHttpResponse(response=_response, data=None)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def deserialize(
+ self, *, agent: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncHttpResponse[AgentKernelRequest]:
+ """
+ Deserialize an Agent from the .agent file format.
+
+ This returns a subset of the attributes required by an Agent:
+ the subset that defines the Agent version (e.g. `model`, `temperature`, etc.).
+
+ Parameters
+ ----------
+ agent : str
+ The serialized Agent in the .agent file format.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[AgentKernelRequest]
+ Successful Response
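+
+ Examples
+ --------
+ A hedged sketch (assumes the `AsyncHumanloop` entrypoint and the
+ `with_raw_response` accessor; the file contents are elided).
+
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+ async def main() -> None:
+     response = await client.agents.with_raw_response.deserialize(
+         agent="...",  # contents of a .agent file
+     )
+     print(response.data)
+
+ asyncio.run(main())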
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "agents/deserialize",
+ method="POST",
+ json={
+ "agent": agent,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentKernelRequest,
+ construct_type(
+ type_=AgentKernelRequest, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/humanloop/agents/requests/__init__.py b/src/humanloop/agents/requests/__init__.py
new file mode 100644
index 00000000..06ce37ed
--- /dev/null
+++ b/src/humanloop/agents/requests/__init__.py
@@ -0,0 +1,25 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from .agent_log_request_agent import AgentLogRequestAgentParams
+from .agent_log_request_tool_choice import AgentLogRequestToolChoiceParams
+from .agent_request_reasoning_effort import AgentRequestReasoningEffortParams
+from .agent_request_stop import AgentRequestStopParams
+from .agent_request_template import AgentRequestTemplateParams
+from .agent_request_tools_item import AgentRequestToolsItemParams
+from .agents_call_request_agent import AgentsCallRequestAgentParams
+from .agents_call_request_tool_choice import AgentsCallRequestToolChoiceParams
+from .agents_call_stream_request_agent import AgentsCallStreamRequestAgentParams
+from .agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoiceParams
+
+__all__ = [
+ "AgentLogRequestAgentParams",
+ "AgentLogRequestToolChoiceParams",
+ "AgentRequestReasoningEffortParams",
+ "AgentRequestStopParams",
+ "AgentRequestTemplateParams",
+ "AgentRequestToolsItemParams",
+ "AgentsCallRequestAgentParams",
+ "AgentsCallRequestToolChoiceParams",
+ "AgentsCallStreamRequestAgentParams",
+ "AgentsCallStreamRequestToolChoiceParams",
+]
diff --git a/src/humanloop/agents/requests/agent_log_request_agent.py b/src/humanloop/agents/requests/agent_log_request_agent.py
new file mode 100644
index 00000000..1c6a7987
--- /dev/null
+++ b/src/humanloop/agents/requests/agent_log_request_agent.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.agent_kernel_request import AgentKernelRequestParams
+
+AgentLogRequestAgentParams = typing.Union[AgentKernelRequestParams, str]
diff --git a/src/humanloop/agents/requests/agent_log_request_tool_choice.py b/src/humanloop/agents/requests/agent_log_request_tool_choice.py
new file mode 100644
index 00000000..584112aa
--- /dev/null
+++ b/src/humanloop/agents/requests/agent_log_request_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.tool_choice import ToolChoiceParams
+
+AgentLogRequestToolChoiceParams = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams
+]
diff --git a/src/humanloop/agents/requests/agent_request_reasoning_effort.py b/src/humanloop/agents/requests/agent_request_reasoning_effort.py
new file mode 100644
index 00000000..98a991cd
--- /dev/null
+++ b/src/humanloop/agents/requests/agent_request_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.open_ai_reasoning_effort import OpenAiReasoningEffort
+
+AgentRequestReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/agents/requests/agent_request_stop.py b/src/humanloop/agents/requests/agent_request_stop.py
new file mode 100644
index 00000000..3970451c
--- /dev/null
+++ b/src/humanloop/agents/requests/agent_request_stop.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentRequestStopParams = typing.Union[str, typing.Sequence[str]]
diff --git a/src/humanloop/agents/requests/agent_request_template.py b/src/humanloop/agents/requests/agent_request_template.py
new file mode 100644
index 00000000..c251ce8e
--- /dev/null
+++ b/src/humanloop/agents/requests/agent_request_template.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.chat_message import ChatMessageParams
+
+AgentRequestTemplateParams = typing.Union[str, typing.Sequence[ChatMessageParams]]
diff --git a/src/humanloop/agents/requests/agent_request_tools_item.py b/src/humanloop/agents/requests/agent_request_tools_item.py
new file mode 100644
index 00000000..20cde136
--- /dev/null
+++ b/src/humanloop/agents/requests/agent_request_tools_item.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.agent_linked_file_request import AgentLinkedFileRequestParams
+from ...requests.agent_inline_tool import AgentInlineToolParams
+
+AgentRequestToolsItemParams = typing.Union[AgentLinkedFileRequestParams, AgentInlineToolParams]
diff --git a/src/humanloop/agents/requests/agents_call_request_agent.py b/src/humanloop/agents/requests/agents_call_request_agent.py
new file mode 100644
index 00000000..5c92d02b
--- /dev/null
+++ b/src/humanloop/agents/requests/agents_call_request_agent.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.agent_kernel_request import AgentKernelRequestParams
+
+AgentsCallRequestAgentParams = typing.Union[AgentKernelRequestParams, str]
diff --git a/src/humanloop/agents/requests/agents_call_request_tool_choice.py b/src/humanloop/agents/requests/agents_call_request_tool_choice.py
new file mode 100644
index 00000000..1e468fa0
--- /dev/null
+++ b/src/humanloop/agents/requests/agents_call_request_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.tool_choice import ToolChoiceParams
+
+AgentsCallRequestToolChoiceParams = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams
+]
diff --git a/src/humanloop/agents/requests/agents_call_stream_request_agent.py b/src/humanloop/agents/requests/agents_call_stream_request_agent.py
new file mode 100644
index 00000000..e9018a18
--- /dev/null
+++ b/src/humanloop/agents/requests/agents_call_stream_request_agent.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.agent_kernel_request import AgentKernelRequestParams
+
+AgentsCallStreamRequestAgentParams = typing.Union[AgentKernelRequestParams, str]
diff --git a/src/humanloop/agents/requests/agents_call_stream_request_tool_choice.py b/src/humanloop/agents/requests/agents_call_stream_request_tool_choice.py
new file mode 100644
index 00000000..bd068b6f
--- /dev/null
+++ b/src/humanloop/agents/requests/agents_call_stream_request_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.tool_choice import ToolChoiceParams
+
+AgentsCallStreamRequestToolChoiceParams = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams
+]
diff --git a/src/humanloop/agents/types/__init__.py b/src/humanloop/agents/types/__init__.py
new file mode 100644
index 00000000..9c8a955c
--- /dev/null
+++ b/src/humanloop/agents/types/__init__.py
@@ -0,0 +1,25 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from .agent_log_request_agent import AgentLogRequestAgent
+from .agent_log_request_tool_choice import AgentLogRequestToolChoice
+from .agent_request_reasoning_effort import AgentRequestReasoningEffort
+from .agent_request_stop import AgentRequestStop
+from .agent_request_template import AgentRequestTemplate
+from .agent_request_tools_item import AgentRequestToolsItem
+from .agents_call_request_agent import AgentsCallRequestAgent
+from .agents_call_request_tool_choice import AgentsCallRequestToolChoice
+from .agents_call_stream_request_agent import AgentsCallStreamRequestAgent
+from .agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoice
+
+__all__ = [
+ "AgentLogRequestAgent",
+ "AgentLogRequestToolChoice",
+ "AgentRequestReasoningEffort",
+ "AgentRequestStop",
+ "AgentRequestTemplate",
+ "AgentRequestToolsItem",
+ "AgentsCallRequestAgent",
+ "AgentsCallRequestToolChoice",
+ "AgentsCallStreamRequestAgent",
+ "AgentsCallStreamRequestToolChoice",
+]
diff --git a/src/humanloop/agents/types/agent_log_request_agent.py b/src/humanloop/agents/types/agent_log_request_agent.py
new file mode 100644
index 00000000..011a2b9d
--- /dev/null
+++ b/src/humanloop/agents/types/agent_log_request_agent.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.agent_kernel_request import AgentKernelRequest
+
+AgentLogRequestAgent = typing.Union[AgentKernelRequest, str]
diff --git a/src/humanloop/agents/types/agent_log_request_tool_choice.py b/src/humanloop/agents/types/agent_log_request_tool_choice.py
new file mode 100644
index 00000000..bfb576c2
--- /dev/null
+++ b/src/humanloop/agents/types/agent_log_request_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.tool_choice import ToolChoice
+
+AgentLogRequestToolChoice = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice
+]
diff --git a/src/humanloop/agents/types/agent_request_reasoning_effort.py b/src/humanloop/agents/types/agent_request_reasoning_effort.py
new file mode 100644
index 00000000..b4267202
--- /dev/null
+++ b/src/humanloop/agents/types/agent_request_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.open_ai_reasoning_effort import OpenAiReasoningEffort
+
+AgentRequestReasoningEffort = typing.Union[OpenAiReasoningEffort, int]
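
Per the `reasoning_effort` docstrings later in this patch, OpenAI reasoning models take an `OpenAiReasoningEffort` value while Anthropic models take an integer token budget. A sketch; the enum string below is an assumption based on OpenAI's standard low/medium/high levels:

    reasoning_effort_openai = "medium"  # assumed OpenAiReasoningEffort value
    reasoning_effort_anthropic = 2048   # maximum reasoning-token budget for Anthropic models
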
diff --git a/src/humanloop/agents/types/agent_request_stop.py b/src/humanloop/agents/types/agent_request_stop.py
new file mode 100644
index 00000000..325a6b2e
--- /dev/null
+++ b/src/humanloop/agents/types/agent_request_stop.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentRequestStop = typing.Union[str, typing.List[str]]
diff --git a/src/humanloop/agents/types/agent_request_template.py b/src/humanloop/agents/types/agent_request_template.py
new file mode 100644
index 00000000..f6474824
--- /dev/null
+++ b/src/humanloop/agents/types/agent_request_template.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.chat_message import ChatMessage
+
+AgentRequestTemplate = typing.Union[str, typing.List[ChatMessage]]
diff --git a/src/humanloop/agents/types/agent_request_tools_item.py b/src/humanloop/agents/types/agent_request_tools_item.py
new file mode 100644
index 00000000..e6c54b88
--- /dev/null
+++ b/src/humanloop/agents/types/agent_request_tools_item.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.agent_linked_file_request import AgentLinkedFileRequest
+from ...types.agent_inline_tool import AgentInlineTool
+
+AgentRequestToolsItem = typing.Union[AgentLinkedFileRequest, AgentInlineTool]
diff --git a/src/humanloop/agents/types/agents_call_request_agent.py b/src/humanloop/agents/types/agents_call_request_agent.py
new file mode 100644
index 00000000..5f663ad3
--- /dev/null
+++ b/src/humanloop/agents/types/agents_call_request_agent.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.agent_kernel_request import AgentKernelRequest
+
+AgentsCallRequestAgent = typing.Union[AgentKernelRequest, str]
diff --git a/src/humanloop/agents/types/agents_call_request_tool_choice.py b/src/humanloop/agents/types/agents_call_request_tool_choice.py
new file mode 100644
index 00000000..6dee5a04
--- /dev/null
+++ b/src/humanloop/agents/types/agents_call_request_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.tool_choice import ToolChoice
+
+AgentsCallRequestToolChoice = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice
+]
diff --git a/src/humanloop/agents/types/agents_call_stream_request_agent.py b/src/humanloop/agents/types/agents_call_stream_request_agent.py
new file mode 100644
index 00000000..4b2654e9
--- /dev/null
+++ b/src/humanloop/agents/types/agents_call_stream_request_agent.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.agent_kernel_request import AgentKernelRequest
+
+AgentsCallStreamRequestAgent = typing.Union[AgentKernelRequest, str]
diff --git a/src/humanloop/agents/types/agents_call_stream_request_tool_choice.py b/src/humanloop/agents/types/agents_call_stream_request_tool_choice.py
new file mode 100644
index 00000000..83d264f0
--- /dev/null
+++ b/src/humanloop/agents/types/agents_call_stream_request_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.tool_choice import ToolChoice
+
+AgentsCallStreamRequestToolChoice = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice
+]
diff --git a/src/humanloop/base_client.py b/src/humanloop/base_client.py
index bf72be6a..a11298b8 100644
--- a/src/humanloop/base_client.py
+++ b/src/humanloop/base_client.py
@@ -11,6 +11,7 @@
from .datasets.client import DatasetsClient
from .evaluators.client import EvaluatorsClient
from .flows.client import FlowsClient
+from .agents.client import AgentsClient
from .directories.client import DirectoriesClient
from .files.client import FilesClient
from .evaluations.client import EvaluationsClient
@@ -21,6 +22,7 @@
from .datasets.client import AsyncDatasetsClient
from .evaluators.client import AsyncEvaluatorsClient
from .flows.client import AsyncFlowsClient
+from .agents.client import AsyncAgentsClient
from .directories.client import AsyncDirectoriesClient
from .files.client import AsyncFilesClient
from .evaluations.client import AsyncEvaluationsClient
@@ -96,6 +98,7 @@ def __init__(
self.datasets = DatasetsClient(client_wrapper=self._client_wrapper)
self.evaluators = EvaluatorsClient(client_wrapper=self._client_wrapper)
self.flows = FlowsClient(client_wrapper=self._client_wrapper)
+ self.agents = AgentsClient(client_wrapper=self._client_wrapper)
self.directories = DirectoriesClient(client_wrapper=self._client_wrapper)
self.files = FilesClient(client_wrapper=self._client_wrapper)
self.evaluations = EvaluationsClient(client_wrapper=self._client_wrapper)
@@ -171,6 +174,7 @@ def __init__(
self.datasets = AsyncDatasetsClient(client_wrapper=self._client_wrapper)
self.evaluators = AsyncEvaluatorsClient(client_wrapper=self._client_wrapper)
self.flows = AsyncFlowsClient(client_wrapper=self._client_wrapper)
+ self.agents = AsyncAgentsClient(client_wrapper=self._client_wrapper)
self.directories = AsyncDirectoriesClient(client_wrapper=self._client_wrapper)
self.files = AsyncFilesClient(client_wrapper=self._client_wrapper)
self.evaluations = AsyncEvaluationsClient(client_wrapper=self._client_wrapper)
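
With this wiring, the agents API is reachable from both the sync and async clients. A hedged sketch (the exact `call` signature lives in the generated `agents/client.py`, which is not shown in this section; `id` and `messages` are assumed by analogy with `prompts.call`):

    from humanloop import Humanloop

    client = Humanloop(api_key="YOUR_API_KEY")
    # client.agents now sits alongside client.prompts, client.flows, etc.
    response = client.agents.call(  # parameters assumed from the prompts equivalent
        id="ag_...",  # hypothetical Agent ID
        messages=[{"role": "user", "content": "Hello"}],
    )
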
diff --git a/src/humanloop/core/client_wrapper.py b/src/humanloop/core/client_wrapper.py
index f25dc2ca..94cf9db0 100644
--- a/src/humanloop/core/client_wrapper.py
+++ b/src/humanloop/core/client_wrapper.py
@@ -14,10 +14,10 @@ def __init__(self, *, api_key: str, base_url: str, timeout: typing.Optional[floa
def get_headers(self) -> typing.Dict[str, str]:
headers: typing.Dict[str, str] = {
- "User-Agent": "humanloop/0.8.35",
+ "User-Agent": "humanloop/0.8.36",
"X-Fern-Language": "Python",
"X-Fern-SDK-Name": "humanloop",
- "X-Fern-SDK-Version": "0.8.35",
+ "X-Fern-SDK-Version": "0.8.36",
}
headers["X-API-KEY"] = self.api_key
return headers
diff --git a/src/humanloop/files/client.py b/src/humanloop/files/client.py
index c07358d0..3f97ee92 100644
--- a/src/humanloop/files/client.py
+++ b/src/humanloop/files/client.py
@@ -7,8 +7,8 @@
from ..types.project_sort_by import ProjectSortBy
from ..types.sort_order import SortOrder
from ..core.request_options import RequestOptions
-from ..types.paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response import (
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse,
+from ..types.paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response import (
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse,
)
from .types.retrieve_by_path_files_retrieve_by_path_post_response import RetrieveByPathFilesRetrieveByPathPostResponse
from ..core.client_wrapper import AsyncClientWrapper
@@ -44,8 +44,9 @@ def list_files(
environment: typing.Optional[str] = None,
sort_by: typing.Optional[ProjectSortBy] = None,
order: typing.Optional[SortOrder] = None,
+ include_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse:
+ ) -> PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse:
"""
Get a paginated list of files.
@@ -75,12 +76,15 @@ def list_files(
order : typing.Optional[SortOrder]
Direction to sort by.
+ include_content : typing.Optional[bool]
+ Whether to include the serialized file content in the response. Currently only supported for Agents and Prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse
Successful Response
Examples
@@ -101,6 +105,7 @@ def list_files(
environment=environment,
sort_by=sort_by,
order=order,
+ include_content=include_content,
request_options=request_options,
)
return response.data
@@ -174,8 +179,9 @@ async def list_files(
environment: typing.Optional[str] = None,
sort_by: typing.Optional[ProjectSortBy] = None,
order: typing.Optional[SortOrder] = None,
+ include_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse:
+ ) -> PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse:
"""
Get a paginated list of files.
@@ -205,12 +211,15 @@ async def list_files(
order : typing.Optional[SortOrder]
Direction to sort by.
+ include_content : typing.Optional[bool]
+ Whether to include the serialized file content in the response. Currently only supported for Agents and Prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse
Successful Response
Examples
@@ -239,6 +248,7 @@ async def main() -> None:
environment=environment,
sort_by=sort_by,
order=order,
+ include_content=include_content,
request_options=request_options,
)
return response.data
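
A short usage sketch of the new parameter; per the docstring above, content is currently only populated for Agents and Prompts:

    from humanloop import Humanloop

    client = Humanloop(api_key="YOUR_API_KEY")
    # Ask for the serialized file content alongside the usual file metadata.
    page = client.files.list_files(include_content=True)
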
diff --git a/src/humanloop/files/raw_client.py b/src/humanloop/files/raw_client.py
index 19f52cf2..2d30dac9 100644
--- a/src/humanloop/files/raw_client.py
+++ b/src/humanloop/files/raw_client.py
@@ -7,8 +7,8 @@
from ..types.sort_order import SortOrder
from ..core.request_options import RequestOptions
from ..core.http_response import HttpResponse
-from ..types.paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response import (
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse,
+from ..types.paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response import (
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse,
)
from ..core.unchecked_base_model import construct_type
from ..errors.unprocessable_entity_error import UnprocessableEntityError
@@ -38,8 +38,11 @@ def list_files(
environment: typing.Optional[str] = None,
sort_by: typing.Optional[ProjectSortBy] = None,
order: typing.Optional[SortOrder] = None,
+ include_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> HttpResponse[PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse]:
+ ) -> HttpResponse[
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse
+ ]:
"""
Get a paginated list of files.
@@ -69,12 +72,15 @@ def list_files(
order : typing.Optional[SortOrder]
Direction to sort by.
+ include_content : typing.Optional[bool]
+ Whether to include the serialized file content in the response. Currently only supported for Agents and Prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- HttpResponse[PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse]
+ HttpResponse[PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse]
Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
@@ -89,15 +95,16 @@ def list_files(
"environment": environment,
"sort_by": sort_by,
"order": order,
+ "include_content": include_content,
},
request_options=request_options,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse,
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse,
construct_type(
- type_=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse, # type: ignore
+ type_=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse, # type: ignore
object_=_response.json(),
),
)
@@ -199,8 +206,11 @@ async def list_files(
environment: typing.Optional[str] = None,
sort_by: typing.Optional[ProjectSortBy] = None,
order: typing.Optional[SortOrder] = None,
+ include_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> AsyncHttpResponse[PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse]:
+ ) -> AsyncHttpResponse[
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse
+ ]:
"""
Get a paginated list of files.
@@ -230,12 +240,15 @@ async def list_files(
order : typing.Optional[SortOrder]
Direction to sort by.
+ include_content : typing.Optional[bool]
+ Whether to include the serialized file content in the response. Currently only supported for Agents and Prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- AsyncHttpResponse[PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse]
+ AsyncHttpResponse[PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse]
Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
@@ -250,15 +263,16 @@ async def list_files(
"environment": environment,
"sort_by": sort_by,
"order": order,
+ "include_content": include_content,
},
request_options=request_options,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse,
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse,
construct_type(
- type_=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse, # type: ignore
+ type_=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse, # type: ignore
object_=_response.json(),
),
)
diff --git a/src/humanloop/files/requests/retrieve_by_path_files_retrieve_by_path_post_response.py b/src/humanloop/files/requests/retrieve_by_path_files_retrieve_by_path_post_response.py
index c1618edb..8c070ab3 100644
--- a/src/humanloop/files/requests/retrieve_by_path_files_retrieve_by_path_post_response.py
+++ b/src/humanloop/files/requests/retrieve_by_path_files_retrieve_by_path_post_response.py
@@ -6,7 +6,13 @@
from ...requests.dataset_response import DatasetResponseParams
from ...requests.evaluator_response import EvaluatorResponseParams
from ...requests.flow_response import FlowResponseParams
+from ...requests.agent_response import AgentResponseParams
RetrieveByPathFilesRetrieveByPathPostResponseParams = typing.Union[
- PromptResponseParams, ToolResponseParams, DatasetResponseParams, EvaluatorResponseParams, FlowResponseParams
+ PromptResponseParams,
+ ToolResponseParams,
+ DatasetResponseParams,
+ EvaluatorResponseParams,
+ FlowResponseParams,
+ AgentResponseParams,
]
diff --git a/src/humanloop/files/types/retrieve_by_path_files_retrieve_by_path_post_response.py b/src/humanloop/files/types/retrieve_by_path_files_retrieve_by_path_post_response.py
index 48415fc9..46ea271a 100644
--- a/src/humanloop/files/types/retrieve_by_path_files_retrieve_by_path_post_response.py
+++ b/src/humanloop/files/types/retrieve_by_path_files_retrieve_by_path_post_response.py
@@ -6,7 +6,8 @@
from ...types.dataset_response import DatasetResponse
from ...types.evaluator_response import EvaluatorResponse
from ...types.flow_response import FlowResponse
+from ...types.agent_response import AgentResponse
RetrieveByPathFilesRetrieveByPathPostResponse = typing.Union[
- PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse
+ PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse, AgentResponse
]
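
Callers that branch on the concrete response type now need an `AgentResponse` arm as well. A sketch (the `retrieve_by_path` method name and its `path` parameter are inferred from the response type's name and are not shown in this section):

    from humanloop import Humanloop
    from humanloop.types.agent_response import AgentResponse

    client = Humanloop(api_key="YOUR_API_KEY")
    file = client.files.retrieve_by_path(path="path/to/file")  # method and parameter inferred
    if isinstance(file, AgentResponse):
        print("Retrieved an Agent:", file.path)
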
diff --git a/src/humanloop/flows/client.py b/src/humanloop/flows/client.py
index a11776fc..bcb9491c 100644
--- a/src/humanloop/flows/client.py
+++ b/src/humanloop/flows/client.py
@@ -214,10 +214,10 @@ def log(
output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.",
log_status="incomplete",
start_time=datetime.datetime.fromisoformat(
- "2024-07-08 19:40:35+00:00",
+ "2024-07-08 21:40:35+00:00",
),
end_time=datetime.datetime.fromisoformat(
- "2024-07-08 19:40:39+00:00",
+ "2024-07-08 21:40:39+00:00",
),
)
"""
@@ -1128,10 +1128,10 @@ async def main() -> None:
output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.",
log_status="incomplete",
start_time=datetime.datetime.fromisoformat(
- "2024-07-08 19:40:35+00:00",
+ "2024-07-08 21:40:35+00:00",
),
end_time=datetime.datetime.fromisoformat(
- "2024-07-08 19:40:39+00:00",
+ "2024-07-08 21:40:39+00:00",
),
)
diff --git a/src/humanloop/logs/client.py b/src/humanloop/logs/client.py
index 17007c1b..b16d1f6b 100644
--- a/src/humanloop/logs/client.py
+++ b/src/humanloop/logs/client.py
@@ -99,7 +99,7 @@ def list(
If provided, limit the response to a random subset of logs from the filtered results. (This will be an approximate sample, not a strict limit.)
include_trace_children : typing.Optional[bool]
- If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow Logs.
+ If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow or Agent Logs.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -338,7 +338,7 @@ async def list(
If provided, limit the response to a random subset of logs from the filtered results. (This will be an approximate sample, not a strict limit.)
include_trace_children : typing.Optional[bool]
- If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow Logs.
+ If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow or Agent Logs.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
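
A sketch of pulling a trace together with its children; the `file_id` filter is an assumption about the standard logs listing parameters, which are not shown in this section:

    from humanloop import Humanloop

    client = Humanloop(api_key="YOUR_API_KEY")
    logs = client.logs.list(
        file_id="fl_...",  # hypothetical Flow or Agent file ID (assumed parameter)
        include_trace_children=True,  # populate trace_children on each returned Log
    )
    for log in logs:
        print(log.id, log.trace_children)
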
diff --git a/src/humanloop/prompts/__init__.py b/src/humanloop/prompts/__init__.py
index c1147ff2..557dcc5c 100644
--- a/src/humanloop/prompts/__init__.py
+++ b/src/humanloop/prompts/__init__.py
@@ -1,33 +1,49 @@
# This file was auto-generated by Fern from our API Definition.
from .types import (
+ PromptLogRequestPrompt,
PromptLogRequestToolChoice,
PromptLogUpdateRequestToolChoice,
+ PromptRequestReasoningEffort,
PromptRequestStop,
PromptRequestTemplate,
+ PromptsCallRequestPrompt,
PromptsCallRequestToolChoice,
+ PromptsCallStreamRequestPrompt,
PromptsCallStreamRequestToolChoice,
)
from .requests import (
+ PromptLogRequestPromptParams,
PromptLogRequestToolChoiceParams,
PromptLogUpdateRequestToolChoiceParams,
+ PromptRequestReasoningEffortParams,
PromptRequestStopParams,
PromptRequestTemplateParams,
+ PromptsCallRequestPromptParams,
PromptsCallRequestToolChoiceParams,
+ PromptsCallStreamRequestPromptParams,
PromptsCallStreamRequestToolChoiceParams,
)
__all__ = [
+ "PromptLogRequestPrompt",
+ "PromptLogRequestPromptParams",
"PromptLogRequestToolChoice",
"PromptLogRequestToolChoiceParams",
"PromptLogUpdateRequestToolChoice",
"PromptLogUpdateRequestToolChoiceParams",
+ "PromptRequestReasoningEffort",
+ "PromptRequestReasoningEffortParams",
"PromptRequestStop",
"PromptRequestStopParams",
"PromptRequestTemplate",
"PromptRequestTemplateParams",
+ "PromptsCallRequestPrompt",
+ "PromptsCallRequestPromptParams",
"PromptsCallRequestToolChoice",
"PromptsCallRequestToolChoiceParams",
+ "PromptsCallStreamRequestPrompt",
+ "PromptsCallStreamRequestPromptParams",
"PromptsCallStreamRequestToolChoice",
"PromptsCallStreamRequestToolChoiceParams",
]
diff --git a/src/humanloop/prompts/client.py b/src/humanloop/prompts/client.py
index e2fff4c3..7bc0fd72 100644
--- a/src/humanloop/prompts/client.py
+++ b/src/humanloop/prompts/client.py
@@ -5,7 +5,7 @@
from .raw_client import RawPromptsClient
from ..requests.chat_message import ChatMessageParams
from .requests.prompt_log_request_tool_choice import PromptLogRequestToolChoiceParams
-from ..requests.prompt_kernel_request import PromptKernelRequestParams
+from .requests.prompt_log_request_prompt import PromptLogRequestPromptParams
import datetime as dt
from ..types.log_status import LogStatus
from ..core.request_options import RequestOptions
@@ -13,9 +13,11 @@
from .requests.prompt_log_update_request_tool_choice import PromptLogUpdateRequestToolChoiceParams
from ..types.log_response import LogResponse
from .requests.prompts_call_stream_request_tool_choice import PromptsCallStreamRequestToolChoiceParams
+from .requests.prompts_call_stream_request_prompt import PromptsCallStreamRequestPromptParams
from ..requests.provider_api_keys import ProviderApiKeysParams
from ..types.prompt_call_stream_response import PromptCallStreamResponse
from .requests.prompts_call_request_tool_choice import PromptsCallRequestToolChoiceParams
+from .requests.prompts_call_request_prompt import PromptsCallRequestPromptParams
from ..types.prompt_call_response import PromptCallResponse
from ..types.project_sort_by import ProjectSortBy
from ..types.sort_order import SortOrder
@@ -33,7 +35,7 @@
from ..types.model_providers import ModelProviders
from .requests.prompt_request_stop import PromptRequestStopParams
from ..requests.response_format import ResponseFormatParams
-from ..types.reasoning_effort import ReasoningEffort
+from .requests.prompt_request_reasoning_effort import PromptRequestReasoningEffortParams
from ..requests.tool_function import ToolFunctionParams
from ..types.populate_template_response import PopulateTemplateResponse
from ..types.list_prompts import ListPrompts
@@ -44,6 +46,7 @@
from ..requests.evaluator_activation_deactivation_request_deactivate_item import (
EvaluatorActivationDeactivationRequestDeactivateItemParams,
)
+from ..types.prompt_kernel_request import PromptKernelRequest
from ..core.client_wrapper import AsyncClientWrapper
from .raw_client import AsyncRawPromptsClient
from ..core.pagination import AsyncPager
@@ -84,7 +87,7 @@ def log(
finish_reason: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptLogRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptLogRequestPromptParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -165,8 +168,11 @@ def log(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptLogRequestPromptParams]
+ The prompt configuration to use. Two formats are supported:
+ - A `'PromptKernelRequest'` object containing the prompt configuration
+ - A string containing a serialized .prompt file
+ A new Prompt version will be created if the provided details are new.
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -256,7 +262,7 @@ def log(
messages=[{"role": "user", "content": "What really happened at Roswell?"}],
inputs={"person": "Trump"},
created_at=datetime.datetime.fromisoformat(
- "2024-07-18 21:29:35.178000+00:00",
+ "2024-07-18 23:29:35.178000+00:00",
),
provider_latency=6.5931549072265625,
output_message={
@@ -479,7 +485,7 @@ def call_stream(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallStreamRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallStreamRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -537,8 +543,11 @@ def call_stream(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
+ The prompt configuration to use. Two formats are supported:
+ - A `'PromptKernelRequest'` object containing the prompt configuration
+ - A string containing a serialized .prompt file
+ A new Prompt version will be created if the provided details are new.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -648,7 +657,7 @@ def call(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -706,8 +715,11 @@ def call(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallRequestPromptParams]
+ The prompt configuration to use. Two formats are supported:
+ - A `'PromptKernelRequest'` object containing the prompt configuration
+ - A string containing a serialized .prompt file
+ A new Prompt version will be created if the provided details are new.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -962,7 +974,7 @@ def upsert(
other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
seed: typing.Optional[int] = OMIT,
response_format: typing.Optional[ResponseFormatParams] = OMIT,
- reasoning_effort: typing.Optional[ReasoningEffort] = OMIT,
+ reasoning_effort: typing.Optional[PromptRequestReasoningEffortParams] = OMIT,
tools: typing.Optional[typing.Sequence[ToolFunctionParams]] = OMIT,
linked_tools: typing.Optional[typing.Sequence[str]] = OMIT,
attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -1037,8 +1049,8 @@ def upsert(
response_format : typing.Optional[ResponseFormatParams]
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
- reasoning_effort : typing.Optional[ReasoningEffort]
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+ reasoning_effort : typing.Optional[PromptRequestReasoningEffortParams]
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAiReasoningEffort enum. Anthropic reasoning models expect an integer, which sets the maximum reasoning-token budget.
tools : typing.Optional[typing.Sequence[ToolFunctionParams]]
The tool specification that the model can choose to call if Tool calling is supported.
@@ -1599,6 +1611,92 @@ def update_monitoring(
)
return response.data
+ def serialize(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> None:
+ """
+ Serialize a Prompt to the .prompt file format.
+
+ Useful for storing the Prompt with your code in a version control system,
+ or for editing with an AI tool.
+
+ By default, the deployed version of the Prompt is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Prompt.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Prompt.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Prompt to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.prompts.serialize(
+ id="id",
+ )
+ """
+ response = self._raw_client.serialize(
+ id, version_id=version_id, environment=environment, request_options=request_options
+ )
+ return response.data
+
+ def deserialize(
+ self, *, prompt: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> PromptKernelRequest:
+ """
+ Deserialize a Prompt from the .prompt file format.
+
+ This returns a subset of the attributes required by a Prompt.
+ This subset is the part that defines the Prompt version (e.g. `model`, `temperature`, etc.).
+
+ Parameters
+ ----------
+ prompt : str
+ The serialized contents of a .prompt file.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ PromptKernelRequest
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.prompts.deserialize(
+ prompt="prompt",
+ )
+ """
+ response = self._raw_client.deserialize(prompt=prompt, request_options=request_options)
+ return response.data
+
class AsyncPromptsClient:
def __init__(self, *, client_wrapper: AsyncClientWrapper):
@@ -1632,7 +1730,7 @@ async def log(
finish_reason: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptLogRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptLogRequestPromptParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -1713,8 +1811,11 @@ async def log(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptLogRequestPromptParams]
+ The prompt configuration to use. Two formats are supported:
+ - A `'PromptKernelRequest'` object containing the prompt configuration
+ - A string containing a serialized .prompt file
+ A new Prompt version will be created if the provided details are new.
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -1810,7 +1911,7 @@ async def main() -> None:
],
inputs={"person": "Trump"},
created_at=datetime.datetime.fromisoformat(
- "2024-07-18 21:29:35.178000+00:00",
+ "2024-07-18 23:29:35.178000+00:00",
),
provider_latency=6.5931549072265625,
output_message={
@@ -2044,7 +2145,7 @@ async def call_stream(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallStreamRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallStreamRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2102,8 +2203,11 @@ async def call_stream(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
+ The prompt configuration to use. Two formats are supported:
+ - A `'PromptKernelRequest'` object containing the prompt configuration
+ - A string containing a serialized .prompt file
+ A new Prompt version will be created if the provided details are new.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -2222,7 +2326,7 @@ async def call(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2280,8 +2384,11 @@ async def call(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallRequestPromptParams]
+ The prompt configuration to use. Two formats are supported:
+ - A `'PromptKernelRequest'` object containing the prompt configuration
+ - A string containing a serialized .prompt file
+ A new Prompt version will be created if the provided details are new.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -2552,7 +2659,7 @@ async def upsert(
other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
seed: typing.Optional[int] = OMIT,
response_format: typing.Optional[ResponseFormatParams] = OMIT,
- reasoning_effort: typing.Optional[ReasoningEffort] = OMIT,
+ reasoning_effort: typing.Optional[PromptRequestReasoningEffortParams] = OMIT,
tools: typing.Optional[typing.Sequence[ToolFunctionParams]] = OMIT,
linked_tools: typing.Optional[typing.Sequence[str]] = OMIT,
attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2627,8 +2734,8 @@ async def upsert(
response_format : typing.Optional[ResponseFormatParams]
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
- reasoning_effort : typing.Optional[ReasoningEffort]
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+ reasoning_effort : typing.Optional[PromptRequestReasoningEffortParams]
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAiReasoningEffort enum. Anthropic reasoning models expect an integer, which sets the maximum reasoning-token budget.
tools : typing.Optional[typing.Sequence[ToolFunctionParams]]
The tool specification that the model can choose to call if Tool calling is supported.
@@ -3284,3 +3391,105 @@ async def main() -> None:
id, activate=activate, deactivate=deactivate, request_options=request_options
)
return response.data
+
+ async def serialize(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> None:
+ """
+ Serialize a Prompt to the .prompt file format.
+
+ Useful for storing the Prompt with your code in a version control system,
+ or for editing with an AI tool.
+
+ By default, the deployed version of the Prompt is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Prompt.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Prompt.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Prompt to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.prompts.serialize(
+ id="id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.serialize(
+ id, version_id=version_id, environment=environment, request_options=request_options
+ )
+ return response.data
+
+ async def deserialize(
+ self, *, prompt: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> PromptKernelRequest:
+ """
+ Deserialize a Prompt from the .prompt file format.
+
+ This returns a subset of the attributes required by a Prompt.
+ This subset is the part that defines the Prompt version (e.g. `model`, `temperature`, etc.).
+
+ Parameters
+ ----------
+ prompt : str
+ The serialized contents of a .prompt file.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ PromptKernelRequest
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.prompts.deserialize(
+ prompt="prompt",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.deserialize(prompt=prompt, request_options=request_options)
+ return response.data
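
The two endpoints are inverses over the `.prompt` format. A round-trip sketch; note that the generated sync `serialize` is typed `-> None` above, so this example only exercises `deserialize`, and the file name is illustrative:

    from pathlib import Path

    from humanloop import Humanloop

    client = Humanloop(api_key="YOUR_API_KEY")
    # Turn a version-controlled .prompt file back into the kernel that defines
    # a Prompt version (model, temperature, etc., per the docstring above).
    kernel = client.prompts.deserialize(prompt=Path("my_prompt.prompt").read_text())
    print(kernel.model, kernel.temperature)
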
diff --git a/src/humanloop/prompts/raw_client.py b/src/humanloop/prompts/raw_client.py
index b5334c82..e0700ae7 100644
--- a/src/humanloop/prompts/raw_client.py
+++ b/src/humanloop/prompts/raw_client.py
@@ -4,7 +4,7 @@
from ..core.client_wrapper import SyncClientWrapper
from ..requests.chat_message import ChatMessageParams
from .requests.prompt_log_request_tool_choice import PromptLogRequestToolChoiceParams
-from ..requests.prompt_kernel_request import PromptKernelRequestParams
+from .requests.prompt_log_request_prompt import PromptLogRequestPromptParams
import datetime as dt
from ..types.log_status import LogStatus
from ..core.request_options import RequestOptions
@@ -20,11 +20,13 @@
from ..types.log_response import LogResponse
from ..core.jsonable_encoder import jsonable_encoder
from .requests.prompts_call_stream_request_tool_choice import PromptsCallStreamRequestToolChoiceParams
+from .requests.prompts_call_stream_request_prompt import PromptsCallStreamRequestPromptParams
from ..requests.provider_api_keys import ProviderApiKeysParams
from ..types.prompt_call_stream_response import PromptCallStreamResponse
import httpx_sse
import contextlib
from .requests.prompts_call_request_tool_choice import PromptsCallRequestToolChoiceParams
+from .requests.prompts_call_request_prompt import PromptsCallRequestPromptParams
from ..types.prompt_call_response import PromptCallResponse
from ..types.model_endpoints import ModelEndpoints
from .requests.prompt_request_template import PromptRequestTemplateParams
@@ -32,7 +34,7 @@
from ..types.model_providers import ModelProviders
from .requests.prompt_request_stop import PromptRequestStopParams
from ..requests.response_format import ResponseFormatParams
-from ..types.reasoning_effort import ReasoningEffort
+from .requests.prompt_request_reasoning_effort import PromptRequestReasoningEffortParams
from ..requests.tool_function import ToolFunctionParams
from ..types.prompt_response import PromptResponse
from ..types.populate_template_response import PopulateTemplateResponse
@@ -44,6 +46,7 @@
from ..requests.evaluator_activation_deactivation_request_deactivate_item import (
EvaluatorActivationDeactivationRequestDeactivateItemParams,
)
+from ..types.prompt_kernel_request import PromptKernelRequest
from ..core.client_wrapper import AsyncClientWrapper
from ..core.http_response import AsyncHttpResponse
@@ -72,7 +75,7 @@ def log(
finish_reason: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptLogRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptLogRequestPromptParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -153,8 +156,11 @@ def log(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptLogRequestPromptParams]
+ The prompt configuration to use. Two formats are supported:
+ - A `'PromptKernelRequest'` object containing the prompt configuration
+ - A string containing a serialized .prompt file
+ A new Prompt version will be created if the provided details are new.
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -248,7 +254,7 @@ def log(
object_=tool_choice, annotation=PromptLogRequestToolChoiceParams, direction="write"
),
"prompt": convert_and_respect_annotation_metadata(
- object_=prompt, annotation=PromptKernelRequestParams, direction="write"
+ object_=prompt, annotation=PromptLogRequestPromptParams, direction="write"
),
"start_time": start_time,
"end_time": end_time,
@@ -495,7 +501,7 @@ def call_stream(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallStreamRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallStreamRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -553,8 +559,11 @@ def call_stream(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
+ The prompt configuration to use. Two formats are supported:
+ - A `'PromptKernelRequest'` object containing the prompt configuration
+ - A string containing a serialized .prompt file
+ A new Prompt version will be created if the provided details are new.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -632,7 +641,7 @@ def call_stream(
object_=tool_choice, annotation=PromptsCallStreamRequestToolChoiceParams, direction="write"
),
"prompt": convert_and_respect_annotation_metadata(
- object_=prompt, annotation=PromptKernelRequestParams, direction="write"
+ object_=prompt, annotation=PromptsCallStreamRequestPromptParams, direction="write"
),
"inputs": inputs,
"source": source,
@@ -705,7 +714,7 @@ def call(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -763,8 +772,11 @@ def call(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallRequestPromptParams]
+ The prompt configuration to use. Two formats are supported:
+ - A `'PromptKernelRequest'` object containing the prompt configuration
+ - A string containing a serialized .prompt file
+ A new Prompt version will be created if the provided details are new.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -842,7 +854,7 @@ def call(
object_=tool_choice, annotation=PromptsCallRequestToolChoiceParams, direction="write"
),
"prompt": convert_and_respect_annotation_metadata(
- object_=prompt, annotation=PromptKernelRequestParams, direction="write"
+ object_=prompt, annotation=PromptsCallRequestPromptParams, direction="write"
),
"inputs": inputs,
"source": source,
@@ -915,7 +927,7 @@ def upsert(
other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
seed: typing.Optional[int] = OMIT,
response_format: typing.Optional[ResponseFormatParams] = OMIT,
- reasoning_effort: typing.Optional[ReasoningEffort] = OMIT,
+ reasoning_effort: typing.Optional[PromptRequestReasoningEffortParams] = OMIT,
tools: typing.Optional[typing.Sequence[ToolFunctionParams]] = OMIT,
linked_tools: typing.Optional[typing.Sequence[str]] = OMIT,
attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -990,8 +1002,8 @@ def upsert(
response_format : typing.Optional[ResponseFormatParams]
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
- reasoning_effort : typing.Optional[ReasoningEffort]
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+ reasoning_effort : typing.Optional[PromptRequestReasoningEffortParams]
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAiReasoningEffort enum. Anthropic reasoning models expect an integer, which sets the maximum reasoning-token budget.
tools : typing.Optional[typing.Sequence[ToolFunctionParams]]
The tool specification that the model can choose to call if Tool calling is supported.
@@ -1051,7 +1063,9 @@ def upsert(
"response_format": convert_and_respect_annotation_metadata(
object_=response_format, annotation=ResponseFormatParams, direction="write"
),
- "reasoning_effort": reasoning_effort,
+ "reasoning_effort": convert_and_respect_annotation_metadata(
+ object_=reasoning_effort, annotation=PromptRequestReasoningEffortParams, direction="write"
+ ),
"tools": convert_and_respect_annotation_metadata(
object_=tools, annotation=typing.Sequence[ToolFunctionParams], direction="write"
),
@@ -1744,6 +1758,126 @@ def update_monitoring(
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
+ def serialize(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[None]:
+ """
+ Serialize a Prompt to the .prompt file format.
+
+ Useful for storing the Prompt with your code in a version control system,
+ or for editing with an AI tool.
+
+ By default, the deployed version of the Prompt is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Prompt.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Prompt.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Prompt to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[None]
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"prompts/{jsonable_encoder(id)}/serialize",
+ method="GET",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return HttpResponse(response=_response, data=None)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def deserialize(
+ self, *, prompt: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> HttpResponse[PromptKernelRequest]:
+ """
+ Deserialize a Prompt from the .prompt file format.
+
+ This returns a subset of the attributes required by a Prompt.
+ This subset is the part that defines the Prompt version (e.g. `model`, `temperature`, etc.).
+
+ Parameters
+ ----------
+ prompt : str
+ The serialized contents of a .prompt file.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[PromptKernelRequest]
+ Successful Response
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "prompts/deserialize",
+ method="POST",
+ json={
+ "prompt": prompt,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ PromptKernelRequest,
+ construct_type(
+ type_=PromptKernelRequest, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
class AsyncRawPromptsClient:
def __init__(self, *, client_wrapper: AsyncClientWrapper):
@@ -1766,7 +1900,7 @@ async def log(
finish_reason: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptLogRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptLogRequestPromptParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -1847,8 +1981,11 @@ async def log(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptLogRequestPromptParams]
+ The prompt configuration to use. Two formats are supported:
+ - A `'PromptKernelRequest'` object containing the prompt configuration
+ - A string containing a serialized .prompt file
+ A new Prompt version will be created if the provided details are new.
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -1942,7 +2079,7 @@ async def log(
object_=tool_choice, annotation=PromptLogRequestToolChoiceParams, direction="write"
),
"prompt": convert_and_respect_annotation_metadata(
- object_=prompt, annotation=PromptKernelRequestParams, direction="write"
+ object_=prompt, annotation=PromptLogRequestPromptParams, direction="write"
),
"start_time": start_time,
"end_time": end_time,
@@ -2189,7 +2326,7 @@ async def call_stream(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallStreamRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallStreamRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2247,8 +2384,11 @@ async def call_stream(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
+ The prompt configuration to use. Two formats are supported:
+ - A `'PromptKernelRequest'` object containing the prompt configuration
+ - A string containing a serialized .prompt file
+ A new Prompt version will be created if the provided details are new.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -2326,7 +2466,7 @@ async def call_stream(
object_=tool_choice, annotation=PromptsCallStreamRequestToolChoiceParams, direction="write"
),
"prompt": convert_and_respect_annotation_metadata(
- object_=prompt, annotation=PromptKernelRequestParams, direction="write"
+ object_=prompt, annotation=PromptsCallStreamRequestPromptParams, direction="write"
),
"inputs": inputs,
"source": source,
@@ -2399,7 +2539,7 @@ async def call(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2457,8 +2597,11 @@ async def call(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallRequestPromptParams]
+ The prompt configuration to use. Two formats are supported:
+ - A `'PromptKernelRequest'` object containing the prompt configuration
+ - A string containing a serialized .prompt file
+ A new Prompt version will be created if the provided details are new.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -2536,7 +2679,7 @@ async def call(
object_=tool_choice, annotation=PromptsCallRequestToolChoiceParams, direction="write"
),
"prompt": convert_and_respect_annotation_metadata(
- object_=prompt, annotation=PromptKernelRequestParams, direction="write"
+ object_=prompt, annotation=PromptsCallRequestPromptParams, direction="write"
),
"inputs": inputs,
"source": source,
@@ -2609,7 +2752,7 @@ async def upsert(
other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
seed: typing.Optional[int] = OMIT,
response_format: typing.Optional[ResponseFormatParams] = OMIT,
- reasoning_effort: typing.Optional[ReasoningEffort] = OMIT,
+ reasoning_effort: typing.Optional[PromptRequestReasoningEffortParams] = OMIT,
tools: typing.Optional[typing.Sequence[ToolFunctionParams]] = OMIT,
linked_tools: typing.Optional[typing.Sequence[str]] = OMIT,
attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2684,8 +2827,8 @@ async def upsert(
response_format : typing.Optional[ResponseFormatParams]
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
- reasoning_effort : typing.Optional[ReasoningEffort]
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+ reasoning_effort : typing.Optional[PromptRequestReasoningEffortParams]
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which specifies the maximum token budget.
tools : typing.Optional[typing.Sequence[ToolFunctionParams]]
The tool specification that the model can choose to call if Tool calling is supported.
@@ -2745,7 +2888,9 @@ async def upsert(
"response_format": convert_and_respect_annotation_metadata(
object_=response_format, annotation=ResponseFormatParams, direction="write"
),
- "reasoning_effort": reasoning_effort,
+ "reasoning_effort": convert_and_respect_annotation_metadata(
+ object_=reasoning_effort, annotation=PromptRequestReasoningEffortParams, direction="write"
+ ),
"tools": convert_and_respect_annotation_metadata(
object_=tools, annotation=typing.Sequence[ToolFunctionParams], direction="write"
),
@@ -3439,3 +3584,123 @@ async def update_monitoring(
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def serialize(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[None]:
+ """
+ Serialize a Prompt to the .prompt file format.
+
+ Useful for storing the Prompt with your code in a version control system,
+ or for editing with an AI tool.
+
+ By default, the deployed version of the Prompt is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Prompt.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Prompt.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Prompt to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[None]
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"prompts/{jsonable_encoder(id)}/serialize",
+ method="GET",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return AsyncHttpResponse(response=_response, data=None)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def deserialize(
+ self, *, prompt: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncHttpResponse[PromptKernelRequest]:
+ """
+ Deserialize a Prompt from the .prompt file format.
+
+ This returns a subset of the attributes required by a Prompt.
+ This subset is the part that defines the Prompt version (e.g. `model`, `temperature`, etc.).
+
+ Parameters
+ ----------
+ prompt : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[PromptKernelRequest]
+ Successful Response
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "prompts/deserialize",
+ method="POST",
+ json={
+ "prompt": prompt,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ PromptKernelRequest,
+ construct_type(
+ type_=PromptKernelRequest, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
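A sketch of the two new endpoints as used through the wrapping async client. The method names mirror the raw client above; the Prompt ID and .prompt contents are placeholders, and AsyncHumanloop as the exported client is an assumption:

    import asyncio

    from humanloop import AsyncHumanloop

    PROMPT_FILE = "..."  # contents of a .prompt file, e.g. read from version control

    async def round_trip() -> None:
        client = AsyncHumanloop(api_key="YOUR_API_KEY")
        # Serialize the deployed version of a Prompt to the .prompt format;
        # pass version_id or environment to target a different version.
        await client.prompts.serialize(id="pr_1234567890")
        # Deserialize a .prompt file back into the version-defining kernel fields.
        kernel = await client.prompts.deserialize(prompt=PROMPT_FILE)
        print(kernel.model, kernel.temperature)

    asyncio.run(round_trip())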
diff --git a/src/humanloop/prompts/requests/__init__.py b/src/humanloop/prompts/requests/__init__.py
index c5119552..ae1cfb6a 100644
--- a/src/humanloop/prompts/requests/__init__.py
+++ b/src/humanloop/prompts/requests/__init__.py
@@ -1,17 +1,25 @@
# This file was auto-generated by Fern from our API Definition.
+from .prompt_log_request_prompt import PromptLogRequestPromptParams
from .prompt_log_request_tool_choice import PromptLogRequestToolChoiceParams
from .prompt_log_update_request_tool_choice import PromptLogUpdateRequestToolChoiceParams
+from .prompt_request_reasoning_effort import PromptRequestReasoningEffortParams
from .prompt_request_stop import PromptRequestStopParams
from .prompt_request_template import PromptRequestTemplateParams
+from .prompts_call_request_prompt import PromptsCallRequestPromptParams
from .prompts_call_request_tool_choice import PromptsCallRequestToolChoiceParams
+from .prompts_call_stream_request_prompt import PromptsCallStreamRequestPromptParams
from .prompts_call_stream_request_tool_choice import PromptsCallStreamRequestToolChoiceParams
__all__ = [
+ "PromptLogRequestPromptParams",
"PromptLogRequestToolChoiceParams",
"PromptLogUpdateRequestToolChoiceParams",
+ "PromptRequestReasoningEffortParams",
"PromptRequestStopParams",
"PromptRequestTemplateParams",
+ "PromptsCallRequestPromptParams",
"PromptsCallRequestToolChoiceParams",
+ "PromptsCallStreamRequestPromptParams",
"PromptsCallStreamRequestToolChoiceParams",
]
diff --git a/src/humanloop/prompts/requests/prompt_log_request_prompt.py b/src/humanloop/prompts/requests/prompt_log_request_prompt.py
new file mode 100644
index 00000000..8473bb42
--- /dev/null
+++ b/src/humanloop/prompts/requests/prompt_log_request_prompt.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.prompt_kernel_request import PromptKernelRequestParams
+
+PromptLogRequestPromptParams = typing.Union[PromptKernelRequestParams, str]
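Both members of this union are accepted wherever the widened prompt parameter appears above. A sketch (the log calls are commented out; client setup as in the other examples):

    from humanloop.requests import PromptKernelRequestParams

    # Option 1: a PromptKernelRequest-style dict with the version details.
    as_kernel: PromptKernelRequestParams = {"model": "gpt-4o", "temperature": 0.7}

    # Option 2: the raw text of a serialized .prompt file.
    as_file: str = "..."  # elided; see the serialize endpoint above

    # client.prompts.log(path="sketches/qa", prompt=as_kernel, output="...")
    # client.prompts.log(path="sketches/qa", prompt=as_file, output="...")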
diff --git a/src/humanloop/prompts/requests/prompt_request_reasoning_effort.py b/src/humanloop/prompts/requests/prompt_request_reasoning_effort.py
new file mode 100644
index 00000000..080a107e
--- /dev/null
+++ b/src/humanloop/prompts/requests/prompt_request_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.open_ai_reasoning_effort import OpenAiReasoningEffort
+
+PromptRequestReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int]
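The same union with example values; treating the OpenAiReasoningEffort members as the OpenAI-style "low"/"medium"/"high" literals is an assumption:

    from humanloop.prompts.requests import PromptRequestReasoningEffortParams

    openai_effort: PromptRequestReasoningEffortParams = "medium"  # enum value, OpenAI models
    anthropic_budget: PromptRequestReasoningEffortParams = 2048  # max token budget, Anthropic models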
diff --git a/src/humanloop/prompts/requests/prompts_call_request_prompt.py b/src/humanloop/prompts/requests/prompts_call_request_prompt.py
new file mode 100644
index 00000000..7a236235
--- /dev/null
+++ b/src/humanloop/prompts/requests/prompts_call_request_prompt.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.prompt_kernel_request import PromptKernelRequestParams
+
+PromptsCallRequestPromptParams = typing.Union[PromptKernelRequestParams, str]
diff --git a/src/humanloop/prompts/requests/prompts_call_stream_request_prompt.py b/src/humanloop/prompts/requests/prompts_call_stream_request_prompt.py
new file mode 100644
index 00000000..9524425b
--- /dev/null
+++ b/src/humanloop/prompts/requests/prompts_call_stream_request_prompt.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.prompt_kernel_request import PromptKernelRequestParams
+
+PromptsCallStreamRequestPromptParams = typing.Union[PromptKernelRequestParams, str]
diff --git a/src/humanloop/prompts/types/__init__.py b/src/humanloop/prompts/types/__init__.py
index 644cf6b5..40326bce 100644
--- a/src/humanloop/prompts/types/__init__.py
+++ b/src/humanloop/prompts/types/__init__.py
@@ -1,17 +1,25 @@
# This file was auto-generated by Fern from our API Definition.
+from .prompt_log_request_prompt import PromptLogRequestPrompt
from .prompt_log_request_tool_choice import PromptLogRequestToolChoice
from .prompt_log_update_request_tool_choice import PromptLogUpdateRequestToolChoice
+from .prompt_request_reasoning_effort import PromptRequestReasoningEffort
from .prompt_request_stop import PromptRequestStop
from .prompt_request_template import PromptRequestTemplate
+from .prompts_call_request_prompt import PromptsCallRequestPrompt
from .prompts_call_request_tool_choice import PromptsCallRequestToolChoice
+from .prompts_call_stream_request_prompt import PromptsCallStreamRequestPrompt
from .prompts_call_stream_request_tool_choice import PromptsCallStreamRequestToolChoice
__all__ = [
+ "PromptLogRequestPrompt",
"PromptLogRequestToolChoice",
"PromptLogUpdateRequestToolChoice",
+ "PromptRequestReasoningEffort",
"PromptRequestStop",
"PromptRequestTemplate",
+ "PromptsCallRequestPrompt",
"PromptsCallRequestToolChoice",
+ "PromptsCallStreamRequestPrompt",
"PromptsCallStreamRequestToolChoice",
]
diff --git a/src/humanloop/prompts/types/prompt_log_request_prompt.py b/src/humanloop/prompts/types/prompt_log_request_prompt.py
new file mode 100644
index 00000000..4a0791dc
--- /dev/null
+++ b/src/humanloop/prompts/types/prompt_log_request_prompt.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.prompt_kernel_request import PromptKernelRequest
+
+PromptLogRequestPrompt = typing.Union[PromptKernelRequest, str]
diff --git a/src/humanloop/prompts/types/prompt_request_reasoning_effort.py b/src/humanloop/prompts/types/prompt_request_reasoning_effort.py
new file mode 100644
index 00000000..33f35288
--- /dev/null
+++ b/src/humanloop/prompts/types/prompt_request_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.open_ai_reasoning_effort import OpenAiReasoningEffort
+
+PromptRequestReasoningEffort = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/prompts/types/prompts_call_request_prompt.py b/src/humanloop/prompts/types/prompts_call_request_prompt.py
new file mode 100644
index 00000000..78a9f5a1
--- /dev/null
+++ b/src/humanloop/prompts/types/prompts_call_request_prompt.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.prompt_kernel_request import PromptKernelRequest
+
+PromptsCallRequestPrompt = typing.Union[PromptKernelRequest, str]
diff --git a/src/humanloop/prompts/types/prompts_call_stream_request_prompt.py b/src/humanloop/prompts/types/prompts_call_stream_request_prompt.py
new file mode 100644
index 00000000..71376823
--- /dev/null
+++ b/src/humanloop/prompts/types/prompts_call_stream_request_prompt.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.prompt_kernel_request import PromptKernelRequest
+
+PromptsCallStreamRequestPrompt = typing.Union[PromptKernelRequest, str]
diff --git a/src/humanloop/requests/__init__.py b/src/humanloop/requests/__init__.py
index bd9458ba..ba9f74af 100644
--- a/src/humanloop/requests/__init__.py
+++ b/src/humanloop/requests/__init__.py
@@ -1,11 +1,40 @@
# This file was auto-generated by Fern from our API Definition.
+from .agent_call_response import AgentCallResponseParams
+from .agent_call_response_tool_choice import AgentCallResponseToolChoiceParams
+from .agent_call_stream_response import AgentCallStreamResponseParams
+from .agent_call_stream_response_payload import AgentCallStreamResponsePayloadParams
from .agent_config_response import AgentConfigResponseParams
+from .agent_continue_response import AgentContinueResponseParams
+from .agent_continue_response_tool_choice import AgentContinueResponseToolChoiceParams
+from .agent_continue_stream_response import AgentContinueStreamResponseParams
+from .agent_continue_stream_response_payload import AgentContinueStreamResponsePayloadParams
+from .agent_inline_tool import AgentInlineToolParams
+from .agent_kernel_request import AgentKernelRequestParams
+from .agent_kernel_request_reasoning_effort import AgentKernelRequestReasoningEffortParams
+from .agent_kernel_request_stop import AgentKernelRequestStopParams
+from .agent_kernel_request_template import AgentKernelRequestTemplateParams
+from .agent_kernel_request_tools_item import AgentKernelRequestToolsItemParams
+from .agent_linked_file_request import AgentLinkedFileRequestParams
+from .agent_linked_file_response import AgentLinkedFileResponseParams
+from .agent_linked_file_response_file import AgentLinkedFileResponseFileParams
+from .agent_log_response import AgentLogResponseParams
+from .agent_log_response_tool_choice import AgentLogResponseToolChoiceParams
+from .agent_log_stream_response import AgentLogStreamResponseParams
+from .agent_response import AgentResponseParams
+from .agent_response_reasoning_effort import AgentResponseReasoningEffortParams
+from .agent_response_stop import AgentResponseStopParams
+from .agent_response_template import AgentResponseTemplateParams
+from .agent_response_tools_item import AgentResponseToolsItemParams
+from .anthropic_redacted_thinking_content import AnthropicRedactedThinkingContentParams
+from .anthropic_thinking_content import AnthropicThinkingContentParams
from .boolean_evaluator_stats_response import BooleanEvaluatorStatsResponseParams
from .chat_message import ChatMessageParams
from .chat_message_content import ChatMessageContentParams
from .chat_message_content_item import ChatMessageContentItemParams
+from .chat_message_thinking_item import ChatMessageThinkingItemParams
from .code_evaluator_request import CodeEvaluatorRequestParams
+from .create_agent_log_response import CreateAgentLogResponseParams
from .create_datapoint_request import CreateDatapointRequestParams
from .create_datapoint_request_target_value import CreateDatapointRequestTargetValueParams
from .create_evaluator_log_response import CreateEvaluatorLogResponseParams
@@ -51,6 +80,7 @@
from .external_evaluator_request import ExternalEvaluatorRequestParams
from .file_environment_response import FileEnvironmentResponseParams
from .file_environment_response_file import FileEnvironmentResponseFileParams
+from .file_environment_variable_request import FileEnvironmentVariableRequestParams
from .file_id import FileIdParams
from .file_path import FilePathParams
from .file_request import FileRequestParams
@@ -64,7 +94,9 @@
from .image_chat_content import ImageChatContentParams
from .image_url import ImageUrlParams
from .input_response import InputResponseParams
+from .linked_file_request import LinkedFileRequestParams
from .linked_tool_response import LinkedToolResponseParams
+from .list_agents import ListAgentsParams
from .list_datasets import ListDatasetsParams
from .list_evaluators import ListEvaluatorsParams
from .list_flows import ListFlowsParams
@@ -72,28 +104,31 @@
from .list_tools import ListToolsParams
from .llm_evaluator_request import LlmEvaluatorRequestParams
from .log_response import LogResponseParams
+from .log_stream_response import LogStreamResponseParams
from .monitoring_evaluator_environment_request import MonitoringEvaluatorEnvironmentRequestParams
from .monitoring_evaluator_response import MonitoringEvaluatorResponseParams
from .monitoring_evaluator_version_request import MonitoringEvaluatorVersionRequestParams
from .numeric_evaluator_stats_response import NumericEvaluatorStatsResponseParams
from .overall_stats import OverallStatsParams
+from .paginated_data_agent_response import PaginatedDataAgentResponseParams
from .paginated_data_evaluation_log_response import PaginatedDataEvaluationLogResponseParams
from .paginated_data_evaluator_response import PaginatedDataEvaluatorResponseParams
from .paginated_data_flow_response import PaginatedDataFlowResponseParams
from .paginated_data_log_response import PaginatedDataLogResponseParams
from .paginated_data_prompt_response import PaginatedDataPromptResponseParams
from .paginated_data_tool_response import PaginatedDataToolResponseParams
-from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response import (
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseParams,
+from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response import (
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseParams,
)
-from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item import (
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams,
+from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item import (
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams,
)
from .paginated_datapoint_response import PaginatedDatapointResponseParams
from .paginated_dataset_response import PaginatedDatasetResponseParams
from .paginated_evaluation_response import PaginatedEvaluationResponseParams
from .populate_template_response import PopulateTemplateResponseParams
from .populate_template_response_populated_template import PopulateTemplateResponsePopulatedTemplateParams
+from .populate_template_response_reasoning_effort import PopulateTemplateResponseReasoningEffortParams
from .populate_template_response_stop import PopulateTemplateResponseStopParams
from .populate_template_response_template import PopulateTemplateResponseTemplateParams
from .prompt_call_log_response import PromptCallLogResponseParams
@@ -101,11 +136,13 @@
from .prompt_call_response_tool_choice import PromptCallResponseToolChoiceParams
from .prompt_call_stream_response import PromptCallStreamResponseParams
from .prompt_kernel_request import PromptKernelRequestParams
+from .prompt_kernel_request_reasoning_effort import PromptKernelRequestReasoningEffortParams
from .prompt_kernel_request_stop import PromptKernelRequestStopParams
from .prompt_kernel_request_template import PromptKernelRequestTemplateParams
from .prompt_log_response import PromptLogResponseParams
from .prompt_log_response_tool_choice import PromptLogResponseToolChoiceParams
from .prompt_response import PromptResponseParams
+from .prompt_response_reasoning_effort import PromptResponseReasoningEffortParams
from .prompt_response_stop import PromptResponseStopParams
from .prompt_response_template import PromptResponseTemplateParams
from .provider_api_keys import ProviderApiKeysParams
@@ -117,6 +154,7 @@
from .text_chat_content import TextChatContentParams
from .text_evaluator_stats_response import TextEvaluatorStatsResponseParams
from .tool_call import ToolCallParams
+from .tool_call_response import ToolCallResponseParams
from .tool_choice import ToolChoiceParams
from .tool_function import ToolFunctionParams
from .tool_kernel_request import ToolKernelRequestParams
@@ -135,12 +173,41 @@
from .version_stats_response_evaluator_version_stats_item import VersionStatsResponseEvaluatorVersionStatsItemParams
__all__ = [
+ "AgentCallResponseParams",
+ "AgentCallResponseToolChoiceParams",
+ "AgentCallStreamResponseParams",
+ "AgentCallStreamResponsePayloadParams",
"AgentConfigResponseParams",
+ "AgentContinueResponseParams",
+ "AgentContinueResponseToolChoiceParams",
+ "AgentContinueStreamResponseParams",
+ "AgentContinueStreamResponsePayloadParams",
+ "AgentInlineToolParams",
+ "AgentKernelRequestParams",
+ "AgentKernelRequestReasoningEffortParams",
+ "AgentKernelRequestStopParams",
+ "AgentKernelRequestTemplateParams",
+ "AgentKernelRequestToolsItemParams",
+ "AgentLinkedFileRequestParams",
+ "AgentLinkedFileResponseFileParams",
+ "AgentLinkedFileResponseParams",
+ "AgentLogResponseParams",
+ "AgentLogResponseToolChoiceParams",
+ "AgentLogStreamResponseParams",
+ "AgentResponseParams",
+ "AgentResponseReasoningEffortParams",
+ "AgentResponseStopParams",
+ "AgentResponseTemplateParams",
+ "AgentResponseToolsItemParams",
+ "AnthropicRedactedThinkingContentParams",
+ "AnthropicThinkingContentParams",
"BooleanEvaluatorStatsResponseParams",
"ChatMessageContentItemParams",
"ChatMessageContentParams",
"ChatMessageParams",
+ "ChatMessageThinkingItemParams",
"CodeEvaluatorRequestParams",
+ "CreateAgentLogResponseParams",
"CreateDatapointRequestParams",
"CreateDatapointRequestTargetValueParams",
"CreateEvaluatorLogResponseParams",
@@ -180,6 +247,7 @@
"ExternalEvaluatorRequestParams",
"FileEnvironmentResponseFileParams",
"FileEnvironmentResponseParams",
+ "FileEnvironmentVariableRequestParams",
"FileIdParams",
"FilePathParams",
"FileRequestParams",
@@ -193,7 +261,9 @@
"ImageChatContentParams",
"ImageUrlParams",
"InputResponseParams",
+ "LinkedFileRequestParams",
"LinkedToolResponseParams",
+ "ListAgentsParams",
"ListDatasetsParams",
"ListEvaluatorsParams",
"ListFlowsParams",
@@ -201,24 +271,27 @@
"ListToolsParams",
"LlmEvaluatorRequestParams",
"LogResponseParams",
+ "LogStreamResponseParams",
"MonitoringEvaluatorEnvironmentRequestParams",
"MonitoringEvaluatorResponseParams",
"MonitoringEvaluatorVersionRequestParams",
"NumericEvaluatorStatsResponseParams",
"OverallStatsParams",
+ "PaginatedDataAgentResponseParams",
"PaginatedDataEvaluationLogResponseParams",
"PaginatedDataEvaluatorResponseParams",
"PaginatedDataFlowResponseParams",
"PaginatedDataLogResponseParams",
"PaginatedDataPromptResponseParams",
"PaginatedDataToolResponseParams",
- "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseParams",
- "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams",
+ "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseParams",
+ "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams",
"PaginatedDatapointResponseParams",
"PaginatedDatasetResponseParams",
"PaginatedEvaluationResponseParams",
"PopulateTemplateResponseParams",
"PopulateTemplateResponsePopulatedTemplateParams",
+ "PopulateTemplateResponseReasoningEffortParams",
"PopulateTemplateResponseStopParams",
"PopulateTemplateResponseTemplateParams",
"PromptCallLogResponseParams",
@@ -226,11 +299,13 @@
"PromptCallResponseToolChoiceParams",
"PromptCallStreamResponseParams",
"PromptKernelRequestParams",
+ "PromptKernelRequestReasoningEffortParams",
"PromptKernelRequestStopParams",
"PromptKernelRequestTemplateParams",
"PromptLogResponseParams",
"PromptLogResponseToolChoiceParams",
"PromptResponseParams",
+ "PromptResponseReasoningEffortParams",
"PromptResponseStopParams",
"PromptResponseTemplateParams",
"ProviderApiKeysParams",
@@ -242,6 +317,7 @@
"TextChatContentParams",
"TextEvaluatorStatsResponseParams",
"ToolCallParams",
+ "ToolCallResponseParams",
"ToolChoiceParams",
"ToolFunctionParams",
"ToolKernelRequestParams",
diff --git a/src/humanloop/requests/agent_call_response.py b/src/humanloop/requests/agent_call_response.py
new file mode 100644
index 00000000..ffc925ec
--- /dev/null
+++ b/src/humanloop/requests/agent_call_response.py
@@ -0,0 +1,202 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from .chat_message import ChatMessageParams
+import typing
+from .agent_call_response_tool_choice import AgentCallResponseToolChoiceParams
+from .agent_response import AgentResponseParams
+import datetime as dt
+from ..types.log_status import LogStatus
+from .evaluator_log_response import EvaluatorLogResponseParams
+from .log_response import LogResponseParams
+
+
+class AgentCallResponseParams(typing_extensions.TypedDict):
+ """
+ Response model for an Agent call.
+ """
+
+ output_message: typing_extensions.NotRequired[ChatMessageParams]
+ """
+ The message returned by the provider.
+ """
+
+ prompt_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of tokens in the prompt used to generate the output.
+ """
+
+ reasoning_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of reasoning tokens used to generate the output.
+ """
+
+ output_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of tokens in the output generated by the model.
+ """
+
+ prompt_cost: typing_extensions.NotRequired[float]
+ """
+ Cost in dollars associated with the tokens in the prompt.
+ """
+
+ output_cost: typing_extensions.NotRequired[float]
+ """
+ Cost in dollars associated with the tokens in the output.
+ """
+
+ finish_reason: typing_extensions.NotRequired[str]
+ """
+ Reason the generation finished.
+ """
+
+ messages: typing_extensions.NotRequired[typing.Sequence[ChatMessageParams]]
+ """
+ The messages passed to the provider chat endpoint.
+ """
+
+ tool_choice: typing_extensions.NotRequired[AgentCallResponseToolChoiceParams]
+ """
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+ """
+
+ agent: AgentResponseParams
+ """
+ Agent that generated the Log.
+ """
+
+ start_time: typing_extensions.NotRequired[dt.datetime]
+ """
+ When the logged event started.
+ """
+
+ end_time: typing_extensions.NotRequired[dt.datetime]
+ """
+ When the logged event ended.
+ """
+
+ output: typing_extensions.NotRequired[str]
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing_extensions.NotRequired[dt.datetime]
+ """
+ User-defined timestamp for when the log was created.
+ """
+
+ error: typing_extensions.NotRequired[str]
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing_extensions.NotRequired[float]
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing_extensions.NotRequired[str]
+ """
+ Captured log and debug statements.
+ """
+
+ provider_request: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Raw request sent to provider.
+ """
+
+ provider_response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Raw response received from the provider.
+ """
+
+ inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The inputs passed to the prompt template.
+ """
+
+ source: typing_extensions.NotRequired[str]
+ """
+ Identifies where the model was called from.
+ """
+
+ metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Any additional metadata to record.
+ """
+
+ log_status: typing_extensions.NotRequired[LogStatus]
+ """
+ Status of the Agent Log. If `incomplete`, the Agent turn was suspended due to a tool call and can be continued by calling /agents/continue with responses to the Agent's last message (which should contain tool calls). See the `previous_agent_message` field for easy access to the Agent's last message.
+ """
+
+ source_datapoint_id: typing_extensions.NotRequired[str]
+ """
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ """
+
+ trace_parent_id: typing_extensions.NotRequired[str]
+ """
+ The ID of the parent Log to nest this Log under in a Trace.
+ """
+
+ batches: typing_extensions.NotRequired[typing.Sequence[str]]
+ """
+ Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations.
+ """
+
+ user: typing_extensions.NotRequired[str]
+ """
+ End-user ID related to the Log.
+ """
+
+ environment: typing_extensions.NotRequired[str]
+ """
+ The name of the Environment the Log is associated to.
+ """
+
+ save: typing_extensions.NotRequired[bool]
+ """
+ Whether the request/response payloads will be stored on Humanloop.
+ """
+
+ log_id: typing_extensions.NotRequired[str]
+ """
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ """
+
+ id: str
+ """
+ Unique identifier for the Log.
+ """
+
+ evaluator_logs: typing.Sequence[EvaluatorLogResponseParams]
+ """
+ List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
+ """
+
+ trace_flow_id: typing_extensions.NotRequired[str]
+ """
+ Identifier for the Flow that the Trace belongs to.
+ """
+
+ trace_id: typing_extensions.NotRequired[str]
+ """
+ Identifier for the Trace that the Log belongs to.
+ """
+
+ trace_children: typing_extensions.NotRequired[typing.Sequence[LogResponseParams]]
+ """
+ Logs nested under this Log in the Trace.
+ """
+
+ previous_agent_message: typing_extensions.NotRequired[ChatMessageParams]
+ """
+ The Agent's last message, which should contain tool calls. Only populated if the Log is incomplete due to a suspended Agent turn with tool calls. This is useful for continuing the Agent call by calling /agents/continue.
+ """
diff --git a/src/humanloop/requests/agent_call_response_tool_choice.py b/src/humanloop/requests/agent_call_response_tool_choice.py
new file mode 100644
index 00000000..6cc9f9c4
--- /dev/null
+++ b/src/humanloop/requests/agent_call_response_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .tool_choice import ToolChoiceParams
+
+AgentCallResponseToolChoiceParams = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams
+]
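The four accepted shapes spelled out, per the docstrings above (the function name in the last entry is a hypothetical example):

    from humanloop.requests import AgentCallResponseToolChoiceParams

    choices: list[AgentCallResponseToolChoiceParams] = [
        "none",      # never call a tool; generate a message instead
        "auto",      # the model decides (default when tools are provided)
        "required",  # the model must call at least one tool
        {"type": "function", "function": {"name": "get_weather"}},  # force one named function
    ]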
diff --git a/src/humanloop/requests/agent_call_stream_response.py b/src/humanloop/requests/agent_call_stream_response.py
new file mode 100644
index 00000000..9555925d
--- /dev/null
+++ b/src/humanloop/requests/agent_call_stream_response.py
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from .agent_call_stream_response_payload import AgentCallStreamResponsePayloadParams
+from ..types.event_type import EventType
+import datetime as dt
+
+
+class AgentCallStreamResponseParams(typing_extensions.TypedDict):
+ """
+ Response model for calling an Agent in streaming mode.
+ """
+
+ log_id: str
+ message: str
+ payload: typing_extensions.NotRequired[AgentCallStreamResponsePayloadParams]
+ type: EventType
+ created_at: dt.datetime
diff --git a/src/humanloop/requests/agent_call_stream_response_payload.py b/src/humanloop/requests/agent_call_stream_response_payload.py
new file mode 100644
index 00000000..0e08a6f3
--- /dev/null
+++ b/src/humanloop/requests/agent_call_stream_response_payload.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .log_stream_response import LogStreamResponseParams
+from .log_response import LogResponseParams
+from .tool_call import ToolCallParams
+
+AgentCallStreamResponsePayloadParams = typing.Union[LogStreamResponseParams, LogResponseParams, ToolCallParams]
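A sketch of consuming the stream and this payload union; the iteration style of the generated call_stream method and the attribute access on events are assumptions:

    import asyncio

    from humanloop import AsyncHumanloop

    async def watch_agent() -> None:
        client = AsyncHumanloop(api_key="YOUR_API_KEY")
        async for event in client.agents.call_stream(
            path="sketches/agent",
            messages=[{"role": "user", "content": "hi"}],
        ):
            # event.payload is one of: an incremental LogStreamResponse, a
            # completed LogResponse, or a ToolCall the caller must answer.
            print(event.type, event.log_id, event.payload)

    asyncio.run(watch_agent())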
diff --git a/src/humanloop/requests/agent_continue_response.py b/src/humanloop/requests/agent_continue_response.py
new file mode 100644
index 00000000..8300667b
--- /dev/null
+++ b/src/humanloop/requests/agent_continue_response.py
@@ -0,0 +1,202 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from .chat_message import ChatMessageParams
+import typing
+from .agent_continue_response_tool_choice import AgentContinueResponseToolChoiceParams
+from .agent_response import AgentResponseParams
+import datetime as dt
+from ..types.log_status import LogStatus
+from .evaluator_log_response import EvaluatorLogResponseParams
+from .log_response import LogResponseParams
+
+
+class AgentContinueResponseParams(typing_extensions.TypedDict):
+ """
+ Response model for continuing an Agent call.
+ """
+
+ output_message: typing_extensions.NotRequired[ChatMessageParams]
+ """
+ The message returned by the provider.
+ """
+
+ prompt_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of tokens in the prompt used to generate the output.
+ """
+
+ reasoning_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of reasoning tokens used to generate the output.
+ """
+
+ output_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of tokens in the output generated by the model.
+ """
+
+ prompt_cost: typing_extensions.NotRequired[float]
+ """
+ Cost in dollars associated with the tokens in the prompt.
+ """
+
+ output_cost: typing_extensions.NotRequired[float]
+ """
+ Cost in dollars associated with the tokens in the output.
+ """
+
+ finish_reason: typing_extensions.NotRequired[str]
+ """
+ Reason the generation finished.
+ """
+
+ messages: typing_extensions.NotRequired[typing.Sequence[ChatMessageParams]]
+ """
+ The messages passed to the provider chat endpoint.
+ """
+
+ tool_choice: typing_extensions.NotRequired[AgentContinueResponseToolChoiceParams]
+ """
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+ """
+
+ agent: AgentResponseParams
+ """
+ Agent that generated the Log.
+ """
+
+ start_time: typing_extensions.NotRequired[dt.datetime]
+ """
+ When the logged event started.
+ """
+
+ end_time: typing_extensions.NotRequired[dt.datetime]
+ """
+ When the logged event ended.
+ """
+
+ output: typing_extensions.NotRequired[str]
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing_extensions.NotRequired[dt.datetime]
+ """
+ User-defined timestamp for when the log was created.
+ """
+
+ error: typing_extensions.NotRequired[str]
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing_extensions.NotRequired[float]
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing_extensions.NotRequired[str]
+ """
+ Captured log and debug statements.
+ """
+
+ provider_request: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Raw request sent to provider.
+ """
+
+ provider_response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Raw response received from the provider.
+ """
+
+ inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The inputs passed to the prompt template.
+ """
+
+ source: typing_extensions.NotRequired[str]
+ """
+ Identifies where the model was called from.
+ """
+
+ metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Any additional metadata to record.
+ """
+
+ log_status: typing_extensions.NotRequired[LogStatus]
+ """
+ Status of the Agent Log. If `incomplete`, the Agent turn was suspended due to a tool call and can be continued by calling /agents/continue with responses to the Agent's last message (which should contain tool calls). See the `previous_agent_message` field for easy access to the Agent's last message.
+ """
+
+ source_datapoint_id: typing_extensions.NotRequired[str]
+ """
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ """
+
+ trace_parent_id: typing_extensions.NotRequired[str]
+ """
+ The ID of the parent Log to nest this Log under in a Trace.
+ """
+
+ batches: typing_extensions.NotRequired[typing.Sequence[str]]
+ """
+ Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations.
+ """
+
+ user: typing_extensions.NotRequired[str]
+ """
+ End-user ID related to the Log.
+ """
+
+ environment: typing_extensions.NotRequired[str]
+ """
+ The name of the Environment the Log is associated to.
+ """
+
+ save: typing_extensions.NotRequired[bool]
+ """
+ Whether the request/response payloads will be stored on Humanloop.
+ """
+
+ log_id: typing_extensions.NotRequired[str]
+ """
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ """
+
+ id: str
+ """
+ Unique identifier for the Log.
+ """
+
+ evaluator_logs: typing.Sequence[EvaluatorLogResponseParams]
+ """
+ List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
+ """
+
+ trace_flow_id: typing_extensions.NotRequired[str]
+ """
+ Identifier for the Flow that the Trace belongs to.
+ """
+
+ trace_id: typing_extensions.NotRequired[str]
+ """
+ Identifier for the Trace that the Log belongs to.
+ """
+
+ trace_children: typing_extensions.NotRequired[typing.Sequence[LogResponseParams]]
+ """
+ Logs nested under this Log in the Trace.
+ """
+
+ previous_agent_message: typing_extensions.NotRequired[ChatMessageParams]
+ """
+ The Agent's last message, which should contain tool calls. Only populated if the Log is incomplete due to a suspended Agent turn with tool calls. This is useful for continuing the Agent call by calling /agents/continue.
+ """
diff --git a/src/humanloop/requests/agent_continue_response_tool_choice.py b/src/humanloop/requests/agent_continue_response_tool_choice.py
new file mode 100644
index 00000000..24b044cc
--- /dev/null
+++ b/src/humanloop/requests/agent_continue_response_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .tool_choice import ToolChoiceParams
+
+AgentContinueResponseToolChoiceParams = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams
+]
diff --git a/src/humanloop/requests/agent_continue_stream_response.py b/src/humanloop/requests/agent_continue_stream_response.py
new file mode 100644
index 00000000..1038e000
--- /dev/null
+++ b/src/humanloop/requests/agent_continue_stream_response.py
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from .agent_continue_stream_response_payload import AgentContinueStreamResponsePayloadParams
+from ..types.event_type import EventType
+import datetime as dt
+
+
+class AgentContinueStreamResponseParams(typing_extensions.TypedDict):
+ """
+ Response model for continuing an Agent call in streaming mode.
+ """
+
+ log_id: str
+ message: str
+ payload: typing_extensions.NotRequired[AgentContinueStreamResponsePayloadParams]
+ type: EventType
+ created_at: dt.datetime
diff --git a/src/humanloop/requests/agent_continue_stream_response_payload.py b/src/humanloop/requests/agent_continue_stream_response_payload.py
new file mode 100644
index 00000000..ddd74c10
--- /dev/null
+++ b/src/humanloop/requests/agent_continue_stream_response_payload.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .log_stream_response import LogStreamResponseParams
+from .log_response import LogResponseParams
+from .tool_call import ToolCallParams
+
+AgentContinueStreamResponsePayloadParams = typing.Union[LogStreamResponseParams, LogResponseParams, ToolCallParams]
diff --git a/src/humanloop/requests/agent_inline_tool.py b/src/humanloop/requests/agent_inline_tool.py
new file mode 100644
index 00000000..31f9401a
--- /dev/null
+++ b/src/humanloop/requests/agent_inline_tool.py
@@ -0,0 +1,13 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing
+from .tool_function import ToolFunctionParams
+from ..types.on_agent_call_enum import OnAgentCallEnum
+
+
+class AgentInlineToolParams(typing_extensions.TypedDict):
+ type: typing.Literal["inline"]
+ json_schema: ToolFunctionParams
+ on_agent_call: typing_extensions.NotRequired[OnAgentCallEnum]
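A sketch of an inline tool definition. The ToolFunctionParams keys follow the OpenAI-style function schema and are assumed from the SDK's ToolFunction type; on_agent_call is omitted because its enum values are not shown in this patch:

    from humanloop.requests import AgentInlineToolParams, ToolFunctionParams

    schema: ToolFunctionParams = {
        "name": "get_weather",  # hypothetical tool
        "description": "Look up the current weather for a city.",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    }

    inline_tool: AgentInlineToolParams = {
        "type": "inline",
        "json_schema": schema,
    }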
diff --git a/src/humanloop/requests/agent_kernel_request.py b/src/humanloop/requests/agent_kernel_request.py
new file mode 100644
index 00000000..0ca76571
--- /dev/null
+++ b/src/humanloop/requests/agent_kernel_request.py
@@ -0,0 +1,112 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.model_endpoints import ModelEndpoints
+from .agent_kernel_request_template import AgentKernelRequestTemplateParams
+from ..types.template_language import TemplateLanguage
+from ..types.model_providers import ModelProviders
+from .agent_kernel_request_stop import AgentKernelRequestStopParams
+import typing
+from .response_format import ResponseFormatParams
+from .agent_kernel_request_reasoning_effort import AgentKernelRequestReasoningEffortParams
+from .agent_kernel_request_tools_item import AgentKernelRequestToolsItemParams
+
+
+class AgentKernelRequestParams(typing_extensions.TypedDict):
+ """
+ Base class used by both PromptKernelRequest and AgentKernelRequest.
+
+ Contains the Prompt-related fields common to both.
+ """
+
+ model: str
+ """
+ The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+ """
+
+ endpoint: typing_extensions.NotRequired[ModelEndpoints]
+ """
+ The provider model endpoint used.
+ """
+
+ template: typing_extensions.NotRequired[AgentKernelRequestTemplateParams]
+ """
+ The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+ For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+ For completion models, provide a prompt template as a string.
+
+ Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+ """
+
+ template_language: typing_extensions.NotRequired[TemplateLanguage]
+ """
+ The template language to use for rendering the template.
+ """
+
+ provider: typing_extensions.NotRequired[ModelProviders]
+ """
+ The company providing the underlying model service.
+ """
+
+ max_tokens: typing_extensions.NotRequired[int]
+ """
+ The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt
+ """
+
+ temperature: typing_extensions.NotRequired[float]
+ """
+ What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+ """
+
+ top_p: typing_extensions.NotRequired[float]
+ """
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+ """
+
+ stop: typing_extensions.NotRequired[AgentKernelRequestStopParams]
+ """
+ The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+ """
+
+ presence_penalty: typing_extensions.NotRequired[float]
+ """
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+ """
+
+ frequency_penalty: typing_extensions.NotRequired[float]
+ """
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+ """
+
+ other: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Other parameter values to be passed to the provider call.
+ """
+
+ seed: typing_extensions.NotRequired[int]
+ """
+ If specified, model will make a best effort to sample deterministically, but it is not guaranteed.
+ """
+
+ response_format: typing_extensions.NotRequired[ResponseFormatParams]
+ """
+ The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+ """
+
+ reasoning_effort: typing_extensions.NotRequired[AgentKernelRequestReasoningEffortParams]
+ """
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which specifies the maximum token budget.
+ """
+
+ tools: typing_extensions.NotRequired[typing.Sequence[AgentKernelRequestToolsItemParams]]
+ attributes: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used.
+ """
+
+ max_iterations: typing_extensions.NotRequired[int]
+ """
+ The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
+ """
diff --git a/src/humanloop/requests/agent_kernel_request_reasoning_effort.py b/src/humanloop/requests/agent_kernel_request_reasoning_effort.py
new file mode 100644
index 00000000..ea32bc11
--- /dev/null
+++ b/src/humanloop/requests/agent_kernel_request_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ..types.open_ai_reasoning_effort import OpenAiReasoningEffort
+
+AgentKernelRequestReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/requests/agent_kernel_request_stop.py b/src/humanloop/requests/agent_kernel_request_stop.py
new file mode 100644
index 00000000..eae95d35
--- /dev/null
+++ b/src/humanloop/requests/agent_kernel_request_stop.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentKernelRequestStopParams = typing.Union[str, typing.Sequence[str]]
diff --git a/src/humanloop/requests/agent_kernel_request_template.py b/src/humanloop/requests/agent_kernel_request_template.py
new file mode 100644
index 00000000..7261667d
--- /dev/null
+++ b/src/humanloop/requests/agent_kernel_request_template.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .chat_message import ChatMessageParams
+
+AgentKernelRequestTemplateParams = typing.Union[str, typing.Sequence[ChatMessageParams]]
diff --git a/src/humanloop/requests/agent_kernel_request_tools_item.py b/src/humanloop/requests/agent_kernel_request_tools_item.py
new file mode 100644
index 00000000..27b63984
--- /dev/null
+++ b/src/humanloop/requests/agent_kernel_request_tools_item.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .agent_linked_file_request import AgentLinkedFileRequestParams
+from .agent_inline_tool import AgentInlineToolParams
+
+AgentKernelRequestToolsItemParams = typing.Union[AgentLinkedFileRequestParams, AgentInlineToolParams]
diff --git a/src/humanloop/requests/agent_linked_file_request.py b/src/humanloop/requests/agent_linked_file_request.py
new file mode 100644
index 00000000..18fc2274
--- /dev/null
+++ b/src/humanloop/requests/agent_linked_file_request.py
@@ -0,0 +1,13 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing
+from .linked_file_request import LinkedFileRequestParams
+from ..types.on_agent_call_enum import OnAgentCallEnum
+
+
+class AgentLinkedFileRequestParams(typing_extensions.TypedDict):
+ type: typing.Literal["file"]
+ link: LinkedFileRequestParams
+ on_agent_call: typing_extensions.NotRequired[OnAgentCallEnum]
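A sketch of linking another File as an Agent tool. The key inside link is an assumption, since LinkedFileRequestParams is defined elsewhere in this patch:

    from humanloop.requests import AgentLinkedFileRequestParams

    linked_tool: AgentLinkedFileRequestParams = {
        "type": "file",
        "link": {"file_id": "tl_1234567890"},  # hypothetical Tool File ID; key assumed
    }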
diff --git a/src/humanloop/requests/agent_linked_file_response.py b/src/humanloop/requests/agent_linked_file_response.py
new file mode 100644
index 00000000..8a690a77
--- /dev/null
+++ b/src/humanloop/requests/agent_linked_file_response.py
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+import typing_extensions
+import typing
+from .linked_file_request import LinkedFileRequestParams
+from ..types.on_agent_call_enum import OnAgentCallEnum
+
+if typing.TYPE_CHECKING:
+ from .agent_linked_file_response_file import AgentLinkedFileResponseFileParams
+
+
+class AgentLinkedFileResponseParams(typing_extensions.TypedDict):
+ type: typing.Literal["file"]
+ link: LinkedFileRequestParams
+ on_agent_call: typing_extensions.NotRequired[OnAgentCallEnum]
+ file: typing_extensions.NotRequired["AgentLinkedFileResponseFileParams"]
diff --git a/src/humanloop/requests/agent_linked_file_response_file.py b/src/humanloop/requests/agent_linked_file_response_file.py
new file mode 100644
index 00000000..bb328de2
--- /dev/null
+++ b/src/humanloop/requests/agent_linked_file_response_file.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+import typing
+from .dataset_response import DatasetResponseParams
+
+if typing.TYPE_CHECKING:
+ from .prompt_response import PromptResponseParams
+ from .tool_response import ToolResponseParams
+ from .evaluator_response import EvaluatorResponseParams
+ from .flow_response import FlowResponseParams
+ from .agent_response import AgentResponseParams
+AgentLinkedFileResponseFileParams = typing.Union[
+ "PromptResponseParams",
+ "ToolResponseParams",
+ DatasetResponseParams,
+ "EvaluatorResponseParams",
+ "FlowResponseParams",
+ "AgentResponseParams",
+]
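The string members break the import cycle between these mutually referencing response types; with postponed evaluation the union is still usable as an annotation without importing its members at runtime. A sketch (the "type" and "id" keys are assumed to exist on every member):

    from __future__ import annotations

    import typing

    if typing.TYPE_CHECKING:
        from humanloop.requests import AgentLinkedFileResponseFileParams

    def describe(file: AgentLinkedFileResponseFileParams) -> str:
        # All members are TypedDicts, so this is plain dict access at runtime.
        return f"{file.get('type', 'unknown')} {file.get('id', '?')}"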
diff --git a/src/humanloop/requests/agent_log_response.py b/src/humanloop/requests/agent_log_response.py
new file mode 100644
index 00000000..0cb24b8a
--- /dev/null
+++ b/src/humanloop/requests/agent_log_response.py
@@ -0,0 +1,201 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+import typing_extensions
+from .chat_message import ChatMessageParams
+import typing
+from .agent_log_response_tool_choice import AgentLogResponseToolChoiceParams
+from .agent_response import AgentResponseParams
+import datetime as dt
+from ..types.log_status import LogStatus
+
+if typing.TYPE_CHECKING:
+ from .evaluator_log_response import EvaluatorLogResponseParams
+ from .log_response import LogResponseParams
+
+
+class AgentLogResponseParams(typing_extensions.TypedDict):
+ """
+ General request for creating a Log
+ """
+
+ output_message: typing_extensions.NotRequired[ChatMessageParams]
+ """
+ The message returned by the provider.
+ """
+
+ prompt_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of tokens in the prompt used to generate the output.
+ """
+
+ reasoning_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of reasoning tokens used to generate the output.
+ """
+
+ output_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of tokens in the output generated by the model.
+ """
+
+ prompt_cost: typing_extensions.NotRequired[float]
+ """
+ Cost in dollars associated with the tokens in the prompt.
+ """
+
+ output_cost: typing_extensions.NotRequired[float]
+ """
+ Cost in dollars associated with the tokens in the output.
+ """
+
+ finish_reason: typing_extensions.NotRequired[str]
+ """
+ Reason the generation finished.
+ """
+
+ messages: typing_extensions.NotRequired[typing.Sequence[ChatMessageParams]]
+ """
+ The messages passed to the provider chat endpoint.
+ """
+
+ tool_choice: typing_extensions.NotRequired[AgentLogResponseToolChoiceParams]
+ """
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+ """
+
+ agent: AgentResponseParams
+ """
+ Agent that generated the Log.
+ """
+
+ start_time: typing_extensions.NotRequired[dt.datetime]
+ """
+ When the logged event started.
+ """
+
+ end_time: typing_extensions.NotRequired[dt.datetime]
+ """
+ When the logged event ended.
+ """
+
+ output: typing_extensions.NotRequired[str]
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing_extensions.NotRequired[dt.datetime]
+ """
+ User defined timestamp for when the log was created.
+ """
+
+ error: typing_extensions.NotRequired[str]
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing_extensions.NotRequired[float]
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing_extensions.NotRequired[str]
+ """
+ Captured log and debug statements.
+ """
+
+ provider_request: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Raw request sent to provider.
+ """
+
+ provider_response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Raw response received from the provider.
+ """
+
+ inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The inputs passed to the prompt template.
+ """
+
+ source: typing_extensions.NotRequired[str]
+ """
+ Identifies where the model was called from.
+ """
+
+ metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Any additional metadata to record.
+ """
+
+ log_status: typing_extensions.NotRequired[LogStatus]
+ """
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+ """
+
+ source_datapoint_id: typing_extensions.NotRequired[str]
+ """
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ """
+
+ trace_parent_id: typing_extensions.NotRequired[str]
+ """
+ The ID of the parent Log to nest this Log under in a Trace.
+ """
+
+ batches: typing_extensions.NotRequired[typing.Sequence[str]]
+ """
+ Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations.
+ """
+
+ user: typing_extensions.NotRequired[str]
+ """
+ End-user ID related to the Log.
+ """
+
+ environment: typing_extensions.NotRequired[str]
+ """
+ The name of the Environment the Log is associated with.
+ """
+
+ save: typing_extensions.NotRequired[bool]
+ """
+ Whether the request/response payloads will be stored on Humanloop.
+ """
+
+ log_id: typing_extensions.NotRequired[str]
+ """
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ """
+
+ id: str
+ """
+ Unique identifier for the Log.
+ """
+
+ evaluator_logs: typing.Sequence["EvaluatorLogResponseParams"]
+ """
+ List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
+ """
+
+ trace_flow_id: typing_extensions.NotRequired[str]
+ """
+ Identifier for the Flow that the Trace belongs to.
+ """
+
+ trace_id: typing_extensions.NotRequired[str]
+ """
+ Identifier for the Trace that the Log belongs to.
+ """
+
+ trace_children: typing_extensions.NotRequired[typing.Sequence["LogResponseParams"]]
+ """
+ Logs nested under this Log in the Trace.
+ """
diff --git a/src/humanloop/requests/agent_log_response_tool_choice.py b/src/humanloop/requests/agent_log_response_tool_choice.py
new file mode 100644
index 00000000..e239a69c
--- /dev/null
+++ b/src/humanloop/requests/agent_log_response_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .tool_choice import ToolChoiceParams
+
+AgentLogResponseToolChoiceParams = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams
+]
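The union above encodes the four tool_choice forms described in the Log response
docstring: three string literals plus an OpenAI-style function spec. A sketch of
the accepted shapes, assuming ToolChoiceParams follows that function-spec layout
("my_function" is a hypothetical tool name):

    from humanloop.requests.agent_log_response_tool_choice import (
        AgentLogResponseToolChoiceParams,
    )

    let_model_decide: AgentLogResponseToolChoiceParams = "auto"
    force_named_tool: AgentLogResponseToolChoiceParams = {
        "type": "function",
        "function": {"name": "my_function"},  # hypothetical tool name
    }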
diff --git a/src/humanloop/requests/agent_log_stream_response.py b/src/humanloop/requests/agent_log_stream_response.py
new file mode 100644
index 00000000..710d55cf
--- /dev/null
+++ b/src/humanloop/requests/agent_log_stream_response.py
@@ -0,0 +1,87 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing_extensions
+import datetime as dt
+from .chat_message import ChatMessageParams
+
+
+class AgentLogStreamResponseParams(typing_extensions.TypedDict):
+ """
+ Streamed log output for an Agent call.
+ """
+
+ output: typing_extensions.NotRequired[str]
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing_extensions.NotRequired[dt.datetime]
+ """
+ User defined timestamp for when the log was created.
+ """
+
+ error: typing_extensions.NotRequired[str]
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing_extensions.NotRequired[float]
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing_extensions.NotRequired[str]
+ """
+ Captured log and debug statements.
+ """
+
+ output_message: typing_extensions.NotRequired[ChatMessageParams]
+ """
+ The message returned by the provider.
+ """
+
+ prompt_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of tokens in the prompt used to generate the output.
+ """
+
+ reasoning_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of reasoning tokens used to generate the output.
+ """
+
+ output_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of tokens in the output generated by the model.
+ """
+
+ prompt_cost: typing_extensions.NotRequired[float]
+ """
+ Cost in dollars associated with the tokens in the prompt.
+ """
+
+ output_cost: typing_extensions.NotRequired[float]
+ """
+ Cost in dollars associated with the tokens in the output.
+ """
+
+ finish_reason: typing_extensions.NotRequired[str]
+ """
+ Reason the generation finished.
+ """
+
+ id: str
+ """
+ ID of the log.
+ """
+
+ agent_id: str
+ """
+ ID of the Agent the log belongs to.
+ """
+
+ version_id: str
+ """
+ ID of the specific version of the Agent.
+ """
diff --git a/src/humanloop/requests/agent_response.py b/src/humanloop/requests/agent_response.py
new file mode 100644
index 00000000..f482728d
--- /dev/null
+++ b/src/humanloop/requests/agent_response.py
@@ -0,0 +1,242 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+import typing_extensions
+import typing_extensions
+from ..types.model_endpoints import ModelEndpoints
+from .agent_response_template import AgentResponseTemplateParams
+from ..types.template_language import TemplateLanguage
+from ..types.model_providers import ModelProviders
+from .agent_response_stop import AgentResponseStopParams
+import typing
+from .response_format import ResponseFormatParams
+from .agent_response_reasoning_effort import AgentResponseReasoningEffortParams
+from .environment_response import EnvironmentResponseParams
+import datetime as dt
+from ..types.user_response import UserResponse
+from ..types.version_status import VersionStatus
+from .input_response import InputResponseParams
+from .evaluator_aggregate import EvaluatorAggregateParams
+import typing
+
+if typing.TYPE_CHECKING:
+ from .agent_response_tools_item import AgentResponseToolsItemParams
+ from .monitoring_evaluator_response import MonitoringEvaluatorResponseParams
+
+
+class AgentResponseParams(typing_extensions.TypedDict):
+ """
+ Response model for an Agent.
+
+ Extends the common File Response fields with Agent-specific configuration
+ such as the model, template, and tools.
+ """
+
+ path: str
+ """
+ Path of the Agent, including the name, which is used as a unique identifier.
+ """
+
+ id: str
+ """
+ Unique identifier for the Agent.
+ """
+
+ directory_id: typing_extensions.NotRequired[str]
+ """
+ ID of the directory that the file is in on Humanloop.
+ """
+
+ model: str
+ """
+ The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+ """
+
+ endpoint: typing_extensions.NotRequired[ModelEndpoints]
+ """
+ The provider model endpoint used.
+ """
+
+ template: typing_extensions.NotRequired[AgentResponseTemplateParams]
+ """
+ The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+ For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+ For completion models, provide a prompt template as a string.
+
+ Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+ """
+
+ template_language: typing_extensions.NotRequired[TemplateLanguage]
+ """
+ The template language to use for rendering the template.
+ """
+
+ provider: typing_extensions.NotRequired[ModelProviders]
+ """
+ The company providing the underlying model service.
+ """
+
+ max_tokens: typing_extensions.NotRequired[int]
+ """
+ The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt.
+ """
+
+ temperature: typing_extensions.NotRequired[float]
+ """
+ What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+ """
+
+ top_p: typing_extensions.NotRequired[float]
+ """
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+ """
+
+ stop: typing_extensions.NotRequired[AgentResponseStopParams]
+ """
+ The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+ """
+
+ presence_penalty: typing_extensions.NotRequired[float]
+ """
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+ """
+
+ frequency_penalty: typing_extensions.NotRequired[float]
+ """
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+ """
+
+ other: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Other parameter values to be passed to the provider call.
+ """
+
+ seed: typing_extensions.NotRequired[int]
+ """
+ If specified, the model will make a best effort to sample deterministically, but it is not guaranteed.
+ """
+
+ response_format: typing_extensions.NotRequired[ResponseFormatParams]
+ """
+ The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+ """
+
+ reasoning_effort: typing_extensions.NotRequired[AgentResponseReasoningEffortParams]
+ """
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
+ """
+
+ tools: typing.Sequence["AgentResponseToolsItemParams"]
+ """
+ List of tools that the Agent can call. These can be linked files or inline tools.
+ """
+
+ attributes: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used.
+ """
+
+ max_iterations: typing_extensions.NotRequired[int]
+ """
+ The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
+ """
+
+ version_name: typing_extensions.NotRequired[str]
+ """
+ Unique name for the Agent version. Version names must be unique for a given Agent.
+ """
+
+ version_description: typing_extensions.NotRequired[str]
+ """
+ Description of the version, e.g., the changes made in this version.
+ """
+
+ description: typing_extensions.NotRequired[str]
+ """
+ Description of the Agent.
+ """
+
+ tags: typing_extensions.NotRequired[typing.Sequence[str]]
+ """
+ List of tags associated with the file.
+ """
+
+ readme: typing_extensions.NotRequired[str]
+ """
+ Long description of the file.
+ """
+
+ name: str
+ """
+ Name of the Agent.
+ """
+
+ schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The JSON schema for the Prompt.
+ """
+
+ version_id: str
+ """
+ Unique identifier for the specific Agent Version. If no query params are provided, the default deployed Agent Version is returned.
+ """
+
+ type: typing_extensions.NotRequired[typing.Literal["agent"]]
+ environments: typing_extensions.NotRequired[typing.Sequence[EnvironmentResponseParams]]
+ """
+ The list of environments the Agent Version is deployed to.
+ """
+
+ created_at: dt.datetime
+ updated_at: dt.datetime
+ created_by: typing_extensions.NotRequired[UserResponse]
+ """
+ The user who created the Agent.
+ """
+
+ committed_by: typing_extensions.NotRequired[UserResponse]
+ """
+ The user who committed the Agent Version.
+ """
+
+ committed_at: typing_extensions.NotRequired[dt.datetime]
+ """
+ The date and time the Agent Version was committed.
+ """
+
+ status: VersionStatus
+ """
+ The status of the Agent Version.
+ """
+
+ last_used_at: dt.datetime
+ version_logs_count: int
+ """
+ The number of logs that have been generated for this Agent Version.
+ """
+
+ total_logs_count: int
+ """
+ The number of logs that have been generated across all Agent Versions.
+ """
+
+ inputs: typing.Sequence[InputResponseParams]
+ """
+ Inputs associated to the Agent. Inputs correspond to any of the variables used within the Agent template.
+ """
+
+ evaluators: typing_extensions.NotRequired[typing.Sequence["MonitoringEvaluatorResponseParams"]]
+ """
+ Evaluators that have been attached to this Agent that are used for monitoring logs.
+ """
+
+ evaluator_aggregates: typing_extensions.NotRequired[typing.Sequence[EvaluatorAggregateParams]]
+ """
+ Aggregation of Evaluator results for the Agent Version.
+ """
+
+ content: typing_extensions.NotRequired[str]
+ """
+ The serialized kernel for the Agent. Corresponds to the .agent file.
+ """
diff --git a/src/humanloop/requests/agent_response_reasoning_effort.py b/src/humanloop/requests/agent_response_reasoning_effort.py
new file mode 100644
index 00000000..de1b969f
--- /dev/null
+++ b/src/humanloop/requests/agent_response_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ..types.open_ai_reasoning_effort import OpenAiReasoningEffort
+
+AgentResponseReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int]
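This union captures the provider split described in the reasoning_effort
docstrings: OpenAI reasoning models take an effort enum, Anthropic reasoning
models take an integer token budget. A sketch, assuming OpenAiReasoningEffort
admits the OpenAI literals "low" | "medium" | "high":

    from humanloop.requests.agent_response_reasoning_effort import (
        AgentResponseReasoningEffortParams,
    )

    openai_effort: AgentResponseReasoningEffortParams = "low"    # OpenAI-style enum value (assumed member)
    anthropic_budget: AgentResponseReasoningEffortParams = 2048  # Anthropic maximum thinking-token budget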
diff --git a/src/humanloop/requests/agent_response_stop.py b/src/humanloop/requests/agent_response_stop.py
new file mode 100644
index 00000000..a395ee73
--- /dev/null
+++ b/src/humanloop/requests/agent_response_stop.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentResponseStopParams = typing.Union[str, typing.Sequence[str]]
diff --git a/src/humanloop/requests/agent_response_template.py b/src/humanloop/requests/agent_response_template.py
new file mode 100644
index 00000000..94be65f1
--- /dev/null
+++ b/src/humanloop/requests/agent_response_template.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .chat_message import ChatMessageParams
+
+AgentResponseTemplateParams = typing.Union[str, typing.Sequence[ChatMessageParams]]
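Per the template docstring above, completion models take a string template and
chat models take a sequence of messages, with `{{input_name}}` placeholders in
either form. A sketch of both shapes (the variable names are illustrative):

    from humanloop.requests.agent_response_template import AgentResponseTemplateParams

    # Completion-style template: a plain string with {{variable}} placeholders.
    completion_template: AgentResponseTemplateParams = "Summarize: {{document}}"

    # Chat-style template: a sequence of ChatMessageParams dicts.
    chat_template: AgentResponseTemplateParams = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "{{question}}"},
    ]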
diff --git a/src/humanloop/requests/agent_response_tools_item.py b/src/humanloop/requests/agent_response_tools_item.py
new file mode 100644
index 00000000..5181579b
--- /dev/null
+++ b/src/humanloop/requests/agent_response_tools_item.py
@@ -0,0 +1,10 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+import typing
+from .agent_inline_tool import AgentInlineToolParams
+import typing
+
+if typing.TYPE_CHECKING:
+ from .agent_linked_file_response import AgentLinkedFileResponseParams
+AgentResponseToolsItemParams = typing.Union["AgentLinkedFileResponseParams", AgentInlineToolParams]
diff --git a/src/humanloop/requests/anthropic_redacted_thinking_content.py b/src/humanloop/requests/anthropic_redacted_thinking_content.py
new file mode 100644
index 00000000..3b328f7f
--- /dev/null
+++ b/src/humanloop/requests/anthropic_redacted_thinking_content.py
@@ -0,0 +1,12 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing
+
+
+class AnthropicRedactedThinkingContentParams(typing_extensions.TypedDict):
+ type: typing.Literal["redacted_thinking"]
+ data: str
+ """
+ Thinking block that Anthropic redacted for safety reasons. The user is expected to pass the block back to Anthropic unmodified.
+ """
diff --git a/src/humanloop/requests/anthropic_thinking_content.py b/src/humanloop/requests/anthropic_thinking_content.py
new file mode 100644
index 00000000..34f6f99f
--- /dev/null
+++ b/src/humanloop/requests/anthropic_thinking_content.py
@@ -0,0 +1,17 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing
+
+
+class AnthropicThinkingContentParams(typing_extensions.TypedDict):
+ type: typing.Literal["thinking"]
+ thinking: str
+ """
+ Model's chain-of-thought for providing the response.
+ """
+
+ signature: str
+ """
+ Cryptographic signature that verifies the thinking block was generated by Anthropic.
+ """
diff --git a/src/humanloop/requests/chat_message.py b/src/humanloop/requests/chat_message.py
index cab8466d..6011653a 100644
--- a/src/humanloop/requests/chat_message.py
+++ b/src/humanloop/requests/chat_message.py
@@ -6,6 +6,7 @@
from ..types.chat_role import ChatRole
import typing
from .tool_call import ToolCallParams
+from .chat_message_thinking_item import ChatMessageThinkingItemParams
class ChatMessageParams(typing_extensions.TypedDict):
@@ -33,3 +34,8 @@ class ChatMessageParams(typing_extensions.TypedDict):
"""
A list of tool calls requested by the assistant.
"""
+
+ thinking: typing_extensions.NotRequired[typing.Sequence[ChatMessageThinkingItemParams]]
+ """
+ Model's chain-of-thought for providing the response. Present on assistant messages if model supports it.
+ """
diff --git a/src/humanloop/requests/chat_message_thinking_item.py b/src/humanloop/requests/chat_message_thinking_item.py
new file mode 100644
index 00000000..0691f4d8
--- /dev/null
+++ b/src/humanloop/requests/chat_message_thinking_item.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .anthropic_thinking_content import AnthropicThinkingContentParams
+from .anthropic_redacted_thinking_content import AnthropicRedactedThinkingContentParams
+
+ChatMessageThinkingItemParams = typing.Union[AnthropicThinkingContentParams, AnthropicRedactedThinkingContentParams]
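With this union, ChatMessageParams (extended earlier in this patch with a
`thinking` field) can carry Anthropic thinking blocks alongside regular content.
A sketch of an assistant message with one thinking block; the signature value is
a placeholder for the string Anthropic returns:

    from humanloop.requests.chat_message import ChatMessageParams

    msg: ChatMessageParams = {
        "role": "assistant",
        "content": "The answer is 42.",
        "thinking": [
            {
                "type": "thinking",
                "thinking": "Work through the question step by step...",
                "signature": "<signature-from-anthropic>",  # placeholder value
            }
        ],
    }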
diff --git a/src/humanloop/requests/create_agent_log_response.py b/src/humanloop/requests/create_agent_log_response.py
new file mode 100644
index 00000000..b1715517
--- /dev/null
+++ b/src/humanloop/requests/create_agent_log_response.py
@@ -0,0 +1,31 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing_extensions
+from ..types.log_status import LogStatus
+
+
+class CreateAgentLogResponseParams(typing_extensions.TypedDict):
+ """
+ Response for an Agent Log.
+ """
+
+ id: str
+ """
+ Unique identifier for the Log.
+ """
+
+ agent_id: str
+ """
+ Unique identifier for the Agent.
+ """
+
+ version_id: str
+ """
+ Unique identifier for the Agent Version.
+ """
+
+ log_status: typing_extensions.NotRequired[LogStatus]
+ """
+ Status of the Agent Log. When an Agent Log is marked as `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs.
+ """
diff --git a/src/humanloop/requests/dataset_response.py b/src/humanloop/requests/dataset_response.py
index 1d59ed4b..1cffd2b2 100644
--- a/src/humanloop/requests/dataset_response.py
+++ b/src/humanloop/requests/dataset_response.py
@@ -42,6 +42,11 @@ class DatasetResponseParams(typing_extensions.TypedDict):
Description of the Dataset.
"""
+ schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The JSON schema for the File.
+ """
+
readme: typing_extensions.NotRequired[str]
"""
Long description of the file.
diff --git a/src/humanloop/requests/directory_with_parents_and_children_response_files_item.py b/src/humanloop/requests/directory_with_parents_and_children_response_files_item.py
index f101bf15..db9370b9 100644
--- a/src/humanloop/requests/directory_with_parents_and_children_response_files_item.py
+++ b/src/humanloop/requests/directory_with_parents_and_children_response_files_item.py
@@ -6,7 +6,13 @@
from .evaluator_response import EvaluatorResponseParams
from .dataset_response import DatasetResponseParams
from .flow_response import FlowResponseParams
+from .agent_response import AgentResponseParams
DirectoryWithParentsAndChildrenResponseFilesItemParams = typing.Union[
- PromptResponseParams, ToolResponseParams, EvaluatorResponseParams, DatasetResponseParams, FlowResponseParams
+ PromptResponseParams,
+ ToolResponseParams,
+ EvaluatorResponseParams,
+ DatasetResponseParams,
+ FlowResponseParams,
+ AgentResponseParams,
]
diff --git a/src/humanloop/requests/evaluator_response.py b/src/humanloop/requests/evaluator_response.py
index 908eeb2d..1ff836fb 100644
--- a/src/humanloop/requests/evaluator_response.py
+++ b/src/humanloop/requests/evaluator_response.py
@@ -57,6 +57,11 @@ class EvaluatorResponseParams(typing_extensions.TypedDict):
Description of the Evaluator.
"""
+ schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The JSON schema for the File.
+ """
+
readme: typing_extensions.NotRequired[str]
"""
Long description of the file.
diff --git a/src/humanloop/requests/file_environment_response_file.py b/src/humanloop/requests/file_environment_response_file.py
index 4ac6b0c3..04c0b51d 100644
--- a/src/humanloop/requests/file_environment_response_file.py
+++ b/src/humanloop/requests/file_environment_response_file.py
@@ -6,7 +6,13 @@
from .dataset_response import DatasetResponseParams
from .evaluator_response import EvaluatorResponseParams
from .flow_response import FlowResponseParams
+from .agent_response import AgentResponseParams
FileEnvironmentResponseFileParams = typing.Union[
- PromptResponseParams, ToolResponseParams, DatasetResponseParams, EvaluatorResponseParams, FlowResponseParams
+ PromptResponseParams,
+ ToolResponseParams,
+ DatasetResponseParams,
+ EvaluatorResponseParams,
+ FlowResponseParams,
+ AgentResponseParams,
]
diff --git a/src/humanloop/requests/file_environment_variable_request.py b/src/humanloop/requests/file_environment_variable_request.py
new file mode 100644
index 00000000..bb70bda4
--- /dev/null
+++ b/src/humanloop/requests/file_environment_variable_request.py
@@ -0,0 +1,15 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+
+
+class FileEnvironmentVariableRequestParams(typing_extensions.TypedDict):
+ name: str
+ """
+ Name of the environment variable.
+ """
+
+ value: str
+ """
+ Value of the environment variable.
+ """
diff --git a/src/humanloop/requests/flow_response.py b/src/humanloop/requests/flow_response.py
index 18a26d10..eebc9fd7 100644
--- a/src/humanloop/requests/flow_response.py
+++ b/src/humanloop/requests/flow_response.py
@@ -59,6 +59,11 @@ class FlowResponseParams(typing_extensions.TypedDict):
Description of the Flow.
"""
+ schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The JSON schema for the File.
+ """
+
readme: typing_extensions.NotRequired[str]
"""
Long description of the file.
diff --git a/src/humanloop/requests/linked_file_request.py b/src/humanloop/requests/linked_file_request.py
new file mode 100644
index 00000000..2bbba19c
--- /dev/null
+++ b/src/humanloop/requests/linked_file_request.py
@@ -0,0 +1,10 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing_extensions
+
+
+class LinkedFileRequestParams(typing_extensions.TypedDict):
+ file_id: str
+ environment_id: typing_extensions.NotRequired[str]
+ version_id: typing_extensions.NotRequired[str]
diff --git a/src/humanloop/requests/list_agents.py b/src/humanloop/requests/list_agents.py
new file mode 100644
index 00000000..4a72f1db
--- /dev/null
+++ b/src/humanloop/requests/list_agents.py
@@ -0,0 +1,12 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing
+from .agent_response import AgentResponseParams
+
+
+class ListAgentsParams(typing_extensions.TypedDict):
+ records: typing.Sequence[AgentResponseParams]
+ """
+ The list of Agents.
+ """
diff --git a/src/humanloop/requests/log_response.py b/src/humanloop/requests/log_response.py
index 15a4cff6..cb3ce212 100644
--- a/src/humanloop/requests/log_response.py
+++ b/src/humanloop/requests/log_response.py
@@ -9,6 +9,11 @@
from .tool_log_response import ToolLogResponseParams
from .evaluator_log_response import EvaluatorLogResponseParams
from .flow_log_response import FlowLogResponseParams
+ from .agent_log_response import AgentLogResponseParams
LogResponseParams = typing.Union[
- "PromptLogResponseParams", "ToolLogResponseParams", "EvaluatorLogResponseParams", "FlowLogResponseParams"
+ "PromptLogResponseParams",
+ "ToolLogResponseParams",
+ "EvaluatorLogResponseParams",
+ "FlowLogResponseParams",
+ "AgentLogResponseParams",
]
diff --git a/src/humanloop/requests/log_stream_response.py b/src/humanloop/requests/log_stream_response.py
new file mode 100644
index 00000000..e142e7fb
--- /dev/null
+++ b/src/humanloop/requests/log_stream_response.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .prompt_call_stream_response import PromptCallStreamResponseParams
+from .agent_log_stream_response import AgentLogStreamResponseParams
+
+LogStreamResponseParams = typing.Union[PromptCallStreamResponseParams, AgentLogStreamResponseParams]
diff --git a/src/humanloop/requests/paginated_data_agent_response.py b/src/humanloop/requests/paginated_data_agent_response.py
new file mode 100644
index 00000000..c8d67533
--- /dev/null
+++ b/src/humanloop/requests/paginated_data_agent_response.py
@@ -0,0 +1,12 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing
+from .agent_response import AgentResponseParams
+
+
+class PaginatedDataAgentResponseParams(typing_extensions.TypedDict):
+ records: typing.Sequence[AgentResponseParams]
+ page: int
+ size: int
+ total: int
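PaginatedDataAgentResponseParams exposes `records`, `page`, `size`, and `total`,
which is enough to drain a listing endpoint page by page. A hedged sketch;
`fetch_page` is a hypothetical callable, since the endpoint itself is not shown
in this hunk:

    import typing

    from humanloop.requests.agent_response import AgentResponseParams
    from humanloop.requests.paginated_data_agent_response import (
        PaginatedDataAgentResponseParams,
    )

    def drain_pages(
        fetch_page: typing.Callable[[int], PaginatedDataAgentResponseParams],
    ) -> typing.List[AgentResponseParams]:
        records: typing.List[AgentResponseParams] = []
        page = 1
        while True:
            data = fetch_page(page)
            records.extend(data["records"])
            # Stop once every record is collected or a page comes back empty.
            if len(records) >= data["total"] or not data["records"]:
                break
            page += 1
        return records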
diff --git a/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py b/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py
similarity index 65%
rename from src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py
rename to src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py
index cf8bc4bf..0e7adb64 100644
--- a/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py
+++ b/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py
@@ -2,16 +2,16 @@
import typing_extensions
import typing
-from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item import (
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams,
+from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item import (
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams,
)
-class PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseParams(
+class PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseParams(
typing_extensions.TypedDict
):
records: typing.Sequence[
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams
]
page: int
size: int
diff --git a/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py b/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py
similarity index 58%
rename from src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py
rename to src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py
index 1ba74108..b43a5521 100644
--- a/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py
+++ b/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py
@@ -6,9 +6,13 @@
from .dataset_response import DatasetResponseParams
from .evaluator_response import EvaluatorResponseParams
from .flow_response import FlowResponseParams
+from .agent_response import AgentResponseParams
-PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams = (
- typing.Union[
- PromptResponseParams, ToolResponseParams, DatasetResponseParams, EvaluatorResponseParams, FlowResponseParams
- ]
-)
+PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams = typing.Union[
+ PromptResponseParams,
+ ToolResponseParams,
+ DatasetResponseParams,
+ EvaluatorResponseParams,
+ FlowResponseParams,
+ AgentResponseParams,
+]
diff --git a/src/humanloop/requests/populate_template_response.py b/src/humanloop/requests/populate_template_response.py
index 190341b0..40b62295 100644
--- a/src/humanloop/requests/populate_template_response.py
+++ b/src/humanloop/requests/populate_template_response.py
@@ -9,7 +9,7 @@
from .populate_template_response_stop import PopulateTemplateResponseStopParams
import typing
from .response_format import ResponseFormatParams
-from ..types.reasoning_effort import ReasoningEffort
+from .populate_template_response_reasoning_effort import PopulateTemplateResponseReasoningEffortParams
from .tool_function import ToolFunctionParams
from .linked_tool_response import LinkedToolResponseParams
from .environment_response import EnvironmentResponseParams
@@ -119,9 +119,9 @@ class PopulateTemplateResponseParams(typing_extensions.TypedDict):
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
"""
- reasoning_effort: typing_extensions.NotRequired[ReasoningEffort]
+ reasoning_effort: typing_extensions.NotRequired[PopulateTemplateResponseReasoningEffortParams]
"""
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
"""
tools: typing_extensions.NotRequired[typing.Sequence[ToolFunctionParams]]
@@ -169,6 +169,11 @@ class PopulateTemplateResponseParams(typing_extensions.TypedDict):
Name of the Prompt.
"""
+ schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The JSON schema for the Prompt.
+ """
+
version_id: str
"""
Unique identifier for the specific Prompt Version. If no query params are provided, the default deployed Prompt Version is returned.
@@ -213,6 +218,11 @@ class PopulateTemplateResponseParams(typing_extensions.TypedDict):
Aggregation of Evaluator results for the Prompt Version.
"""
+ content: typing_extensions.NotRequired[str]
+ """
+ The serialized kernel for the Prompt. Corresponds to the .prompt file.
+ """
+
populated_template: typing_extensions.NotRequired[PopulateTemplateResponsePopulatedTemplateParams]
"""
The template populated with the input values you provided in the request. Returns None if no template exists.
diff --git a/src/humanloop/requests/populate_template_response_reasoning_effort.py b/src/humanloop/requests/populate_template_response_reasoning_effort.py
new file mode 100644
index 00000000..6b1dd46a
--- /dev/null
+++ b/src/humanloop/requests/populate_template_response_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ..types.open_ai_reasoning_effort import OpenAiReasoningEffort
+
+PopulateTemplateResponseReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/requests/prompt_kernel_request.py b/src/humanloop/requests/prompt_kernel_request.py
index 61355166..1e4f56de 100644
--- a/src/humanloop/requests/prompt_kernel_request.py
+++ b/src/humanloop/requests/prompt_kernel_request.py
@@ -9,11 +9,17 @@
from .prompt_kernel_request_stop import PromptKernelRequestStopParams
import typing
from .response_format import ResponseFormatParams
-from ..types.reasoning_effort import ReasoningEffort
+from .prompt_kernel_request_reasoning_effort import PromptKernelRequestReasoningEffortParams
from .tool_function import ToolFunctionParams
class PromptKernelRequestParams(typing_extensions.TypedDict):
+ """
+ Base class used by both PromptKernelRequest and AgentKernelRequest.
+
+ Contains the Prompt-related fields common to both.
+ """
+
model: str
"""
The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
@@ -89,9 +95,9 @@ class PromptKernelRequestParams(typing_extensions.TypedDict):
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
"""
- reasoning_effort: typing_extensions.NotRequired[ReasoningEffort]
+ reasoning_effort: typing_extensions.NotRequired[PromptKernelRequestReasoningEffortParams]
"""
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
"""
tools: typing_extensions.NotRequired[typing.Sequence[ToolFunctionParams]]
diff --git a/src/humanloop/requests/prompt_kernel_request_reasoning_effort.py b/src/humanloop/requests/prompt_kernel_request_reasoning_effort.py
new file mode 100644
index 00000000..0c3d194b
--- /dev/null
+++ b/src/humanloop/requests/prompt_kernel_request_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ..types.open_ai_reasoning_effort import OpenAiReasoningEffort
+
+PromptKernelRequestReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/requests/prompt_response.py b/src/humanloop/requests/prompt_response.py
index 912866c5..05b4a71e 100644
--- a/src/humanloop/requests/prompt_response.py
+++ b/src/humanloop/requests/prompt_response.py
@@ -10,7 +10,7 @@
from .prompt_response_stop import PromptResponseStopParams
import typing
from .response_format import ResponseFormatParams
-from ..types.reasoning_effort import ReasoningEffort
+from .prompt_response_reasoning_effort import PromptResponseReasoningEffortParams
from .tool_function import ToolFunctionParams
from .linked_tool_response import LinkedToolResponseParams
from .environment_response import EnvironmentResponseParams
@@ -122,9 +122,9 @@ class PromptResponseParams(typing_extensions.TypedDict):
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
"""
- reasoning_effort: typing_extensions.NotRequired[ReasoningEffort]
+ reasoning_effort: typing_extensions.NotRequired[PromptResponseReasoningEffortParams]
"""
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
"""
tools: typing_extensions.NotRequired[typing.Sequence[ToolFunctionParams]]
@@ -172,6 +172,11 @@ class PromptResponseParams(typing_extensions.TypedDict):
Name of the Prompt.
"""
+ schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The JSON schema for the Prompt.
+ """
+
version_id: str
"""
Unique identifier for the specific Prompt Version. If no query params are provided, the default deployed Prompt Version is returned.
@@ -215,3 +220,8 @@ class PromptResponseParams(typing_extensions.TypedDict):
"""
Aggregation of Evaluator results for the Prompt Version.
"""
+
+ content: typing_extensions.NotRequired[str]
+ """
+ The serialized kernel for the Prompt. Corresponds to the .prompt file.
+ """
diff --git a/src/humanloop/requests/prompt_response_reasoning_effort.py b/src/humanloop/requests/prompt_response_reasoning_effort.py
new file mode 100644
index 00000000..4d019051
--- /dev/null
+++ b/src/humanloop/requests/prompt_response_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ..types.open_ai_reasoning_effort import OpenAiReasoningEffort
+
+PromptResponseReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/requests/run_version_response.py b/src/humanloop/requests/run_version_response.py
index 879ea25c..569d0d76 100644
--- a/src/humanloop/requests/run_version_response.py
+++ b/src/humanloop/requests/run_version_response.py
@@ -5,7 +5,8 @@
from .tool_response import ToolResponseParams
from .evaluator_response import EvaluatorResponseParams
from .flow_response import FlowResponseParams
+from .agent_response import AgentResponseParams
RunVersionResponseParams = typing.Union[
- PromptResponseParams, ToolResponseParams, EvaluatorResponseParams, FlowResponseParams
+ PromptResponseParams, ToolResponseParams, EvaluatorResponseParams, FlowResponseParams, AgentResponseParams
]
diff --git a/src/humanloop/requests/tool_call_response.py b/src/humanloop/requests/tool_call_response.py
new file mode 100644
index 00000000..1c92b28f
--- /dev/null
+++ b/src/humanloop/requests/tool_call_response.py
@@ -0,0 +1,146 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing_extensions
+import datetime as dt
+from .tool_response import ToolResponseParams
+import typing
+from ..types.log_status import LogStatus
+from .evaluator_log_response import EvaluatorLogResponseParams
+from .log_response import LogResponseParams
+
+
+class ToolCallResponseParams(typing_extensions.TypedDict):
+ """
+ Response model for a Tool call.
+ """
+
+ start_time: typing_extensions.NotRequired[dt.datetime]
+ """
+ When the logged event started.
+ """
+
+ end_time: typing_extensions.NotRequired[dt.datetime]
+ """
+ When the logged event ended.
+ """
+
+ tool: ToolResponseParams
+ """
+ Tool used to generate the Log.
+ """
+
+ output: typing_extensions.NotRequired[str]
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing_extensions.NotRequired[dt.datetime]
+ """
+ User defined timestamp for when the log was created.
+ """
+
+ error: typing_extensions.NotRequired[str]
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing_extensions.NotRequired[float]
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing_extensions.NotRequired[str]
+ """
+ Captured log and debug statements.
+ """
+
+ provider_request: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Raw request sent to provider.
+ """
+
+ provider_response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Raw response received from the provider.
+ """
+
+ inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The inputs passed to the prompt template.
+ """
+
+ source: typing_extensions.NotRequired[str]
+ """
+ Identifies where the model was called from.
+ """
+
+ metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Any additional metadata to record.
+ """
+
+ log_status: typing_extensions.NotRequired[LogStatus]
+ """
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+ """
+
+ source_datapoint_id: typing_extensions.NotRequired[str]
+ """
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ """
+
+ trace_parent_id: typing_extensions.NotRequired[str]
+ """
+ The ID of the parent Log to nest this Log under in a Trace.
+ """
+
+ batches: typing_extensions.NotRequired[typing.Sequence[str]]
+ """
+ Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations.
+ """
+
+ user: typing_extensions.NotRequired[str]
+ """
+ End-user ID related to the Log.
+ """
+
+ environment: typing_extensions.NotRequired[str]
+ """
+ The name of the Environment the Log is associated with.
+ """
+
+ save: typing_extensions.NotRequired[bool]
+ """
+ Whether the request/response payloads will be stored on Humanloop.
+ """
+
+ log_id: typing_extensions.NotRequired[str]
+ """
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ """
+
+ id: str
+ """
+ ID of the log.
+ """
+
+ evaluator_logs: typing.Sequence[EvaluatorLogResponseParams]
+ """
+ List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
+ """
+
+ trace_flow_id: typing_extensions.NotRequired[str]
+ """
+ Identifier for the Flow that the Trace belongs to.
+ """
+
+ trace_id: typing_extensions.NotRequired[str]
+ """
+ ID of the Trace containing the Tool Call Log.
+ """
+
+ trace_children: typing_extensions.NotRequired[typing.Sequence[LogResponseParams]]
+ """
+ Logs nested under this Log in the Trace.
+ """
diff --git a/src/humanloop/requests/tool_log_response.py b/src/humanloop/requests/tool_log_response.py
index bac9dbbb..1aa0daea 100644
--- a/src/humanloop/requests/tool_log_response.py
+++ b/src/humanloop/requests/tool_log_response.py
@@ -7,6 +7,7 @@
import typing
from ..types.log_status import LogStatus
from .tool_response import ToolResponseParams
+from .chat_message import ChatMessageParams
import typing
if typing.TYPE_CHECKING:
@@ -148,3 +149,8 @@ class ToolLogResponseParams(typing_extensions.TypedDict):
"""
Tool used to generate the Log.
"""
+
+ output_message: typing_extensions.NotRequired[ChatMessageParams]
+ """
+ The message returned by the Tool.
+ """
diff --git a/src/humanloop/requests/version_deployment_response_file.py b/src/humanloop/requests/version_deployment_response_file.py
index 8a16af00..9659cb49 100644
--- a/src/humanloop/requests/version_deployment_response_file.py
+++ b/src/humanloop/requests/version_deployment_response_file.py
@@ -10,6 +10,12 @@
from .tool_response import ToolResponseParams
from .evaluator_response import EvaluatorResponseParams
from .flow_response import FlowResponseParams
+ from .agent_response import AgentResponseParams
VersionDeploymentResponseFileParams = typing.Union[
- "PromptResponseParams", "ToolResponseParams", DatasetResponseParams, "EvaluatorResponseParams", "FlowResponseParams"
+ "PromptResponseParams",
+ "ToolResponseParams",
+ DatasetResponseParams,
+ "EvaluatorResponseParams",
+ "FlowResponseParams",
+ "AgentResponseParams",
]
diff --git a/src/humanloop/requests/version_id_response_version.py b/src/humanloop/requests/version_id_response_version.py
index 50ecf7bc..9c317679 100644
--- a/src/humanloop/requests/version_id_response_version.py
+++ b/src/humanloop/requests/version_id_response_version.py
@@ -10,6 +10,12 @@
from .tool_response import ToolResponseParams
from .evaluator_response import EvaluatorResponseParams
from .flow_response import FlowResponseParams
+ from .agent_response import AgentResponseParams
VersionIdResponseVersionParams = typing.Union[
- "PromptResponseParams", "ToolResponseParams", DatasetResponseParams, "EvaluatorResponseParams", "FlowResponseParams"
+ "PromptResponseParams",
+ "ToolResponseParams",
+ DatasetResponseParams,
+ "EvaluatorResponseParams",
+ "FlowResponseParams",
+ "AgentResponseParams",
]
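Several response unions in this patch, including VersionIdResponseVersionParams
above, now admit AgentResponseParams. Because the members are TypedDicts, runtime
discrimination has to inspect the data rather than use isinstance. Agent
responses set `type` to the literal "agent"; that the other members carry
analogous `type` literals is an assumption here:

    from humanloop.requests.version_id_response_version import (
        VersionIdResponseVersionParams,
    )

    def is_agent_version(version: VersionIdResponseVersionParams) -> bool:
        # `type` is NotRequired on AgentResponseParams, hence .get().
        return version.get("type") == "agent"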
diff --git a/src/humanloop/tools/client.py b/src/humanloop/tools/client.py
index 16d75bd7..ea6b14a2 100644
--- a/src/humanloop/tools/client.py
+++ b/src/humanloop/tools/client.py
@@ -3,10 +3,11 @@
import typing
from ..core.client_wrapper import SyncClientWrapper
from .raw_client import RawToolsClient
+from ..requests.tool_kernel_request import ToolKernelRequestParams
import datetime as dt
from ..types.log_status import LogStatus
-from ..requests.tool_kernel_request import ToolKernelRequestParams
from ..core.request_options import RequestOptions
+from ..types.tool_call_response import ToolCallResponse
from ..types.create_tool_log_response import CreateToolLogResponse
from ..types.log_response import LogResponse
from ..types.project_sort_by import ProjectSortBy
@@ -29,6 +30,8 @@
from ..requests.evaluator_activation_deactivation_request_deactivate_item import (
EvaluatorActivationDeactivationRequestDeactivateItemParams,
)
+from ..types.file_environment_variable_request import FileEnvironmentVariableRequest
+from ..requests.file_environment_variable_request import FileEnvironmentVariableRequestParams
from ..core.client_wrapper import AsyncClientWrapper
from .raw_client import AsyncRawToolsClient
from ..core.pagination import AsyncPager
@@ -52,6 +55,133 @@ def with_raw_response(self) -> RawToolsClient:
"""
return self._raw_client
+ def call(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ tool: typing.Optional[ToolKernelRequestParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ tool_call_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> ToolCallResponse:
+ """
+ Call a Tool.
+
+ Calling a Tool with inputs runs the tool's source code and logs the result and metadata to Humanloop.
+
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Tool. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can pass the
+ Tool details in the request body. In this case, we will check whether the details
+ correspond to an existing version of the Tool; if they do not, we will create a new version.
+ This is helpful when you store or derive your Tool details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Tool to call.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to call.
+
+ path : typing.Optional[str]
+ Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Tool.
+
+ tool : typing.Optional[ToolKernelRequestParams]
+ Details of your Tool. A new Tool version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ tool_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated with.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ ToolCallResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.tools.call()
+ """
+ response = self._raw_client.call(
+ version_id=version_id,
+ environment=environment,
+ path=path,
+ id=id,
+ tool=tool,
+ inputs=inputs,
+ source=source,
+ metadata=metadata,
+ start_time=start_time,
+ end_time=end_time,
+ log_status=log_status,
+ source_datapoint_id=source_datapoint_id,
+ trace_parent_id=trace_parent_id,
+ user=user,
+ tool_call_request_environment=tool_call_request_environment,
+ save=save,
+ log_id=log_id,
+ request_options=request_options,
+ )
+ return response.data
+
def log(
self,
*,
@@ -59,6 +189,7 @@ def log(
environment: typing.Optional[str] = None,
path: typing.Optional[str] = OMIT,
id: typing.Optional[str] = OMIT,
+ tool: typing.Optional[ToolKernelRequestParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -78,7 +209,6 @@ def log(
tool_log_request_environment: typing.Optional[str] = OMIT,
save: typing.Optional[bool] = OMIT,
log_id: typing.Optional[str] = OMIT,
- tool: typing.Optional[ToolKernelRequestParams] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> CreateToolLogResponse:
"""
@@ -106,6 +236,9 @@ def log(
id : typing.Optional[str]
ID for an existing Tool.
+ tool : typing.Optional[ToolKernelRequestParams]
+ Details of your Tool. A new Tool version will be created if the provided details are new.
+
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -163,9 +296,6 @@ def log(
log_id : typing.Optional[str]
This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- tool : typing.Optional[ToolKernelRequestParams]
- Details of your Tool. A new Tool version will be created if the provided details are new.
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -206,6 +336,7 @@ def log(
environment=environment,
path=path,
id=id,
+ tool=tool,
start_time=start_time,
end_time=end_time,
output=output,
@@ -225,7 +356,6 @@ def log(
tool_log_request_environment=tool_log_request_environment,
save=save,
log_id=log_id,
- tool=tool,
request_options=request_options,
)
return response.data
@@ -966,6 +1096,112 @@ def update_monitoring(
)
return response.data
+ def get_environment_variables(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> typing.List[FileEnvironmentVariableRequest]:
+ """
+ Parameters
+ ----------
+ id : str
+ Unique identifier for File.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.List[FileEnvironmentVariableRequest]
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.tools.get_environment_variables(
+ id="id",
+ )
+ """
+ response = self._raw_client.get_environment_variables(id, request_options=request_options)
+ return response.data
+
+ def add_environment_variable(
+ self,
+ id: str,
+ *,
+ request: typing.Sequence[FileEnvironmentVariableRequestParams],
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.List[FileEnvironmentVariableRequest]:
+ """
+ Add an environment variable to a Tool.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Tool.
+
+ request : typing.Sequence[FileEnvironmentVariableRequestParams]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.List[FileEnvironmentVariableRequest]
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.tools.add_environment_variable(
+ id="id",
+ request=[{"name": "name", "value": "value"}],
+ )
+ """
+ response = self._raw_client.add_environment_variable(id, request=request, request_options=request_options)
+ return response.data
+
+ def delete_environment_variable(
+ self, id: str, name: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> typing.List[FileEnvironmentVariableRequest]:
+ """
+ Parameters
+ ----------
+ id : str
+ Unique identifier for File.
+
+ name : str
+ Name of the Environment Variable to delete.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.List[FileEnvironmentVariableRequest]
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.tools.delete_environment_variable(
+ id="id",
+ name="name",
+ )
+ """
+ response = self._raw_client.delete_environment_variable(id, name, request_options=request_options)
+ return response.data
+
class AsyncToolsClient:
def __init__(self, *, client_wrapper: AsyncClientWrapper):
@@ -982,6 +1218,141 @@ def with_raw_response(self) -> AsyncRawToolsClient:
"""
return self._raw_client
+ async def call(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ tool: typing.Optional[ToolKernelRequestParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ tool_call_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> ToolCallResponse:
+ """
+ Call a Tool.
+
+ Calling a Tool with inputs runs the tool's source code and logs the result and metadata to Humanloop.
+
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Tool. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can pass the
+ Tool details in the request body. In this case, we will check whether the details
+ correspond to an existing version of the Tool; if they do not, we will create a new version.
+ This is helpful when you store or derive your Tool details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Tool to call.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to call.
+
+ path : typing.Optional[str]
+ Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Tool.
+
+ tool : typing.Optional[ToolKernelRequestParams]
+ Details of your Tool. A new Tool version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the Tool.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs with Evaluations. If provided, Humanloop will automatically associate this Log with Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ tool_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated with.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ ToolCallResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.tools.call()
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.call(
+ version_id=version_id,
+ environment=environment,
+ path=path,
+ id=id,
+ tool=tool,
+ inputs=inputs,
+ source=source,
+ metadata=metadata,
+ start_time=start_time,
+ end_time=end_time,
+ log_status=log_status,
+ source_datapoint_id=source_datapoint_id,
+ trace_parent_id=trace_parent_id,
+ user=user,
+ tool_call_request_environment=tool_call_request_environment,
+ save=save,
+ log_id=log_id,
+ request_options=request_options,
+ )
+ return response.data
+
async def log(
self,
*,
@@ -989,6 +1360,7 @@ async def log(
environment: typing.Optional[str] = None,
path: typing.Optional[str] = OMIT,
id: typing.Optional[str] = OMIT,
+ tool: typing.Optional[ToolKernelRequestParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -1008,7 +1380,6 @@ async def log(
tool_log_request_environment: typing.Optional[str] = OMIT,
save: typing.Optional[bool] = OMIT,
log_id: typing.Optional[str] = OMIT,
- tool: typing.Optional[ToolKernelRequestParams] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> CreateToolLogResponse:
"""
@@ -1036,6 +1407,9 @@ async def log(
id : typing.Optional[str]
ID for an existing Tool.
+ tool : typing.Optional[ToolKernelRequestParams]
+ Details of your Tool. A new Tool version will be created if the provided details are new.
+
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -1093,9 +1467,6 @@ async def log(
log_id : typing.Optional[str]
This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- tool : typing.Optional[ToolKernelRequestParams]
- Details of your Tool. A new Tool version will be created if the provided details are new.
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -1144,6 +1515,7 @@ async def main() -> None:
environment=environment,
path=path,
id=id,
+ tool=tool,
start_time=start_time,
end_time=end_time,
output=output,
@@ -1163,7 +1535,6 @@ async def main() -> None:
tool_log_request_environment=tool_log_request_environment,
save=save,
log_id=log_id,
- tool=tool,
request_options=request_options,
)
return response.data
@@ -2010,3 +2381,133 @@ async def main() -> None:
id, activate=activate, deactivate=deactivate, request_options=request_options
)
return response.data
+
+ async def get_environment_variables(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> typing.List[FileEnvironmentVariableRequest]:
+ """
+ Parameters
+ ----------
+ id : str
+ Unique identifier for File.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.List[FileEnvironmentVariableRequest]
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.tools.get_environment_variables(
+ id="id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.get_environment_variables(id, request_options=request_options)
+ return response.data
+
+ async def add_environment_variable(
+ self,
+ id: str,
+ *,
+ request: typing.Sequence[FileEnvironmentVariableRequestParams],
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.List[FileEnvironmentVariableRequest]:
+ """
+ Add environment variables to a Tool.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Tool.
+
+ request : typing.Sequence[FileEnvironmentVariableRequestParams]
+ The environment variables to add to the Tool.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.List[FileEnvironmentVariableRequest]
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.tools.add_environment_variable(
+ id="id",
+ request=[{"name": "name", "value": "value"}],
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.add_environment_variable(id, request=request, request_options=request_options)
+ return response.data
+
+ async def delete_environment_variable(
+ self, id: str, name: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> typing.List[FileEnvironmentVariableRequest]:
+ """
+ Parameters
+ ----------
+ id : str
+ Unique identifier for File.
+
+ name : str
+ Name of the Environment Variable to delete.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.List[FileEnvironmentVariableRequest]
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.tools.delete_environment_variable(
+ id="id",
+ name="name",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.delete_environment_variable(id, name, request_options=request_options)
+ return response.data
diff --git a/src/humanloop/tools/raw_client.py b/src/humanloop/tools/raw_client.py
index 4a1f29e9..b412b771 100644
--- a/src/humanloop/tools/raw_client.py
+++ b/src/humanloop/tools/raw_client.py
@@ -2,18 +2,19 @@
import typing
from ..core.client_wrapper import SyncClientWrapper
+from ..requests.tool_kernel_request import ToolKernelRequestParams
import datetime as dt
from ..types.log_status import LogStatus
-from ..requests.tool_kernel_request import ToolKernelRequestParams
from ..core.request_options import RequestOptions
from ..core.http_response import HttpResponse
-from ..types.create_tool_log_response import CreateToolLogResponse
+from ..types.tool_call_response import ToolCallResponse
from ..core.serialization import convert_and_respect_annotation_metadata
from ..core.unchecked_base_model import construct_type
from ..errors.unprocessable_entity_error import UnprocessableEntityError
from ..types.http_validation_error import HttpValidationError
from json.decoder import JSONDecodeError
from ..core.api_error import ApiError
+from ..types.create_tool_log_response import CreateToolLogResponse
from ..types.log_response import LogResponse
from ..core.jsonable_encoder import jsonable_encoder
from ..requests.tool_function import ToolFunctionParams
@@ -27,6 +28,8 @@
from ..requests.evaluator_activation_deactivation_request_deactivate_item import (
EvaluatorActivationDeactivationRequestDeactivateItemParams,
)
+from ..types.file_environment_variable_request import FileEnvironmentVariableRequest
+from ..requests.file_environment_variable_request import FileEnvironmentVariableRequestParams
from ..core.client_wrapper import AsyncClientWrapper
from ..core.http_response import AsyncHttpResponse
@@ -38,6 +41,159 @@ class RawToolsClient:
def __init__(self, *, client_wrapper: SyncClientWrapper):
self._client_wrapper = client_wrapper
+ def call(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ tool: typing.Optional[ToolKernelRequestParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ tool_call_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[ToolCallResponse]:
+ """
+ Call a Tool.
+
+ Calling a Tool with inputs runs the tool's source code and logs the result and metadata to Humanloop.
+
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Tool. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can pass in
+ Tool details in the request body. In this case, we will check whether the details correspond
+ to an existing version of the Tool. If they do not, we will create a new version. This is helpful
+ when you are storing or deriving your Tool details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Tool to call.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to call.
+
+ path : typing.Optional[str]
+ Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Tool.
+
+ tool : typing.Optional[ToolKernelRequestParams]
+ Details of your Tool. A new Tool version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the Tool.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs with Evaluations. If provided, Humanloop will automatically associate this Log with Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ tool_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated with.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[ToolCallResponse]
+ Successful Response
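+
+ Examples
+ --------
+ A minimal, hand-written sketch (not auto-generated). It assumes the raw
+ client is reached via the `with_raw_response` property on `client.tools`,
+ mirroring the wrapped-client examples:
+
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ response = client.tools.with_raw_response.call()
+ print(response.data)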
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "tools/call",
+ method="POST",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ json={
+ "path": path,
+ "id": id,
+ "tool": convert_and_respect_annotation_metadata(
+ object_=tool, annotation=ToolKernelRequestParams, direction="write"
+ ),
+ "inputs": inputs,
+ "source": source,
+ "metadata": metadata,
+ "start_time": start_time,
+ "end_time": end_time,
+ "log_status": log_status,
+ "source_datapoint_id": source_datapoint_id,
+ "trace_parent_id": trace_parent_id,
+ "user": user,
+ "environment": tool_call_request_environment,
+ "save": save,
+ "log_id": log_id,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ ToolCallResponse,
+ construct_type(
+ type_=ToolCallResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
def log(
self,
*,
@@ -45,6 +201,7 @@ def log(
environment: typing.Optional[str] = None,
path: typing.Optional[str] = OMIT,
id: typing.Optional[str] = OMIT,
+ tool: typing.Optional[ToolKernelRequestParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -64,7 +221,6 @@ def log(
tool_log_request_environment: typing.Optional[str] = OMIT,
save: typing.Optional[bool] = OMIT,
log_id: typing.Optional[str] = OMIT,
- tool: typing.Optional[ToolKernelRequestParams] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[CreateToolLogResponse]:
"""
@@ -92,6 +248,9 @@ def log(
id : typing.Optional[str]
ID for an existing Tool.
+ tool : typing.Optional[ToolKernelRequestParams]
+ Details of your Tool. A new Tool version will be created if the provided details are new.
+
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -149,9 +308,6 @@ def log(
log_id : typing.Optional[str]
This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- tool : typing.Optional[ToolKernelRequestParams]
- Details of your Tool. A new Tool version will be created if the provided details are new.
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -170,6 +326,9 @@ def log(
json={
"path": path,
"id": id,
+ "tool": convert_and_respect_annotation_metadata(
+ object_=tool, annotation=ToolKernelRequestParams, direction="write"
+ ),
"start_time": start_time,
"end_time": end_time,
"output": output,
@@ -189,9 +348,6 @@ def log(
"environment": tool_log_request_environment,
"save": save,
"log_id": log_id,
- "tool": convert_and_respect_annotation_metadata(
- object_=tool, annotation=ToolKernelRequestParams, direction="write"
- ),
},
headers={
"content-type": "application/json",
@@ -1038,75 +1194,387 @@ def update_monitoring(
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
+ def get_environment_variables(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> HttpResponse[typing.List[FileEnvironmentVariableRequest]]:
+ """
+ Parameters
+ ----------
+ id : str
+ Unique identifier for File.
-class AsyncRawToolsClient:
- def __init__(self, *, client_wrapper: AsyncClientWrapper):
- self._client_wrapper = client_wrapper
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- async def log(
+ Returns
+ -------
+ HttpResponse[typing.List[FileEnvironmentVariableRequest]]
+ Successful Response
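+
+ Examples
+ --------
+ A minimal, hand-written sketch; assumes access via `with_raw_response`:
+
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ response = client.tools.with_raw_response.get_environment_variables(
+ id="id",
+ )
+ print(response.data)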
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"tools/{jsonable_encoder(id)}/environment-variables",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ typing.List[FileEnvironmentVariableRequest],
+ construct_type(
+ type_=typing.List[FileEnvironmentVariableRequest], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def add_environment_variable(
self,
+ id: str,
*,
- version_id: typing.Optional[str] = None,
- environment: typing.Optional[str] = None,
- path: typing.Optional[str] = OMIT,
- id: typing.Optional[str] = OMIT,
- start_time: typing.Optional[dt.datetime] = OMIT,
- end_time: typing.Optional[dt.datetime] = OMIT,
- output: typing.Optional[str] = OMIT,
- created_at: typing.Optional[dt.datetime] = OMIT,
- error: typing.Optional[str] = OMIT,
- provider_latency: typing.Optional[float] = OMIT,
- stdout: typing.Optional[str] = OMIT,
- provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- source: typing.Optional[str] = OMIT,
- metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
- source_datapoint_id: typing.Optional[str] = OMIT,
- trace_parent_id: typing.Optional[str] = OMIT,
- user: typing.Optional[str] = OMIT,
- tool_log_request_environment: typing.Optional[str] = OMIT,
- save: typing.Optional[bool] = OMIT,
- log_id: typing.Optional[str] = OMIT,
- tool: typing.Optional[ToolKernelRequestParams] = OMIT,
+ request: typing.Sequence[FileEnvironmentVariableRequestParams],
request_options: typing.Optional[RequestOptions] = None,
- ) -> AsyncHttpResponse[CreateToolLogResponse]:
+ ) -> HttpResponse[typing.List[FileEnvironmentVariableRequest]]:
"""
- Log to a Tool.
-
- You can use query parameters `version_id`, or `environment`, to target
- an existing version of the Tool. Otherwise the default deployed version will be chosen.
-
- Instead of targeting an existing version explicitly, you can instead pass in
- Tool details in the request body. In this case, we will check if the details correspond
- to an existing version of the Tool, if not we will create a new version. This is helpful
- in the case where you are storing or deriving your Tool details in code.
+ Add environment variables to a Tool.
Parameters
----------
- version_id : typing.Optional[str]
- A specific Version ID of the Tool to log to.
-
- environment : typing.Optional[str]
- Name of the Environment identifying a deployed version to log to.
-
- path : typing.Optional[str]
- Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
-
- id : typing.Optional[str]
- ID for an existing Tool.
-
- start_time : typing.Optional[dt.datetime]
- When the logged event started.
+ id : str
+ Unique identifier for Tool.
- end_time : typing.Optional[dt.datetime]
- When the logged event ended.
+ request : typing.Sequence[FileEnvironmentVariableRequestParams]
+ The environment variables to add to the Tool.
- output : typing.Optional[str]
- Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- created_at : typing.Optional[dt.datetime]
+ Returns
+ -------
+ HttpResponse[typing.List[FileEnvironmentVariableRequest]]
+ Successful Response
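+
+ Examples
+ --------
+ A minimal, hand-written sketch; assumes access via `with_raw_response`:
+
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ response = client.tools.with_raw_response.add_environment_variable(
+ id="id",
+ request=[{"name": "name", "value": "value"}],
+ )
+ print(response.data)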
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"tools/{jsonable_encoder(id)}/environment-variables",
+ method="POST",
+ json=convert_and_respect_annotation_metadata(
+ object_=request, annotation=typing.Sequence[FileEnvironmentVariableRequestParams], direction="write"
+ ),
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ typing.List[FileEnvironmentVariableRequest],
+ construct_type(
+ type_=typing.List[FileEnvironmentVariableRequest], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def delete_environment_variable(
+ self, id: str, name: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> HttpResponse[typing.List[FileEnvironmentVariableRequest]]:
+ """
+ Parameters
+ ----------
+ id : str
+ Unique identifier for File.
+
+ name : str
+ Name of the Environment Variable to delete.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[typing.List[FileEnvironmentVariableRequest]]
+ Successful Response
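+
+ Examples
+ --------
+ A minimal, hand-written sketch; assumes access via `with_raw_response`:
+
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ response = client.tools.with_raw_response.delete_environment_variable(
+ id="id",
+ name="name",
+ )
+ print(response.data)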
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"tools/{jsonable_encoder(id)}/environment-variables/{jsonable_encoder(name)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ typing.List[FileEnvironmentVariableRequest],
+ construct_type(
+ type_=typing.List[FileEnvironmentVariableRequest], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncRawToolsClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def call(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ tool: typing.Optional[ToolKernelRequestParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ tool_call_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[ToolCallResponse]:
+ """
+ Call a Tool.
+
+ Calling a Tool with inputs runs the tool's source code and logs the result and metadata to Humanloop.
+
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Tool. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can pass in
+ Tool details in the request body. In this case, we will check whether the details correspond
+ to an existing version of the Tool. If they do not, we will create a new version. This is helpful
+ when you are storing or deriving your Tool details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Tool to call.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to call.
+
+ path : typing.Optional[str]
+ Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Tool.
+
+ tool : typing.Optional[ToolKernelRequestParams]
+ Details of your Tool. A new Tool version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the Tool.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs with Evaluations. If provided, Humanloop will automatically associate this Log with Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ tool_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated with.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[ToolCallResponse]
+ Successful Response
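+
+ Examples
+ --------
+ A minimal, hand-written sketch; assumes access via `with_raw_response`:
+
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ response = await client.tools.with_raw_response.call()
+ print(response.data)
+
+
+ asyncio.run(main())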
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "tools/call",
+ method="POST",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ json={
+ "path": path,
+ "id": id,
+ "tool": convert_and_respect_annotation_metadata(
+ object_=tool, annotation=ToolKernelRequestParams, direction="write"
+ ),
+ "inputs": inputs,
+ "source": source,
+ "metadata": metadata,
+ "start_time": start_time,
+ "end_time": end_time,
+ "log_status": log_status,
+ "source_datapoint_id": source_datapoint_id,
+ "trace_parent_id": trace_parent_id,
+ "user": user,
+ "environment": tool_call_request_environment,
+ "save": save,
+ "log_id": log_id,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ ToolCallResponse,
+ construct_type(
+ type_=ToolCallResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def log(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ tool: typing.Optional[ToolKernelRequestParams] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ output: typing.Optional[str] = OMIT,
+ created_at: typing.Optional[dt.datetime] = OMIT,
+ error: typing.Optional[str] = OMIT,
+ provider_latency: typing.Optional[float] = OMIT,
+ stdout: typing.Optional[str] = OMIT,
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ tool_log_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[CreateToolLogResponse]:
+ """
+ Log to a Tool.
+
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Tool. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can pass in
+ Tool details in the request body. In this case, we will check whether the details correspond
+ to an existing version of the Tool. If they do not, we will create a new version. This is helpful
+ when you are storing or deriving your Tool details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Tool to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+ Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Tool.
+
+ tool : typing.Optional[ToolKernelRequestParams]
+ Details of your Tool. A new Tool version will be created if the provided details are new.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ output : typing.Optional[str]
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+ created_at : typing.Optional[dt.datetime]
User defined timestamp for when the log was created.
error : typing.Optional[str]
@@ -1154,9 +1622,6 @@ async def log(
log_id : typing.Optional[str]
This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- tool : typing.Optional[ToolKernelRequestParams]
- Details of your Tool. A new Tool version will be created if the provided details are new.
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -1175,6 +1640,9 @@ async def log(
json={
"path": path,
"id": id,
+ "tool": convert_and_respect_annotation_metadata(
+ object_=tool, annotation=ToolKernelRequestParams, direction="write"
+ ),
"start_time": start_time,
"end_time": end_time,
"output": output,
@@ -1194,9 +1662,6 @@ async def log(
"environment": tool_log_request_environment,
"save": save,
"log_id": log_id,
- "tool": convert_and_respect_annotation_metadata(
- object_=tool, annotation=ToolKernelRequestParams, direction="write"
- ),
},
headers={
"content-type": "application/json",
@@ -2044,3 +2509,159 @@ async def update_monitoring(
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def get_environment_variables(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncHttpResponse[typing.List[FileEnvironmentVariableRequest]]:
+ """
+ Parameters
+ ----------
+ id : str
+ Unique identifier for File.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[typing.List[FileEnvironmentVariableRequest]]
+ Successful Response
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"tools/{jsonable_encoder(id)}/environment-variables",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ typing.List[FileEnvironmentVariableRequest],
+ construct_type(
+ type_=typing.List[FileEnvironmentVariableRequest], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def add_environment_variable(
+ self,
+ id: str,
+ *,
+ request: typing.Sequence[FileEnvironmentVariableRequestParams],
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[typing.List[FileEnvironmentVariableRequest]]:
+ """
+ Add environment variables to a Tool.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Tool.
+
+ request : typing.Sequence[FileEnvironmentVariableRequestParams]
+ The environment variables to add to the Tool.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[typing.List[FileEnvironmentVariableRequest]]
+ Successful Response
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"tools/{jsonable_encoder(id)}/environment-variables",
+ method="POST",
+ json=convert_and_respect_annotation_metadata(
+ object_=request, annotation=typing.Sequence[FileEnvironmentVariableRequestParams], direction="write"
+ ),
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ typing.List[FileEnvironmentVariableRequest],
+ construct_type(
+ type_=typing.List[FileEnvironmentVariableRequest], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def delete_environment_variable(
+ self, id: str, name: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncHttpResponse[typing.List[FileEnvironmentVariableRequest]]:
+ """
+ Parameters
+ ----------
+ id : str
+ Unique identifier for File.
+
+ name : str
+ Name of the Environment Variable to delete.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[typing.List[FileEnvironmentVariableRequest]]
+ Successful Response
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"tools/{jsonable_encoder(id)}/environment-variables/{jsonable_encoder(name)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ typing.List[FileEnvironmentVariableRequest],
+ construct_type(
+ type_=typing.List[FileEnvironmentVariableRequest], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/humanloop/types/__init__.py b/src/humanloop/types/__init__.py
index 156f4e9a..8130325d 100644
--- a/src/humanloop/types/__init__.py
+++ b/src/humanloop/types/__init__.py
@@ -1,15 +1,44 @@
# This file was auto-generated by Fern from our API Definition.
+from .agent_call_response import AgentCallResponse
+from .agent_call_response_tool_choice import AgentCallResponseToolChoice
+from .agent_call_stream_response import AgentCallStreamResponse
+from .agent_call_stream_response_payload import AgentCallStreamResponsePayload
from .agent_config_response import AgentConfigResponse
+from .agent_continue_response import AgentContinueResponse
+from .agent_continue_response_tool_choice import AgentContinueResponseToolChoice
+from .agent_continue_stream_response import AgentContinueStreamResponse
+from .agent_continue_stream_response_payload import AgentContinueStreamResponsePayload
+from .agent_inline_tool import AgentInlineTool
+from .agent_kernel_request import AgentKernelRequest
+from .agent_kernel_request_reasoning_effort import AgentKernelRequestReasoningEffort
+from .agent_kernel_request_stop import AgentKernelRequestStop
+from .agent_kernel_request_template import AgentKernelRequestTemplate
+from .agent_kernel_request_tools_item import AgentKernelRequestToolsItem
+from .agent_linked_file_request import AgentLinkedFileRequest
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_linked_file_response_file import AgentLinkedFileResponseFile
+from .agent_log_response import AgentLogResponse
+from .agent_log_response_tool_choice import AgentLogResponseToolChoice
+from .agent_log_stream_response import AgentLogStreamResponse
+from .agent_response import AgentResponse
+from .agent_response_reasoning_effort import AgentResponseReasoningEffort
+from .agent_response_stop import AgentResponseStop
+from .agent_response_template import AgentResponseTemplate
+from .agent_response_tools_item import AgentResponseToolsItem
+from .anthropic_redacted_thinking_content import AnthropicRedactedThinkingContent
+from .anthropic_thinking_content import AnthropicThinkingContent
from .base_models_user_response import BaseModelsUserResponse
from .boolean_evaluator_stats_response import BooleanEvaluatorStatsResponse
from .chat_message import ChatMessage
from .chat_message_content import ChatMessageContent
from .chat_message_content_item import ChatMessageContentItem
+from .chat_message_thinking_item import ChatMessageThinkingItem
from .chat_role import ChatRole
from .chat_tool_type import ChatToolType
from .code_evaluator_request import CodeEvaluatorRequest
from .config_tool_response import ConfigToolResponse
+from .create_agent_log_response import CreateAgentLogResponse
from .create_datapoint_request import CreateDatapointRequest
from .create_datapoint_request_target_value import CreateDatapointRequestTargetValue
from .create_evaluator_log_response import CreateEvaluatorLogResponse
@@ -56,10 +85,12 @@
from .evaluator_return_type_enum import EvaluatorReturnTypeEnum
from .evaluator_version_id import EvaluatorVersionId
from .evaluators_request import EvaluatorsRequest
+from .event_type import EventType
from .external_evaluator_request import ExternalEvaluatorRequest
from .feedback_type import FeedbackType
from .file_environment_response import FileEnvironmentResponse
from .file_environment_response_file import FileEnvironmentResponseFile
+from .file_environment_variable_request import FileEnvironmentVariableRequest
from .file_id import FileId
from .file_path import FilePath
from .file_request import FileRequest
@@ -77,7 +108,9 @@
from .image_url import ImageUrl
from .image_url_detail import ImageUrlDetail
from .input_response import InputResponse
+from .linked_file_request import LinkedFileRequest
from .linked_tool_response import LinkedToolResponse
+from .list_agents import ListAgents
from .list_datasets import ListDatasets
from .list_evaluators import ListEvaluators
from .list_flows import ListFlows
@@ -86,6 +119,7 @@
from .llm_evaluator_request import LlmEvaluatorRequest
from .log_response import LogResponse
from .log_status import LogStatus
+from .log_stream_response import LogStreamResponse
from .model_endpoints import ModelEndpoints
from .model_providers import ModelProviders
from .monitoring_evaluator_environment_request import MonitoringEvaluatorEnvironmentRequest
@@ -94,18 +128,21 @@
from .monitoring_evaluator_version_request import MonitoringEvaluatorVersionRequest
from .numeric_evaluator_stats_response import NumericEvaluatorStatsResponse
from .observability_status import ObservabilityStatus
+from .on_agent_call_enum import OnAgentCallEnum
+from .open_ai_reasoning_effort import OpenAiReasoningEffort
from .overall_stats import OverallStats
+from .paginated_data_agent_response import PaginatedDataAgentResponse
from .paginated_data_evaluation_log_response import PaginatedDataEvaluationLogResponse
from .paginated_data_evaluator_response import PaginatedDataEvaluatorResponse
from .paginated_data_flow_response import PaginatedDataFlowResponse
from .paginated_data_log_response import PaginatedDataLogResponse
from .paginated_data_prompt_response import PaginatedDataPromptResponse
from .paginated_data_tool_response import PaginatedDataToolResponse
-from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response import (
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse,
+from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response import (
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse,
)
-from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item import (
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem,
+from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item import (
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem,
)
from .paginated_datapoint_response import PaginatedDatapointResponse
from .paginated_dataset_response import PaginatedDatasetResponse
@@ -115,6 +152,7 @@
from .platform_access_enum import PlatformAccessEnum
from .populate_template_response import PopulateTemplateResponse
from .populate_template_response_populated_template import PopulateTemplateResponsePopulatedTemplate
+from .populate_template_response_reasoning_effort import PopulateTemplateResponseReasoningEffort
from .populate_template_response_stop import PopulateTemplateResponseStop
from .populate_template_response_template import PopulateTemplateResponseTemplate
from .project_sort_by import ProjectSortBy
@@ -123,15 +161,16 @@
from .prompt_call_response_tool_choice import PromptCallResponseToolChoice
from .prompt_call_stream_response import PromptCallStreamResponse
from .prompt_kernel_request import PromptKernelRequest
+from .prompt_kernel_request_reasoning_effort import PromptKernelRequestReasoningEffort
from .prompt_kernel_request_stop import PromptKernelRequestStop
from .prompt_kernel_request_template import PromptKernelRequestTemplate
from .prompt_log_response import PromptLogResponse
from .prompt_log_response_tool_choice import PromptLogResponseToolChoice
from .prompt_response import PromptResponse
+from .prompt_response_reasoning_effort import PromptResponseReasoningEffort
from .prompt_response_stop import PromptResponseStop
from .prompt_response_template import PromptResponseTemplate
from .provider_api_keys import ProviderApiKeys
-from .reasoning_effort import ReasoningEffort
from .response_format import ResponseFormat
from .response_format_type import ResponseFormatType
from .run_stats_response import RunStatsResponse
@@ -144,6 +183,7 @@
from .text_evaluator_stats_response import TextEvaluatorStatsResponse
from .time_unit import TimeUnit
from .tool_call import ToolCall
+from .tool_call_response import ToolCallResponse
from .tool_choice import ToolChoice
from .tool_function import ToolFunction
from .tool_kernel_request import ToolKernelRequest
@@ -167,16 +207,45 @@
from .version_status import VersionStatus
__all__ = [
+ "AgentCallResponse",
+ "AgentCallResponseToolChoice",
+ "AgentCallStreamResponse",
+ "AgentCallStreamResponsePayload",
"AgentConfigResponse",
+ "AgentContinueResponse",
+ "AgentContinueResponseToolChoice",
+ "AgentContinueStreamResponse",
+ "AgentContinueStreamResponsePayload",
+ "AgentInlineTool",
+ "AgentKernelRequest",
+ "AgentKernelRequestReasoningEffort",
+ "AgentKernelRequestStop",
+ "AgentKernelRequestTemplate",
+ "AgentKernelRequestToolsItem",
+ "AgentLinkedFileRequest",
+ "AgentLinkedFileResponse",
+ "AgentLinkedFileResponseFile",
+ "AgentLogResponse",
+ "AgentLogResponseToolChoice",
+ "AgentLogStreamResponse",
+ "AgentResponse",
+ "AgentResponseReasoningEffort",
+ "AgentResponseStop",
+ "AgentResponseTemplate",
+ "AgentResponseToolsItem",
+ "AnthropicRedactedThinkingContent",
+ "AnthropicThinkingContent",
"BaseModelsUserResponse",
"BooleanEvaluatorStatsResponse",
"ChatMessage",
"ChatMessageContent",
"ChatMessageContentItem",
+ "ChatMessageThinkingItem",
"ChatRole",
"ChatToolType",
"CodeEvaluatorRequest",
"ConfigToolResponse",
+ "CreateAgentLogResponse",
"CreateDatapointRequest",
"CreateDatapointRequestTargetValue",
"CreateEvaluatorLogResponse",
@@ -221,10 +290,12 @@
"EvaluatorReturnTypeEnum",
"EvaluatorVersionId",
"EvaluatorsRequest",
+ "EventType",
"ExternalEvaluatorRequest",
"FeedbackType",
"FileEnvironmentResponse",
"FileEnvironmentResponseFile",
+ "FileEnvironmentVariableRequest",
"FileId",
"FilePath",
"FileRequest",
@@ -242,7 +313,9 @@
"ImageUrl",
"ImageUrlDetail",
"InputResponse",
+ "LinkedFileRequest",
"LinkedToolResponse",
+ "ListAgents",
"ListDatasets",
"ListEvaluators",
"ListFlows",
@@ -251,6 +324,7 @@
"LlmEvaluatorRequest",
"LogResponse",
"LogStatus",
+ "LogStreamResponse",
"ModelEndpoints",
"ModelProviders",
"MonitoringEvaluatorEnvironmentRequest",
@@ -259,15 +333,18 @@
"MonitoringEvaluatorVersionRequest",
"NumericEvaluatorStatsResponse",
"ObservabilityStatus",
+ "OnAgentCallEnum",
+ "OpenAiReasoningEffort",
"OverallStats",
+ "PaginatedDataAgentResponse",
"PaginatedDataEvaluationLogResponse",
"PaginatedDataEvaluatorResponse",
"PaginatedDataFlowResponse",
"PaginatedDataLogResponse",
"PaginatedDataPromptResponse",
"PaginatedDataToolResponse",
- "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse",
- "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem",
+ "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse",
+ "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem",
"PaginatedDatapointResponse",
"PaginatedDatasetResponse",
"PaginatedEvaluationResponse",
@@ -276,6 +353,7 @@
"PlatformAccessEnum",
"PopulateTemplateResponse",
"PopulateTemplateResponsePopulatedTemplate",
+ "PopulateTemplateResponseReasoningEffort",
"PopulateTemplateResponseStop",
"PopulateTemplateResponseTemplate",
"ProjectSortBy",
@@ -284,15 +362,16 @@
"PromptCallResponseToolChoice",
"PromptCallStreamResponse",
"PromptKernelRequest",
+ "PromptKernelRequestReasoningEffort",
"PromptKernelRequestStop",
"PromptKernelRequestTemplate",
"PromptLogResponse",
"PromptLogResponseToolChoice",
"PromptResponse",
+ "PromptResponseReasoningEffort",
"PromptResponseStop",
"PromptResponseTemplate",
"ProviderApiKeys",
- "ReasoningEffort",
"ResponseFormat",
"ResponseFormatType",
"RunStatsResponse",
@@ -305,6 +384,7 @@
"TextEvaluatorStatsResponse",
"TimeUnit",
"ToolCall",
+ "ToolCallResponse",
"ToolChoice",
"ToolFunction",
"ToolKernelRequest",
diff --git a/src/humanloop/types/agent_call_response.py b/src/humanloop/types/agent_call_response.py
new file mode 100644
index 00000000..ba3bbfec
--- /dev/null
+++ b/src/humanloop/types/agent_call_response.py
@@ -0,0 +1,224 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
+from .evaluator_response import EvaluatorResponse
+from .flow_response import FlowResponse
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse
+from .prompt_response import PromptResponse
+from .tool_response import ToolResponse
+from .version_deployment_response import VersionDeploymentResponse
+from .version_id_response import VersionIdResponse
+from .agent_log_response import AgentLogResponse
+from .evaluator_log_response import EvaluatorLogResponse
+from .flow_log_response import FlowLogResponse
+from .prompt_log_response import PromptLogResponse
+from .tool_log_response import ToolLogResponse
+import typing
+from .chat_message import ChatMessage
+import pydantic
+from .agent_call_response_tool_choice import AgentCallResponseToolChoice
+import datetime as dt
+from .log_status import LogStatus
+from .log_response import LogResponse
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class AgentCallResponse(UncheckedBaseModel):
+ """
+ Response model for an Agent call.
+ """
+
+ output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
+ """
+ The message returned by the provider.
+ """
+
+ prompt_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of tokens in the prompt used to generate the output.
+ """
+
+ reasoning_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of reasoning tokens used to generate the output.
+ """
+
+ output_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of tokens in the output generated by the model.
+ """
+
+ prompt_cost: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Cost in dollars associated with the tokens in the prompt.
+ """
+
+ output_cost: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Cost in dollars associated to the tokens in the output.
+ """
+
+ finish_reason: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Reason the generation finished.
+ """
+
+ messages: typing.Optional[typing.List[ChatMessage]] = pydantic.Field(default=None)
+ """
+ The messages passed to the provider chat endpoint.
+ """
+
+ tool_choice: typing.Optional[AgentCallResponseToolChoice] = pydantic.Field(default=None)
+ """
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': <function_name>}}` forces the model to use the named function.
+ """
+
+ agent: AgentResponse = pydantic.Field()
+ """
+ Agent that generated the Log.
+ """
+
+ start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ When the logged event started.
+ """
+
+ end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ When the logged event ended.
+ """
+
+ output: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ User defined timestamp for when the log was created.
+ """
+
+ error: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Captured log and debug statements.
+ """
+
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Raw request sent to provider.
+ """
+
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Raw response received from the provider.
+ """
+
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ The inputs passed to the prompt template.
+ """
+
+ source: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifies where the model was called from.
+ """
+
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Any additional metadata to record.
+ """
+
+ log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
+ """
+ Status of the Agent Log. If `incomplete`, the Agent turn was suspended due to a tool call and can be continued by calling /agents/continue with responses to the Agent's last message (which should contain tool calls). See the `previous_agent_message` field for easy access to the Agent's last message.
+ """
+
+ source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs with Evaluations. If provided, Humanloop will automatically associate this Log with Evaluations that require a Log for this Datapoint-Version pair.
+ """
+
+ trace_parent_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The ID of the parent Log to nest this Log under in a Trace.
+ """
+
+ batches: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
+ """
+ Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations.
+ """
+
+ user: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ End-user ID related to the Log.
+ """
+
+ environment: typing.Optional[str] = pydantic.Field(default=None)
+ """
+    The name of the Environment the Log is associated with.
+ """
+
+ save: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ Whether the request/response payloads will be stored on Humanloop.
+ """
+
+ log_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ """
+
+ id: str = pydantic.Field()
+ """
+ Unique identifier for the Log.
+ """
+
+ evaluator_logs: typing.List[EvaluatorLogResponse] = pydantic.Field()
+ """
+ List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
+ """
+
+ trace_flow_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifier for the Flow that the Trace belongs to.
+ """
+
+ trace_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifier for the Trace that the Log belongs to.
+ """
+
+ trace_children: typing.Optional[typing.List[LogResponse]] = pydantic.Field(default=None)
+ """
+ Logs nested under this Log in the Trace.
+ """
+
+ previous_agent_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
+ """
+ The Agent's last message, which should contain tool calls. Only populated if the Log is incomplete due to a suspended Agent turn with tool calls. This is useful for continuing the Agent call by calling /agents/continue.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
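
The `log_status` and `previous_agent_message` fields above are what let a caller resume a suspended Agent turn. A minimal usage sketch, assuming the generated client exposes an `agents.call` method with `path`/`messages` parameters and that `LogStatus` serializes as plain strings (both assumptions, not verified API):

    from humanloop import Humanloop  # assumed top-level export

    client = Humanloop(api_key="...")
    response = client.agents.call(
        path="my-agent",
        messages=[{"role": "user", "content": "What's the weather in Paris?"}],
    )
    if response.log_status == "incomplete":
        # The turn was suspended on tool calls; they sit on the Agent's last message.
        for tool_call in response.previous_agent_message.tool_calls or []:
            print(tool_call)  # execute the tool, then resume via /agents/continue
    else:
        print(response.output)
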
diff --git a/src/humanloop/types/agent_call_response_tool_choice.py b/src/humanloop/types/agent_call_response_tool_choice.py
new file mode 100644
index 00000000..95eca73e
--- /dev/null
+++ b/src/humanloop/types/agent_call_response_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .tool_choice import ToolChoice
+
+AgentCallResponseToolChoice = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice
+]
diff --git a/src/humanloop/types/agent_call_stream_response.py b/src/humanloop/types/agent_call_stream_response.py
new file mode 100644
index 00000000..673d3738
--- /dev/null
+++ b/src/humanloop/types/agent_call_stream_response.py
@@ -0,0 +1,44 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_log_response import AgentLogResponse
+from .agent_response import AgentResponse
+from .evaluator_log_response import EvaluatorLogResponse
+from .evaluator_response import EvaluatorResponse
+from .flow_log_response import FlowLogResponse
+from .flow_response import FlowResponse
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse
+from .prompt_log_response import PromptLogResponse
+from .prompt_response import PromptResponse
+from .tool_log_response import ToolLogResponse
+from .tool_response import ToolResponse
+from .version_deployment_response import VersionDeploymentResponse
+from .version_id_response import VersionIdResponse
+import typing
+from .agent_call_stream_response_payload import AgentCallStreamResponsePayload
+from .event_type import EventType
+import datetime as dt
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class AgentCallStreamResponse(UncheckedBaseModel):
+ """
+    Response model for calling an Agent in streaming mode.
+ """
+
+ log_id: str
+ message: str
+ payload: typing.Optional[AgentCallStreamResponsePayload] = None
+ type: EventType
+ created_at: dt.datetime
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/agent_call_stream_response_payload.py b/src/humanloop/types/agent_call_stream_response_payload.py
new file mode 100644
index 00000000..85422047
--- /dev/null
+++ b/src/humanloop/types/agent_call_stream_response_payload.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .log_stream_response import LogStreamResponse
+from .log_response import LogResponse
+from .tool_call import ToolCall
+
+AgentCallStreamResponsePayload = typing.Union[LogStreamResponse, LogResponse, ToolCall]
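
A hedged sketch of consuming these stream events, reusing the `client` from the earlier sketch and assuming a generator-returning `agents.call_stream` method (the method name is an assumption). The `payload` union defined just above can be discriminated with `isinstance`, since all three members are concrete types:

    from humanloop.types.tool_call import ToolCall

    for event in client.agents.call_stream(
        path="my-agent",
        messages=[{"role": "user", "content": "Hi"}],
    ):
        # payload is LogStreamResponse | LogResponse | ToolCall, or None.
        if isinstance(event.payload, ToolCall):
            print("tool call requested:", event.payload)
        elif event.payload is not None:
            print(event.type, type(event.payload).__name__)
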
diff --git a/src/humanloop/types/agent_continue_response.py b/src/humanloop/types/agent_continue_response.py
new file mode 100644
index 00000000..0bbd7858
--- /dev/null
+++ b/src/humanloop/types/agent_continue_response.py
@@ -0,0 +1,224 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
+from .evaluator_response import EvaluatorResponse
+from .flow_response import FlowResponse
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse
+from .prompt_response import PromptResponse
+from .tool_response import ToolResponse
+from .version_deployment_response import VersionDeploymentResponse
+from .version_id_response import VersionIdResponse
+from .agent_log_response import AgentLogResponse
+from .evaluator_log_response import EvaluatorLogResponse
+from .flow_log_response import FlowLogResponse
+from .prompt_log_response import PromptLogResponse
+from .tool_log_response import ToolLogResponse
+import typing
+from .chat_message import ChatMessage
+import pydantic
+from .agent_continue_response_tool_choice import AgentContinueResponseToolChoice
+import datetime as dt
+from .log_status import LogStatus
+from .log_response import LogResponse
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class AgentContinueResponse(UncheckedBaseModel):
+ """
+ Response model for continuing an Agent call.
+ """
+
+ output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
+ """
+ The message returned by the provider.
+ """
+
+ prompt_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of tokens in the prompt used to generate the output.
+ """
+
+ reasoning_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of reasoning tokens used to generate the output.
+ """
+
+ output_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of tokens in the output generated by the model.
+ """
+
+ prompt_cost: typing.Optional[float] = pydantic.Field(default=None)
+ """
+    Cost in dollars associated with the tokens in the prompt.
+ """
+
+ output_cost: typing.Optional[float] = pydantic.Field(default=None)
+ """
+    Cost in dollars associated with the tokens in the output.
+ """
+
+ finish_reason: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Reason the generation finished.
+ """
+
+ messages: typing.Optional[typing.List[ChatMessage]] = pydantic.Field(default=None)
+ """
+    The messages passed to the provider chat endpoint.
+ """
+
+ tool_choice: typing.Optional[AgentContinueResponseToolChoice] = pydantic.Field(default=None)
+ """
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+    - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+ """
+
+ agent: AgentResponse = pydantic.Field()
+ """
+ Agent that generated the Log.
+ """
+
+ start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ When the logged event started.
+ """
+
+ end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ When the logged event ended.
+ """
+
+ output: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+    User-defined timestamp for when the log was created.
+ """
+
+ error: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Captured log and debug statements.
+ """
+
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Raw request sent to provider.
+ """
+
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+    Raw response received from the provider.
+ """
+
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ The inputs passed to the prompt template.
+ """
+
+ source: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifies where the model was called from.
+ """
+
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Any additional metadata to record.
+ """
+
+ log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
+ """
+ Status of the Agent Log. If `incomplete`, the Agent turn was suspended due to a tool call and can be continued by calling /agents/continue with responses to the Agent's last message (which should contain tool calls). See the `previous_agent_message` field for easy access to the Agent's last message.
+ """
+
+ source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+    Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs with Evaluations. If provided, Humanloop will automatically associate this Log with Evaluations that require a Log for this Datapoint-Version pair.
+ """
+
+ trace_parent_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The ID of the parent Log to nest this Log under in a Trace.
+ """
+
+ batches: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
+ """
+    Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations.
+ """
+
+ user: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ End-user ID related to the Log.
+ """
+
+ environment: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The name of the Environment the Log is associated to.
+ """
+
+ save: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ Whether the request/response payloads will be stored on Humanloop.
+ """
+
+ log_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ """
+
+ id: str = pydantic.Field()
+ """
+ Unique identifier for the Log.
+ """
+
+ evaluator_logs: typing.List[EvaluatorLogResponse] = pydantic.Field()
+ """
+ List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
+ """
+
+ trace_flow_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifier for the Flow that the Trace belongs to.
+ """
+
+ trace_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifier for the Trace that the Log belongs to.
+ """
+
+ trace_children: typing.Optional[typing.List[LogResponse]] = pydantic.Field(default=None)
+ """
+ Logs nested under this Log in the Trace.
+ """
+
+ previous_agent_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
+ """
+ The Agent's last message, which should contain tool calls. Only populated if the Log is incomplete due to a suspended Agent turn with tool calls. This is useful for continuing the Agent call by calling /agents/continue.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
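
Putting `AgentCallResponse` and `AgentContinueResponse` together gives the canonical tool-execution loop. In this sketch, `agents.continue_`, the `ToolCall.function.arguments` shape, and the `run_tool` dispatcher are all assumptions rather than verified API:

    import json

    def run_tool(tool_call) -> str:
        # Hypothetical dispatcher: execute the requested tool, return its result.
        args = json.loads(tool_call.function.arguments)  # assumed ToolCall shape
        return json.dumps({"ok": True, "args": args})

    response = client.agents.call(
        path="my-agent",
        messages=[{"role": "user", "content": "Book me a flight."}],
    )
    while response.log_status == "incomplete":
        tool_results = [
            {"role": "tool", "tool_call_id": tc.id, "content": run_tool(tc)}
            for tc in (response.previous_agent_message.tool_calls or [])
        ]
        response = client.agents.continue_(log_id=response.log_id, messages=tool_results)
    print(response.output)
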
diff --git a/src/humanloop/types/agent_continue_response_tool_choice.py b/src/humanloop/types/agent_continue_response_tool_choice.py
new file mode 100644
index 00000000..20f3fb75
--- /dev/null
+++ b/src/humanloop/types/agent_continue_response_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .tool_choice import ToolChoice
+
+AgentContinueResponseToolChoice = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice
+]
diff --git a/src/humanloop/types/agent_continue_stream_response.py b/src/humanloop/types/agent_continue_stream_response.py
new file mode 100644
index 00000000..ff7a0fac
--- /dev/null
+++ b/src/humanloop/types/agent_continue_stream_response.py
@@ -0,0 +1,44 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_log_response import AgentLogResponse
+from .agent_response import AgentResponse
+from .evaluator_log_response import EvaluatorLogResponse
+from .evaluator_response import EvaluatorResponse
+from .flow_log_response import FlowLogResponse
+from .flow_response import FlowResponse
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse
+from .prompt_log_response import PromptLogResponse
+from .prompt_response import PromptResponse
+from .tool_log_response import ToolLogResponse
+from .tool_response import ToolResponse
+from .version_deployment_response import VersionDeploymentResponse
+from .version_id_response import VersionIdResponse
+import typing
+from .agent_continue_stream_response_payload import AgentContinueStreamResponsePayload
+from .event_type import EventType
+import datetime as dt
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class AgentContinueStreamResponse(UncheckedBaseModel):
+ """
+ Response model for continuing an Agent call in streaming mode.
+ """
+
+ log_id: str
+ message: str
+ payload: typing.Optional[AgentContinueStreamResponsePayload] = None
+ type: EventType
+ created_at: dt.datetime
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/agent_continue_stream_response_payload.py b/src/humanloop/types/agent_continue_stream_response_payload.py
new file mode 100644
index 00000000..0e5f8a58
--- /dev/null
+++ b/src/humanloop/types/agent_continue_stream_response_payload.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .log_stream_response import LogStreamResponse
+from .log_response import LogResponse
+from .tool_call import ToolCall
+
+AgentContinueStreamResponsePayload = typing.Union[LogStreamResponse, LogResponse, ToolCall]
diff --git a/src/humanloop/types/agent_inline_tool.py b/src/humanloop/types/agent_inline_tool.py
new file mode 100644
index 00000000..dc618c35
--- /dev/null
+++ b/src/humanloop/types/agent_inline_tool.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .tool_function import ToolFunction
+from .on_agent_call_enum import OnAgentCallEnum
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class AgentInlineTool(UncheckedBaseModel):
+ type: typing.Literal["inline"] = "inline"
+ json_schema: ToolFunction
+ on_agent_call: typing.Optional[OnAgentCallEnum] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/agent_kernel_request.py b/src/humanloop/types/agent_kernel_request.py
new file mode 100644
index 00000000..6503b104
--- /dev/null
+++ b/src/humanloop/types/agent_kernel_request.py
@@ -0,0 +1,122 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import pydantic
+import typing
+from .model_endpoints import ModelEndpoints
+from .agent_kernel_request_template import AgentKernelRequestTemplate
+from .template_language import TemplateLanguage
+from .model_providers import ModelProviders
+from .agent_kernel_request_stop import AgentKernelRequestStop
+from .response_format import ResponseFormat
+from .agent_kernel_request_reasoning_effort import AgentKernelRequestReasoningEffort
+from .agent_kernel_request_tools_item import AgentKernelRequestToolsItem
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class AgentKernelRequest(UncheckedBaseModel):
+ """
+    Agent analogue of PromptKernelRequest.
+
+    Contains the Prompt-related fields shared between the two, plus Agent-specific fields such as `tools` and `max_iterations`.
+ """
+
+ model: str = pydantic.Field()
+ """
+ The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+ """
+
+ endpoint: typing.Optional[ModelEndpoints] = pydantic.Field(default=None)
+ """
+ The provider model endpoint used.
+ """
+
+ template: typing.Optional[AgentKernelRequestTemplate] = pydantic.Field(default=None)
+ """
+ The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+ For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+ For completion models, provide a prompt template as a string.
+
+ Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+ """
+
+ template_language: typing.Optional[TemplateLanguage] = pydantic.Field(default=None)
+ """
+ The template language to use for rendering the template.
+ """
+
+ provider: typing.Optional[ModelProviders] = pydantic.Field(default=None)
+ """
+ The company providing the underlying model service.
+ """
+
+ max_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+    The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt.
+ """
+
+ temperature: typing.Optional[float] = pydantic.Field(default=None)
+ """
+    What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+ """
+
+ top_p: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+ """
+
+ stop: typing.Optional[AgentKernelRequestStop] = pydantic.Field(default=None)
+ """
+ The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+ """
+
+ presence_penalty: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+ """
+
+ frequency_penalty: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+ """
+
+ other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Other parameter values to be passed to the provider call.
+ """
+
+ seed: typing.Optional[int] = pydantic.Field(default=None)
+ """
+    If specified, the model will make a best effort to sample deterministically, but it is not guaranteed.
+ """
+
+ response_format: typing.Optional[ResponseFormat] = pydantic.Field(default=None)
+ """
+ The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+ """
+
+ reasoning_effort: typing.Optional[AgentKernelRequestReasoningEffort] = pydantic.Field(default=None)
+ """
+    Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which specifies the maximum token budget.
+ """
+
+ tools: typing.Optional[typing.List[AgentKernelRequestToolsItem]] = None
+ attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+    Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used.
+ """
+
+ max_iterations: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
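
For orientation, here is a direct construction of this kernel; the field values are illustrative only, and the dict-shaped messages rely on Pydantic coercing them into `ChatMessage` objects:

    from humanloop.types.agent_kernel_request import AgentKernelRequest

    kernel = AgentKernelRequest(
        model="gpt-4",
        template=[
            {"role": "system", "content": "You are a research agent."},
            {"role": "user", "content": "{{query}}"},
        ],
        temperature=0.2,
        max_iterations=5,  # cap on how many times the Agent model is called
    )
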
diff --git a/src/humanloop/types/agent_kernel_request_reasoning_effort.py b/src/humanloop/types/agent_kernel_request_reasoning_effort.py
new file mode 100644
index 00000000..a8e8e98b
--- /dev/null
+++ b/src/humanloop/types/agent_kernel_request_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .open_ai_reasoning_effort import OpenAiReasoningEffort
+
+AgentKernelRequestReasoningEffort = typing.Union[OpenAiReasoningEffort, int]
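
Both branches of the union in practice, assuming `OpenAiReasoningEffort` serializes as the usual "low" / "medium" / "high" strings:

    from humanloop.types.agent_kernel_request_reasoning_effort import (
        AgentKernelRequestReasoningEffort,
    )

    openai_style: AgentKernelRequestReasoningEffort = "high"   # OpenAI o1 / o3-mini
    anthropic_style: AgentKernelRequestReasoningEffort = 4096  # Anthropic max thinking-token budget
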
diff --git a/src/humanloop/types/agent_kernel_request_stop.py b/src/humanloop/types/agent_kernel_request_stop.py
new file mode 100644
index 00000000..e38c12e2
--- /dev/null
+++ b/src/humanloop/types/agent_kernel_request_stop.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentKernelRequestStop = typing.Union[str, typing.List[str]]
diff --git a/src/humanloop/types/agent_kernel_request_template.py b/src/humanloop/types/agent_kernel_request_template.py
new file mode 100644
index 00000000..31a351f2
--- /dev/null
+++ b/src/humanloop/types/agent_kernel_request_template.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .chat_message import ChatMessage
+
+AgentKernelRequestTemplate = typing.Union[str, typing.List[ChatMessage]]
diff --git a/src/humanloop/types/agent_kernel_request_tools_item.py b/src/humanloop/types/agent_kernel_request_tools_item.py
new file mode 100644
index 00000000..82c2fecf
--- /dev/null
+++ b/src/humanloop/types/agent_kernel_request_tools_item.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .agent_linked_file_request import AgentLinkedFileRequest
+from .agent_inline_tool import AgentInlineTool
+
+AgentKernelRequestToolsItem = typing.Union[AgentLinkedFileRequest, AgentInlineTool]
diff --git a/src/humanloop/types/agent_linked_file_request.py b/src/humanloop/types/agent_linked_file_request.py
new file mode 100644
index 00000000..9efd4b6a
--- /dev/null
+++ b/src/humanloop/types/agent_linked_file_request.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .linked_file_request import LinkedFileRequest
+from .on_agent_call_enum import OnAgentCallEnum
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class AgentLinkedFileRequest(UncheckedBaseModel):
+ type: typing.Literal["file"] = "file"
+ link: LinkedFileRequest
+ on_agent_call: typing.Optional[OnAgentCallEnum] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
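
The two tool shapes an Agent version can declare, side by side. This sketch assumes `ToolFunction` follows the OpenAI function-definition shape and treats the `LinkedFileRequest` payload as hypothetical; check the generated types before relying on either:

    from humanloop.types.agent_inline_tool import AgentInlineTool
    from humanloop.types.agent_linked_file_request import AgentLinkedFileRequest

    inline_tool = AgentInlineTool(
        json_schema={
            "name": "get_weather",
            "description": "Look up current weather for a city.",
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
            },
        },
    )
    linked_tool = AgentLinkedFileRequest(
        link={"file_id": "tl_123abc"},  # hypothetical LinkedFileRequest payload
    )
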
diff --git a/src/humanloop/types/agent_linked_file_response.py b/src/humanloop/types/agent_linked_file_response.py
new file mode 100644
index 00000000..d85d682e
--- /dev/null
+++ b/src/humanloop/types/agent_linked_file_response.py
@@ -0,0 +1,39 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .linked_file_request import LinkedFileRequest
+from .on_agent_call_enum import OnAgentCallEnum
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+from ..core.pydantic_utilities import update_forward_refs
+
+
+class AgentLinkedFileResponse(UncheckedBaseModel):
+ type: typing.Literal["file"] = "file"
+ link: LinkedFileRequest
+ on_agent_call: typing.Optional[OnAgentCallEnum] = None
+ file: typing.Optional["AgentLinkedFileResponseFile"] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+from .agent_response import AgentResponse # noqa: E402
+from .evaluator_response import EvaluatorResponse # noqa: E402
+from .flow_response import FlowResponse # noqa: E402
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402
+from .prompt_response import PromptResponse # noqa: E402
+from .tool_response import ToolResponse # noqa: E402
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402
+from .version_id_response import VersionIdResponse # noqa: E402
+from .agent_linked_file_response_file import AgentLinkedFileResponseFile # noqa: E402
+
+update_forward_refs(AgentLinkedFileResponse)
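
The deferred imports and the closing `update_forward_refs` call are how the generated code breaks the import cycles between these response types: annotations are declared as strings first, then resolved once every module is loaded. The same pattern in miniature (Pydantic v2 spelling):

    from __future__ import annotations

    import typing

    import pydantic

    class Node(pydantic.BaseModel):
        value: int
        children: typing.Optional[typing.List["Node"]] = None

    Node.model_rebuild()  # v2 counterpart of update_forward_refs()

    root = Node(value=1, children=[Node(value=2), Node(value=3)])
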
diff --git a/src/humanloop/types/agent_linked_file_response_file.py b/src/humanloop/types/agent_linked_file_response_file.py
new file mode 100644
index 00000000..42d38fe4
--- /dev/null
+++ b/src/humanloop/types/agent_linked_file_response_file.py
@@ -0,0 +1,15 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+import typing
+from .dataset_response import DatasetResponse
+
+if typing.TYPE_CHECKING:
+ from .prompt_response import PromptResponse
+ from .tool_response import ToolResponse
+ from .evaluator_response import EvaluatorResponse
+ from .flow_response import FlowResponse
+ from .agent_response import AgentResponse
+AgentLinkedFileResponseFile = typing.Union[
+ "PromptResponse", "ToolResponse", DatasetResponse, "EvaluatorResponse", "FlowResponse", "AgentResponse"
+]
diff --git a/src/humanloop/types/agent_log_response.py b/src/humanloop/types/agent_log_response.py
new file mode 100644
index 00000000..f5b5e8e8
--- /dev/null
+++ b/src/humanloop/types/agent_log_response.py
@@ -0,0 +1,224 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
+from .evaluator_response import EvaluatorResponse
+from .flow_response import FlowResponse
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse
+from .prompt_response import PromptResponse
+from .tool_response import ToolResponse
+from .version_deployment_response import VersionDeploymentResponse
+from .version_id_response import VersionIdResponse
+import typing
+from .chat_message import ChatMessage
+import pydantic
+from .agent_log_response_tool_choice import AgentLogResponseToolChoice
+import datetime as dt
+from .log_status import LogStatus
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.pydantic_utilities import update_forward_refs
+
+
+class AgentLogResponse(UncheckedBaseModel):
+ """
+    General response model for an Agent Log.
+ """
+
+ output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
+ """
+ The message returned by the provider.
+ """
+
+ prompt_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of tokens in the prompt used to generate the output.
+ """
+
+ reasoning_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of reasoning tokens used to generate the output.
+ """
+
+ output_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of tokens in the output generated by the model.
+ """
+
+ prompt_cost: typing.Optional[float] = pydantic.Field(default=None)
+ """
+    Cost in dollars associated with the tokens in the prompt.
+ """
+
+ output_cost: typing.Optional[float] = pydantic.Field(default=None)
+ """
+    Cost in dollars associated with the tokens in the output.
+ """
+
+ finish_reason: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Reason the generation finished.
+ """
+
+ messages: typing.Optional[typing.List[ChatMessage]] = pydantic.Field(default=None)
+ """
+    The messages passed to the provider chat endpoint.
+ """
+
+ tool_choice: typing.Optional[AgentLogResponseToolChoice] = pydantic.Field(default=None)
+ """
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+    - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+ """
+
+ agent: AgentResponse = pydantic.Field()
+ """
+ Agent that generated the Log.
+ """
+
+ start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ When the logged event started.
+ """
+
+ end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ When the logged event ended.
+ """
+
+ output: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+    User-defined timestamp for when the log was created.
+ """
+
+ error: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Captured log and debug statements.
+ """
+
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Raw request sent to provider.
+ """
+
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+    Raw response received from the provider.
+ """
+
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ The inputs passed to the prompt template.
+ """
+
+ source: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifies where the model was called from.
+ """
+
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Any additional metadata to record.
+ """
+
+ log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
+ """
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+ """
+
+ source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+    Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs with Evaluations. If provided, Humanloop will automatically associate this Log with Evaluations that require a Log for this Datapoint-Version pair.
+ """
+
+ trace_parent_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The ID of the parent Log to nest this Log under in a Trace.
+ """
+
+ batches: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
+ """
+    Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations.
+ """
+
+ user: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ End-user ID related to the Log.
+ """
+
+ environment: typing.Optional[str] = pydantic.Field(default=None)
+ """
+    The name of the Environment the Log is associated with.
+ """
+
+ save: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ Whether the request/response payloads will be stored on Humanloop.
+ """
+
+ log_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ """
+
+ id: str = pydantic.Field()
+ """
+ Unique identifier for the Log.
+ """
+
+ evaluator_logs: typing.List["EvaluatorLogResponse"] = pydantic.Field()
+ """
+ List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
+ """
+
+ trace_flow_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifier for the Flow that the Trace belongs to.
+ """
+
+ trace_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifier for the Trace that the Log belongs to.
+ """
+
+ trace_children: typing.Optional[typing.List["LogResponse"]] = pydantic.Field(default=None)
+ """
+ Logs nested under this Log in the Trace.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+from .evaluator_log_response import EvaluatorLogResponse # noqa: E402
+from .flow_log_response import FlowLogResponse # noqa: E402
+from .prompt_log_response import PromptLogResponse # noqa: E402
+from .tool_log_response import ToolLogResponse # noqa: E402
+from .log_response import LogResponse # noqa: E402
+
+update_forward_refs(AgentLogResponse)
diff --git a/src/humanloop/types/agent_log_response_tool_choice.py b/src/humanloop/types/agent_log_response_tool_choice.py
new file mode 100644
index 00000000..5cb07628
--- /dev/null
+++ b/src/humanloop/types/agent_log_response_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .tool_choice import ToolChoice
+
+AgentLogResponseToolChoice = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice
+]
diff --git a/src/humanloop/types/agent_log_stream_response.py b/src/humanloop/types/agent_log_stream_response.py
new file mode 100644
index 00000000..91547189
--- /dev/null
+++ b/src/humanloop/types/agent_log_stream_response.py
@@ -0,0 +1,98 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+import pydantic
+import datetime as dt
+from .chat_message import ChatMessage
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class AgentLogStreamResponse(UncheckedBaseModel):
+ """
+    Agent-specific log output returned when streaming an Agent call.
+ """
+
+ output: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+    User-defined timestamp for when the log was created.
+ """
+
+ error: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Captured log and debug statements.
+ """
+
+ output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
+ """
+ The message returned by the provider.
+ """
+
+ prompt_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of tokens in the prompt used to generate the output.
+ """
+
+ reasoning_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of reasoning tokens used to generate the output.
+ """
+
+ output_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of tokens in the output generated by the model.
+ """
+
+ prompt_cost: typing.Optional[float] = pydantic.Field(default=None)
+ """
+    Cost in dollars associated with the tokens in the prompt.
+ """
+
+ output_cost: typing.Optional[float] = pydantic.Field(default=None)
+ """
+    Cost in dollars associated with the tokens in the output.
+ """
+
+ finish_reason: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Reason the generation finished.
+ """
+
+ id: str = pydantic.Field()
+ """
+ ID of the log.
+ """
+
+ agent_id: str = pydantic.Field()
+ """
+ ID of the Agent the log belongs to.
+ """
+
+ version_id: str = pydantic.Field()
+ """
+ ID of the specific version of the Agent.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
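
Every usage and cost field above is optional, since providers do not always report usage, so downstream accounting needs `None` guards; a small sketch grounded in the fields defined here:

    from humanloop.types.agent_log_stream_response import AgentLogStreamResponse

    def total_cost(log: AgentLogStreamResponse) -> float:
        return (log.prompt_cost or 0.0) + (log.output_cost or 0.0)

    def total_tokens(log: AgentLogStreamResponse) -> int:
        return (log.prompt_tokens or 0) + (log.reasoning_tokens or 0) + (log.output_tokens or 0)
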
diff --git a/src/humanloop/types/agent_response.py b/src/humanloop/types/agent_response.py
new file mode 100644
index 00000000..0487d7b7
--- /dev/null
+++ b/src/humanloop/types/agent_response.py
@@ -0,0 +1,265 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+from ..core.unchecked_base_model import UncheckedBaseModel
+import pydantic
+import typing
+from .model_endpoints import ModelEndpoints
+from .agent_response_template import AgentResponseTemplate
+from .template_language import TemplateLanguage
+from .model_providers import ModelProviders
+from .agent_response_stop import AgentResponseStop
+from .response_format import ResponseFormat
+from .agent_response_reasoning_effort import AgentResponseReasoningEffort
+import typing_extensions
+from ..core.serialization import FieldMetadata
+from .environment_response import EnvironmentResponse
+import datetime as dt
+from .user_response import UserResponse
+from .version_status import VersionStatus
+from .input_response import InputResponse
+from .evaluator_aggregate import EvaluatorAggregate
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.pydantic_utilities import update_forward_refs
+
+
+class AgentResponse(UncheckedBaseModel):
+ """
+ Base type that all File Responses should inherit from.
+
+ Attributes defined here are common to all File Responses and should be overridden
+ in the inheriting classes with documentation and appropriate Field definitions.
+ """
+
+ path: str = pydantic.Field()
+ """
+ Path of the Agent, including the name, which is used as a unique identifier.
+ """
+
+ id: str = pydantic.Field()
+ """
+ Unique identifier for the Agent.
+ """
+
+ directory_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ ID of the directory that the file is in on Humanloop.
+ """
+
+ model: str = pydantic.Field()
+ """
+ The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+ """
+
+ endpoint: typing.Optional[ModelEndpoints] = pydantic.Field(default=None)
+ """
+ The provider model endpoint used.
+ """
+
+ template: typing.Optional[AgentResponseTemplate] = pydantic.Field(default=None)
+ """
+ The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+ For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+ For completion models, provide a prompt template as a string.
+
+ Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+ """
+
+ template_language: typing.Optional[TemplateLanguage] = pydantic.Field(default=None)
+ """
+ The template language to use for rendering the template.
+ """
+
+ provider: typing.Optional[ModelProviders] = pydantic.Field(default=None)
+ """
+ The company providing the underlying model service.
+ """
+
+ max_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+    The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt.
+ """
+
+ temperature: typing.Optional[float] = pydantic.Field(default=None)
+ """
+    What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+ """
+
+ top_p: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+ """
+
+ stop: typing.Optional[AgentResponseStop] = pydantic.Field(default=None)
+ """
+ The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+ """
+
+ presence_penalty: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+ """
+
+ frequency_penalty: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+ """
+
+ other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Other parameter values to be passed to the provider call.
+ """
+
+ seed: typing.Optional[int] = pydantic.Field(default=None)
+ """
+    If specified, the model will make a best effort to sample deterministically, but it is not guaranteed.
+ """
+
+ response_format: typing.Optional[ResponseFormat] = pydantic.Field(default=None)
+ """
+ The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+ """
+
+ reasoning_effort: typing.Optional[AgentResponseReasoningEffort] = pydantic.Field(default=None)
+ """
+    Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which specifies the maximum token budget.
+ """
+
+ tools: typing.List["AgentResponseToolsItem"] = pydantic.Field()
+ """
+ List of tools that the Agent can call. These can be linked files or inline tools.
+ """
+
+ attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+    Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used.
+ """
+
+ max_iterations: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
+ """
+
+ version_name: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Unique name for the Agent version. Version names must be unique for a given Agent.
+ """
+
+ version_description: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Description of the version, e.g., the changes made in this version.
+ """
+
+ description: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Description of the Agent.
+ """
+
+ tags: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
+ """
+ List of tags associated with the file.
+ """
+
+ readme: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Long description of the file.
+ """
+
+ name: str = pydantic.Field()
+ """
+ Name of the Agent.
+ """
+
+ schema_: typing_extensions.Annotated[
+ typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema")
+ ] = pydantic.Field(default=None)
+ """
+    The JSON schema for the Agent.
+ """
+
+ version_id: str = pydantic.Field()
+ """
+    Unique identifier for the specific Agent Version. If no query params are provided, the default deployed Agent Version is returned.
+ """
+
+ type: typing.Optional[typing.Literal["agent"]] = None
+ environments: typing.Optional[typing.List[EnvironmentResponse]] = pydantic.Field(default=None)
+ """
+ The list of environments the Agent Version is deployed to.
+ """
+
+ created_at: dt.datetime
+ updated_at: dt.datetime
+ created_by: typing.Optional[UserResponse] = pydantic.Field(default=None)
+ """
+ The user who created the Agent.
+ """
+
+ committed_by: typing.Optional[UserResponse] = pydantic.Field(default=None)
+ """
+ The user who committed the Agent Version.
+ """
+
+ committed_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ The date and time the Agent Version was committed.
+ """
+
+ status: VersionStatus = pydantic.Field()
+ """
+ The status of the Agent Version.
+ """
+
+ last_used_at: dt.datetime
+ version_logs_count: int = pydantic.Field()
+ """
+    The number of logs that have been generated for this Agent Version.
+ """
+
+ total_logs_count: int = pydantic.Field()
+ """
+    The number of logs that have been generated across all Agent Versions.
+ """
+
+ inputs: typing.List[InputResponse] = pydantic.Field()
+ """
+    Inputs associated with the Agent. Inputs correspond to any of the variables used within the Agent template.
+ """
+
+ evaluators: typing.Optional[typing.List["MonitoringEvaluatorResponse"]] = pydantic.Field(default=None)
+ """
+ Evaluators that have been attached to this Agent that are used for monitoring logs.
+ """
+
+ evaluator_aggregates: typing.Optional[typing.List[EvaluatorAggregate]] = pydantic.Field(default=None)
+ """
+ Aggregation of Evaluator results for the Agent Version.
+ """
+
+ content: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The serialized kernel for the Agent. Corresponds to the .agent file.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402
+from .evaluator_response import EvaluatorResponse # noqa: E402
+from .flow_response import FlowResponse # noqa: E402
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402
+from .prompt_response import PromptResponse # noqa: E402
+from .tool_response import ToolResponse # noqa: E402
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402
+from .version_id_response import VersionIdResponse # noqa: E402
+from .agent_response_tools_item import AgentResponseToolsItem # noqa: E402
+
+update_forward_refs(AgentResponse)
diff --git a/src/humanloop/types/agent_response_reasoning_effort.py b/src/humanloop/types/agent_response_reasoning_effort.py
new file mode 100644
index 00000000..59254f38
--- /dev/null
+++ b/src/humanloop/types/agent_response_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .open_ai_reasoning_effort import OpenAiReasoningEffort
+
+AgentResponseReasoningEffort = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/types/agent_response_stop.py b/src/humanloop/types/agent_response_stop.py
new file mode 100644
index 00000000..5c3b6a48
--- /dev/null
+++ b/src/humanloop/types/agent_response_stop.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentResponseStop = typing.Union[str, typing.List[str]]
diff --git a/src/humanloop/types/agent_response_template.py b/src/humanloop/types/agent_response_template.py
new file mode 100644
index 00000000..4c084dc8
--- /dev/null
+++ b/src/humanloop/types/agent_response_template.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .chat_message import ChatMessage
+
+AgentResponseTemplate = typing.Union[str, typing.List[ChatMessage]]
diff --git a/src/humanloop/types/agent_response_tools_item.py b/src/humanloop/types/agent_response_tools_item.py
new file mode 100644
index 00000000..8095608f
--- /dev/null
+++ b/src/humanloop/types/agent_response_tools_item.py
@@ -0,0 +1,9 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+import typing
+from .agent_inline_tool import AgentInlineTool
+
+if typing.TYPE_CHECKING:
+ from .agent_linked_file_response import AgentLinkedFileResponse
+AgentResponseToolsItem = typing.Union["AgentLinkedFileResponse", AgentInlineTool]
diff --git a/src/humanloop/types/anthropic_redacted_thinking_content.py b/src/humanloop/types/anthropic_redacted_thinking_content.py
new file mode 100644
index 00000000..ebac897b
--- /dev/null
+++ b/src/humanloop/types/anthropic_redacted_thinking_content.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class AnthropicRedactedThinkingContent(UncheckedBaseModel):
+ type: typing.Literal["redacted_thinking"] = "redacted_thinking"
+ data: str = pydantic.Field()
+ """
+    Thinking block that Anthropic redacted for safety reasons. The user is expected to pass the block back to Anthropic.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/anthropic_thinking_content.py b/src/humanloop/types/anthropic_thinking_content.py
new file mode 100644
index 00000000..bf7fc808
--- /dev/null
+++ b/src/humanloop/types/anthropic_thinking_content.py
@@ -0,0 +1,28 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class AnthropicThinkingContent(UncheckedBaseModel):
+ type: typing.Literal["thinking"] = "thinking"
+ thinking: str = pydantic.Field()
+ """
+ Model's chain-of-thought for providing the response.
+ """
+
+ signature: str = pydantic.Field()
+ """
+ Cryptographic signature that verifies the thinking block was generated by Anthropic.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/chat_message.py b/src/humanloop/types/chat_message.py
index c09f2768..c72bc90d 100644
--- a/src/humanloop/types/chat_message.py
+++ b/src/humanloop/types/chat_message.py
@@ -6,6 +6,7 @@
import pydantic
from .chat_role import ChatRole
from .tool_call import ToolCall
+from .chat_message_thinking_item import ChatMessageThinkingItem
from ..core.pydantic_utilities import IS_PYDANTIC_V2
@@ -35,6 +36,11 @@ class ChatMessage(UncheckedBaseModel):
A list of tool calls requested by the assistant.
"""
+ thinking: typing.Optional[typing.List[ChatMessageThinkingItem]] = pydantic.Field(default=None)
+ """
+ Model's chain-of-thought for providing the response. Present on assistant messages if model supports it.
+ """
+
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
else:
diff --git a/src/humanloop/types/chat_message_thinking_item.py b/src/humanloop/types/chat_message_thinking_item.py
new file mode 100644
index 00000000..0a507724
--- /dev/null
+++ b/src/humanloop/types/chat_message_thinking_item.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .anthropic_thinking_content import AnthropicThinkingContent
+from .anthropic_redacted_thinking_content import AnthropicRedactedThinkingContent
+
+ChatMessageThinkingItem = typing.Union[AnthropicThinkingContent, AnthropicRedactedThinkingContent]
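
Because the union mixes readable and redacted blocks, consumers should branch on the `type` discriminator. A sketch of pulling visible chain-of-thought off an assistant message:

    import typing

    from humanloop.types.chat_message import ChatMessage

    def visible_thinking(message: ChatMessage) -> typing.List[str]:
        texts = []
        for item in message.thinking or []:
            if item.type == "thinking":
                texts.append(item.thinking)
            # "redacted_thinking" blocks carry only opaque data and should be
            # passed back to the provider untouched.
        return texts
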
diff --git a/src/humanloop/types/create_agent_log_response.py b/src/humanloop/types/create_agent_log_response.py
new file mode 100644
index 00000000..9dc66629
--- /dev/null
+++ b/src/humanloop/types/create_agent_log_response.py
@@ -0,0 +1,42 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import pydantic
+import typing
+from .log_status import LogStatus
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class CreateAgentLogResponse(UncheckedBaseModel):
+ """
+ Response for an Agent Log.
+ """
+
+ id: str = pydantic.Field()
+ """
+ Unique identifier for the Log.
+ """
+
+ agent_id: str = pydantic.Field()
+ """
+ Unique identifier for the Agent.
+ """
+
+ version_id: str = pydantic.Field()
+ """
+ Unique identifier for the Agent Version.
+ """
+
+ log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
+ """
+    Status of the Agent Log. When an Agent Log is marked as `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/dataset_response.py b/src/humanloop/types/dataset_response.py
index af79f597..2c614521 100644
--- a/src/humanloop/types/dataset_response.py
+++ b/src/humanloop/types/dataset_response.py
@@ -3,6 +3,8 @@
from ..core.unchecked_base_model import UncheckedBaseModel
import pydantic
import typing
+import typing_extensions
+from ..core.serialization import FieldMetadata
from .environment_response import EnvironmentResponse
import datetime as dt
from .user_response import UserResponse
@@ -43,6 +45,13 @@ class DatasetResponse(UncheckedBaseModel):
Description of the Dataset.
"""
+ schema_: typing_extensions.Annotated[
+ typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema")
+ ] = pydantic.Field(default=None)
+ """
+ The JSON schema for the File.
+ """
+
readme: typing.Optional[str] = pydantic.Field(default=None)
"""
Long description of the file.
diff --git a/src/humanloop/types/directory_with_parents_and_children_response.py b/src/humanloop/types/directory_with_parents_and_children_response.py
index 5828a678..51f879b8 100644
--- a/src/humanloop/types/directory_with_parents_and_children_response.py
+++ b/src/humanloop/types/directory_with_parents_and_children_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/directory_with_parents_and_children_response_files_item.py b/src/humanloop/types/directory_with_parents_and_children_response_files_item.py
index 0bfeebf7..9d0d5fc4 100644
--- a/src/humanloop/types/directory_with_parents_and_children_response_files_item.py
+++ b/src/humanloop/types/directory_with_parents_and_children_response_files_item.py
@@ -6,7 +6,8 @@
from .evaluator_response import EvaluatorResponse
from .dataset_response import DatasetResponse
from .flow_response import FlowResponse
+from .agent_response import AgentResponse
DirectoryWithParentsAndChildrenResponseFilesItem = typing.Union[
- PromptResponse, ToolResponse, EvaluatorResponse, DatasetResponse, FlowResponse
+ PromptResponse, ToolResponse, EvaluatorResponse, DatasetResponse, FlowResponse, AgentResponse
]
diff --git a/src/humanloop/types/evaluatee_response.py b/src/humanloop/types/evaluatee_response.py
index 9ba9fe4d..4332aa12 100644
--- a/src/humanloop/types/evaluatee_response.py
+++ b/src/humanloop/types/evaluatee_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/evaluation_evaluator_response.py b/src/humanloop/types/evaluation_evaluator_response.py
index 413081c6..0c7de27e 100644
--- a/src/humanloop/types/evaluation_evaluator_response.py
+++ b/src/humanloop/types/evaluation_evaluator_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/evaluation_log_response.py b/src/humanloop/types/evaluation_log_response.py
index 6c931db0..84d117e2 100644
--- a/src/humanloop/types/evaluation_log_response.py
+++ b/src/humanloop/types/evaluation_log_response.py
@@ -1,6 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_log_response import AgentLogResponse
+from .agent_response import AgentResponse
from .evaluator_log_response import EvaluatorLogResponse
from .evaluator_response import EvaluatorResponse
from .flow_log_response import FlowLogResponse
diff --git a/src/humanloop/types/evaluation_response.py b/src/humanloop/types/evaluation_response.py
index f113fff5..bcda94a4 100644
--- a/src/humanloop/types/evaluation_response.py
+++ b/src/humanloop/types/evaluation_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/evaluation_run_response.py b/src/humanloop/types/evaluation_run_response.py
index 1203ce2c..74d59e4c 100644
--- a/src/humanloop/types/evaluation_run_response.py
+++ b/src/humanloop/types/evaluation_run_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/evaluation_runs_response.py b/src/humanloop/types/evaluation_runs_response.py
index d91e1ee9..e09b2a73 100644
--- a/src/humanloop/types/evaluation_runs_response.py
+++ b/src/humanloop/types/evaluation_runs_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/evaluator_log_response.py b/src/humanloop/types/evaluator_log_response.py
index e457d580..71ca76c0 100644
--- a/src/humanloop/types/evaluator_log_response.py
+++ b/src/humanloop/types/evaluator_log_response.py
@@ -2,6 +2,8 @@
from __future__ import annotations
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
@@ -189,6 +191,7 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_log_response import AgentLogResponse # noqa: E402
from .flow_log_response import FlowLogResponse # noqa: E402
from .prompt_log_response import PromptLogResponse # noqa: E402
from .tool_log_response import ToolLogResponse # noqa: E402
diff --git a/src/humanloop/types/evaluator_response.py b/src/humanloop/types/evaluator_response.py
index 175f456d..712ca698 100644
--- a/src/humanloop/types/evaluator_response.py
+++ b/src/humanloop/types/evaluator_response.py
@@ -5,6 +5,8 @@
import pydantic
import typing
from .evaluator_response_spec import EvaluatorResponseSpec
+import typing_extensions
+from ..core.serialization import FieldMetadata
from .environment_response import EnvironmentResponse
import datetime as dt
from .user_response import UserResponse
@@ -55,6 +57,13 @@ class EvaluatorResponse(UncheckedBaseModel):
Description of the Evaluator.
"""
+ schema_: typing_extensions.Annotated[
+ typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema")
+ ] = pydantic.Field(default=None)
+ """
+ The JSON schema for the File.
+ """
+
readme: typing.Optional[str] = pydantic.Field(default=None)
"""
Long description of the file.
@@ -124,6 +133,8 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402
+from .agent_response import AgentResponse # noqa: E402
from .flow_response import FlowResponse # noqa: E402
from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402
from .prompt_response import PromptResponse # noqa: E402
diff --git a/src/humanloop/types/event_type.py b/src/humanloop/types/event_type.py
new file mode 100644
index 00000000..128eed92
--- /dev/null
+++ b/src/humanloop/types/event_type.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+EventType = typing.Union[
+ typing.Literal[
+ "agent_turn_start",
+ "agent_turn_suspend",
+ "agent_turn_continue",
+ "agent_turn_end",
+ "agent_start",
+ "agent_update",
+ "agent_end",
+ "tool_start",
+ "tool_update",
+ "tool_end",
+ "error",
+ "agent_generation_error",
+ ],
+ typing.Any,
+]
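The `Union[Literal[...], Any]` shape makes `EventType` an open enum: clients built against this version still accept event types the server adds later. An illustrative consumer (not part of the SDK):

```python
from humanloop.types.event_type import EventType

def handle_event(event_type: EventType) -> None:
    # Known values match the literals enumerated above.
    if event_type == "agent_turn_start":
        print("agent turn started")
    elif event_type in ("tool_start", "tool_update", "tool_end"):
        print("tool activity:", event_type)
    else:
        # Unknown or future event types still satisfy the type via typing.Any.
        print("unhandled event:", event_type)

handle_event("agent_turn_start")
```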
diff --git a/src/humanloop/types/file_environment_response.py b/src/humanloop/types/file_environment_response.py
index 70ed322f..7f34b7b3 100644
--- a/src/humanloop/types/file_environment_response.py
+++ b/src/humanloop/types/file_environment_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/file_environment_response_file.py b/src/humanloop/types/file_environment_response_file.py
index 2a105c9d..0254c2b8 100644
--- a/src/humanloop/types/file_environment_response_file.py
+++ b/src/humanloop/types/file_environment_response_file.py
@@ -6,7 +6,8 @@
from .dataset_response import DatasetResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
+from .agent_response import AgentResponse
FileEnvironmentResponseFile = typing.Union[
- PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse
+ PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse, AgentResponse
]
diff --git a/src/humanloop/types/file_environment_variable_request.py b/src/humanloop/types/file_environment_variable_request.py
new file mode 100644
index 00000000..8108245b
--- /dev/null
+++ b/src/humanloop/types/file_environment_variable_request.py
@@ -0,0 +1,27 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+
+
+class FileEnvironmentVariableRequest(UncheckedBaseModel):
+ name: str = pydantic.Field()
+ """
+ Name of the environment variable.
+ """
+
+ value: str = pydantic.Field()
+ """
+ Value of the environment variable.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
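Both fields are required strings, so construction is direct. A sketch with placeholder values:

```python
from humanloop.types.file_environment_variable_request import FileEnvironmentVariableRequest

# Placeholder name/value pair for illustration.
env_var = FileEnvironmentVariableRequest(name="OPENAI_API_KEY", value="sk-...")
print(env_var.name)  # OPENAI_API_KEY
```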
diff --git a/src/humanloop/types/file_type.py b/src/humanloop/types/file_type.py
index 7a870b84..f235825b 100644
--- a/src/humanloop/types/file_type.py
+++ b/src/humanloop/types/file_type.py
@@ -2,4 +2,4 @@
import typing
-FileType = typing.Union[typing.Literal["prompt", "tool", "dataset", "evaluator", "flow"], typing.Any]
+FileType = typing.Union[typing.Literal["prompt", "tool", "dataset", "evaluator", "flow", "agent"], typing.Any]
diff --git a/src/humanloop/types/files_tool_type.py b/src/humanloop/types/files_tool_type.py
index c32b9755..753d9ba2 100644
--- a/src/humanloop/types/files_tool_type.py
+++ b/src/humanloop/types/files_tool_type.py
@@ -3,5 +3,5 @@
import typing
FilesToolType = typing.Union[
- typing.Literal["pinecone_search", "google", "mock", "snippet", "json_schema", "get_api_call"], typing.Any
+ typing.Literal["pinecone_search", "google", "mock", "snippet", "json_schema", "get_api_call", "python"], typing.Any
]
diff --git a/src/humanloop/types/flow_log_response.py b/src/humanloop/types/flow_log_response.py
index ba1e1cf6..58a87fac 100644
--- a/src/humanloop/types/flow_log_response.py
+++ b/src/humanloop/types/flow_log_response.py
@@ -2,6 +2,8 @@
from __future__ import annotations
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
@@ -173,6 +175,7 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_log_response import AgentLogResponse # noqa: E402
from .evaluator_log_response import EvaluatorLogResponse # noqa: E402
from .prompt_log_response import PromptLogResponse # noqa: E402
from .tool_log_response import ToolLogResponse # noqa: E402
diff --git a/src/humanloop/types/flow_response.py b/src/humanloop/types/flow_response.py
index 4017b3b7..7768778e 100644
--- a/src/humanloop/types/flow_response.py
+++ b/src/humanloop/types/flow_response.py
@@ -4,6 +4,8 @@
from ..core.unchecked_base_model import UncheckedBaseModel
import pydantic
import typing
+import typing_extensions
+from ..core.serialization import FieldMetadata
from .environment_response import EnvironmentResponse
import datetime as dt
from .user_response import UserResponse
@@ -57,6 +59,13 @@ class FlowResponse(UncheckedBaseModel):
Description of the Flow.
"""
+ schema_: typing_extensions.Annotated[
+ typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema")
+ ] = pydantic.Field(default=None)
+ """
+ The JSON schema for the File.
+ """
+
readme: typing.Optional[str] = pydantic.Field(default=None)
"""
Long description of the file.
@@ -111,6 +120,8 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402
+from .agent_response import AgentResponse # noqa: E402
from .evaluator_response import EvaluatorResponse # noqa: E402
from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402
from .prompt_response import PromptResponse # noqa: E402
diff --git a/src/humanloop/types/linked_file_request.py b/src/humanloop/types/linked_file_request.py
new file mode 100644
index 00000000..ee45ffdf
--- /dev/null
+++ b/src/humanloop/types/linked_file_request.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class LinkedFileRequest(UncheckedBaseModel):
+ file_id: str
+ environment_id: typing.Optional[str] = None
+ version_id: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
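Only `file_id` is required; `environment_id` and `version_id` optionally pin the link to a specific deployment or version. A sketch with hypothetical IDs:

```python
from humanloop.types.linked_file_request import LinkedFileRequest

# Follow whichever version is currently deployed.
link = LinkedFileRequest(file_id="fl_123")

# Or pin the link to one exact version of the File.
pinned = LinkedFileRequest(file_id="fl_123", version_id="v_7")
```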
diff --git a/src/humanloop/types/list_agents.py b/src/humanloop/types/list_agents.py
new file mode 100644
index 00000000..36481f41
--- /dev/null
+++ b/src/humanloop/types/list_agents.py
@@ -0,0 +1,31 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
+from .evaluator_response import EvaluatorResponse
+from .flow_response import FlowResponse
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse
+from .prompt_response import PromptResponse
+from .tool_response import ToolResponse
+from .version_deployment_response import VersionDeploymentResponse
+from .version_id_response import VersionIdResponse
+import typing
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class ListAgents(UncheckedBaseModel):
+ records: typing.List[AgentResponse] = pydantic.Field()
+ """
+ The list of Agents.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/list_evaluators.py b/src/humanloop/types/list_evaluators.py
index 61edbec5..7b736e14 100644
--- a/src/humanloop/types/list_evaluators.py
+++ b/src/humanloop/types/list_evaluators.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/list_flows.py b/src/humanloop/types/list_flows.py
index 686dab26..41ec4008 100644
--- a/src/humanloop/types/list_flows.py
+++ b/src/humanloop/types/list_flows.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/list_prompts.py b/src/humanloop/types/list_prompts.py
index 94cda05e..f773d3f9 100644
--- a/src/humanloop/types/list_prompts.py
+++ b/src/humanloop/types/list_prompts.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/list_tools.py b/src/humanloop/types/list_tools.py
index 4080a6a1..84ddc89c 100644
--- a/src/humanloop/types/list_tools.py
+++ b/src/humanloop/types/list_tools.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/log_response.py b/src/humanloop/types/log_response.py
index 0ba81dd3..cd7a0a26 100644
--- a/src/humanloop/types/log_response.py
+++ b/src/humanloop/types/log_response.py
@@ -9,4 +9,7 @@
from .tool_log_response import ToolLogResponse
from .evaluator_log_response import EvaluatorLogResponse
from .flow_log_response import FlowLogResponse
-LogResponse = typing.Union["PromptLogResponse", "ToolLogResponse", "EvaluatorLogResponse", "FlowLogResponse"]
+ from .agent_log_response import AgentLogResponse
+LogResponse = typing.Union[
+ "PromptLogResponse", "ToolLogResponse", "EvaluatorLogResponse", "FlowLogResponse", "AgentLogResponse"
+]
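The leading whitespace on the added import is intentional: it sits inside the module's `if typing.TYPE_CHECKING:` block, which is why the union names `AgentLogResponse` as a string forward reference. The resulting file is roughly:

```python
import typing

if typing.TYPE_CHECKING:
    # Imported for type checkers only, avoiding a runtime import cycle.
    from .agent_log_response import AgentLogResponse

LogResponse = typing.Union[
    "PromptLogResponse", "ToolLogResponse", "EvaluatorLogResponse", "FlowLogResponse", "AgentLogResponse"
]
```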
diff --git a/src/humanloop/types/log_stream_response.py b/src/humanloop/types/log_stream_response.py
new file mode 100644
index 00000000..69ffacf4
--- /dev/null
+++ b/src/humanloop/types/log_stream_response.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .prompt_call_stream_response import PromptCallStreamResponse
+from .agent_log_stream_response import AgentLogStreamResponse
+
+LogStreamResponse = typing.Union[PromptCallStreamResponse, AgentLogStreamResponse]
diff --git a/src/humanloop/types/model_providers.py b/src/humanloop/types/model_providers.py
index 8473d2ae..3f2c99fb 100644
--- a/src/humanloop/types/model_providers.py
+++ b/src/humanloop/types/model_providers.py
@@ -4,7 +4,7 @@
ModelProviders = typing.Union[
typing.Literal[
- "openai", "openai_azure", "mock", "anthropic", "bedrock", "cohere", "replicate", "google", "groq", "deepseek"
+ "anthropic", "bedrock", "cohere", "deepseek", "google", "groq", "mock", "openai", "openai_azure", "replicate"
],
typing.Any,
]
diff --git a/src/humanloop/types/monitoring_evaluator_response.py b/src/humanloop/types/monitoring_evaluator_response.py
index e70dc4fb..1809af57 100644
--- a/src/humanloop/types/monitoring_evaluator_response.py
+++ b/src/humanloop/types/monitoring_evaluator_response.py
@@ -39,6 +39,8 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402
+from .agent_response import AgentResponse # noqa: E402
from .evaluator_response import EvaluatorResponse # noqa: E402
from .flow_response import FlowResponse # noqa: E402
from .prompt_response import PromptResponse # noqa: E402
diff --git a/src/humanloop/types/on_agent_call_enum.py b/src/humanloop/types/on_agent_call_enum.py
new file mode 100644
index 00000000..3730256e
--- /dev/null
+++ b/src/humanloop/types/on_agent_call_enum.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+OnAgentCallEnum = typing.Union[typing.Literal["stop", "continue"], typing.Any]
diff --git a/src/humanloop/types/open_ai_reasoning_effort.py b/src/humanloop/types/open_ai_reasoning_effort.py
new file mode 100644
index 00000000..d8c48547
--- /dev/null
+++ b/src/humanloop/types/open_ai_reasoning_effort.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+OpenAiReasoningEffort = typing.Union[typing.Literal["high", "medium", "low"], typing.Any]
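This literal is combined with `int` in the per-model `*ReasoningEffort` aliases elsewhere in this patch, matching the updated field docstrings: OpenAI reasoning models take the enum, Anthropic reasoning models take a maximum-token budget. A sketch of the two accepted shapes:

```python
import typing

from humanloop.types.open_ai_reasoning_effort import OpenAiReasoningEffort

# Mirrors the generated Union[OpenAiReasoningEffort, int] aliases.
ReasoningEffort = typing.Union[OpenAiReasoningEffort, int]

openai_effort: ReasoningEffort = "medium"  # OpenAI o1 / o3-mini style enum
anthropic_effort: ReasoningEffort = 4096   # Anthropic max thinking-token budget
```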
diff --git a/src/humanloop/types/paginated_data_agent_response.py b/src/humanloop/types/paginated_data_agent_response.py
new file mode 100644
index 00000000..0febbadd
--- /dev/null
+++ b/src/humanloop/types/paginated_data_agent_response.py
@@ -0,0 +1,31 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
+from .evaluator_response import EvaluatorResponse
+from .flow_response import FlowResponse
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse
+from .prompt_response import PromptResponse
+from .tool_response import ToolResponse
+from .version_deployment_response import VersionDeploymentResponse
+from .version_id_response import VersionIdResponse
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class PaginatedDataAgentResponse(UncheckedBaseModel):
+ records: typing.List[AgentResponse]
+ page: int
+ size: int
+ total: int
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
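The pagination metadata travels with the records, so the total page count can be derived client-side. A small sketch:

```python
from humanloop.types.paginated_data_agent_response import PaginatedDataAgentResponse

def page_count(page: PaginatedDataAgentResponse) -> int:
    # Ceiling division over the total/size metadata returned with each page.
    return (page.total + page.size - 1) // page.size
```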
diff --git a/src/humanloop/types/paginated_data_evaluation_log_response.py b/src/humanloop/types/paginated_data_evaluation_log_response.py
index 9e3c568e..c508f8a6 100644
--- a/src/humanloop/types/paginated_data_evaluation_log_response.py
+++ b/src/humanloop/types/paginated_data_evaluation_log_response.py
@@ -1,6 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_log_response import AgentLogResponse
+from .agent_response import AgentResponse
from .evaluator_log_response import EvaluatorLogResponse
from .evaluator_response import EvaluatorResponse
from .flow_log_response import FlowLogResponse
diff --git a/src/humanloop/types/paginated_data_evaluator_response.py b/src/humanloop/types/paginated_data_evaluator_response.py
index 275f0528..2e82c736 100644
--- a/src/humanloop/types/paginated_data_evaluator_response.py
+++ b/src/humanloop/types/paginated_data_evaluator_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/paginated_data_flow_response.py b/src/humanloop/types/paginated_data_flow_response.py
index 990d58be..6cfcf9ae 100644
--- a/src/humanloop/types/paginated_data_flow_response.py
+++ b/src/humanloop/types/paginated_data_flow_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/paginated_data_log_response.py b/src/humanloop/types/paginated_data_log_response.py
index 57bae587..f41ca9ba 100644
--- a/src/humanloop/types/paginated_data_log_response.py
+++ b/src/humanloop/types/paginated_data_log_response.py
@@ -1,6 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_log_response import AgentLogResponse
+from .agent_response import AgentResponse
from .evaluator_log_response import EvaluatorLogResponse
from .evaluator_response import EvaluatorResponse
from .flow_log_response import FlowLogResponse
diff --git a/src/humanloop/types/paginated_data_prompt_response.py b/src/humanloop/types/paginated_data_prompt_response.py
index ff71e584..d9e1d914 100644
--- a/src/humanloop/types/paginated_data_prompt_response.py
+++ b/src/humanloop/types/paginated_data_prompt_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/paginated_data_tool_response.py b/src/humanloop/types/paginated_data_tool_response.py
index 0e52b361..e2962e87 100644
--- a/src/humanloop/types/paginated_data_tool_response.py
+++ b/src/humanloop/types/paginated_data_tool_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py b/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py
similarity index 76%
rename from src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py
rename to src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py
index bd7082b3..87d5b603 100644
--- a/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py
+++ b/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
@@ -9,16 +11,18 @@
from .version_deployment_response import VersionDeploymentResponse
from .version_id_response import VersionIdResponse
import typing
-from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item import (
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem,
+from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item import (
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem,
)
from ..core.pydantic_utilities import IS_PYDANTIC_V2
import pydantic
-class PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse(UncheckedBaseModel):
+class PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse(
+ UncheckedBaseModel
+):
records: typing.List[
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem
]
page: int
size: int
diff --git a/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py b/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py
similarity index 63%
rename from src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py
rename to src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py
index 65c4f324..a1b4f056 100644
--- a/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py
+++ b/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py
@@ -6,7 +6,8 @@
from .dataset_response import DatasetResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
+from .agent_response import AgentResponse
-PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem = typing.Union[
- PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse
-]
+PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem = (
+ typing.Union[PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse, AgentResponse]
+)
diff --git a/src/humanloop/types/paginated_evaluation_response.py b/src/humanloop/types/paginated_evaluation_response.py
index 78e177e8..16232e0b 100644
--- a/src/humanloop/types/paginated_evaluation_response.py
+++ b/src/humanloop/types/paginated_evaluation_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/populate_template_response.py b/src/humanloop/types/populate_template_response.py
index d587d175..d2d36f78 100644
--- a/src/humanloop/types/populate_template_response.py
+++ b/src/humanloop/types/populate_template_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
@@ -16,9 +18,11 @@
from .model_providers import ModelProviders
from .populate_template_response_stop import PopulateTemplateResponseStop
from .response_format import ResponseFormat
-from .reasoning_effort import ReasoningEffort
+from .populate_template_response_reasoning_effort import PopulateTemplateResponseReasoningEffort
from .tool_function import ToolFunction
from .linked_tool_response import LinkedToolResponse
+import typing_extensions
+from ..core.serialization import FieldMetadata
from .environment_response import EnvironmentResponse
import datetime as dt
from .user_response import UserResponse
@@ -126,9 +130,9 @@ class PopulateTemplateResponse(UncheckedBaseModel):
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
"""
- reasoning_effort: typing.Optional[ReasoningEffort] = pydantic.Field(default=None)
+ reasoning_effort: typing.Optional[PopulateTemplateResponseReasoningEffort] = pydantic.Field(default=None)
"""
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which specifies the maximum token budget.
"""
tools: typing.Optional[typing.List[ToolFunction]] = pydantic.Field(default=None)
@@ -176,6 +180,13 @@ class PopulateTemplateResponse(UncheckedBaseModel):
Name of the Prompt.
"""
+ schema_: typing_extensions.Annotated[
+ typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema")
+ ] = pydantic.Field(default=None)
+ """
+ The JSON schema for the Prompt.
+ """
+
version_id: str = pydantic.Field()
"""
Unique identifier for the specific Prompt Version. If no query params provided, the default deployed Prompt Version is returned.
@@ -220,6 +231,11 @@ class PopulateTemplateResponse(UncheckedBaseModel):
Aggregation of Evaluator results for the Prompt Version.
"""
+ content: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The serialized kernel for the Prompt. Corresponds to the .prompt file.
+ """
+
populated_template: typing.Optional[PopulateTemplateResponsePopulatedTemplate] = pydantic.Field(default=None)
"""
The template populated with the input values you provided in the request. Returns None if no template exists.
diff --git a/src/humanloop/types/populate_template_response_reasoning_effort.py b/src/humanloop/types/populate_template_response_reasoning_effort.py
new file mode 100644
index 00000000..8dd9f7f6
--- /dev/null
+++ b/src/humanloop/types/populate_template_response_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .open_ai_reasoning_effort import OpenAiReasoningEffort
+
+PopulateTemplateResponseReasoningEffort = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/types/prompt_call_response.py b/src/humanloop/types/prompt_call_response.py
index 4e1ae69c..ec74437f 100644
--- a/src/humanloop/types/prompt_call_response.py
+++ b/src/humanloop/types/prompt_call_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/prompt_kernel_request.py b/src/humanloop/types/prompt_kernel_request.py
index 6461bb19..80ba5ed5 100644
--- a/src/humanloop/types/prompt_kernel_request.py
+++ b/src/humanloop/types/prompt_kernel_request.py
@@ -9,12 +9,18 @@
from .model_providers import ModelProviders
from .prompt_kernel_request_stop import PromptKernelRequestStop
from .response_format import ResponseFormat
-from .reasoning_effort import ReasoningEffort
+from .prompt_kernel_request_reasoning_effort import PromptKernelRequestReasoningEffort
from .tool_function import ToolFunction
from ..core.pydantic_utilities import IS_PYDANTIC_V2
class PromptKernelRequest(UncheckedBaseModel):
+ """
+ Base class shared by PromptKernelRequest and AgentKernelRequest.
+
+ Contains the Prompt-related fields common to both.
+ """
+
model: str = pydantic.Field()
"""
The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
@@ -90,9 +96,9 @@ class PromptKernelRequest(UncheckedBaseModel):
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
"""
- reasoning_effort: typing.Optional[ReasoningEffort] = pydantic.Field(default=None)
+ reasoning_effort: typing.Optional[PromptKernelRequestReasoningEffort] = pydantic.Field(default=None)
"""
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which specifies the maximum token budget.
"""
tools: typing.Optional[typing.List[ToolFunction]] = pydantic.Field(default=None)
diff --git a/src/humanloop/types/prompt_kernel_request_reasoning_effort.py b/src/humanloop/types/prompt_kernel_request_reasoning_effort.py
new file mode 100644
index 00000000..dda61bb4
--- /dev/null
+++ b/src/humanloop/types/prompt_kernel_request_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .open_ai_reasoning_effort import OpenAiReasoningEffort
+
+PromptKernelRequestReasoningEffort = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/types/prompt_log_response.py b/src/humanloop/types/prompt_log_response.py
index 2a1bad11..a9e26318 100644
--- a/src/humanloop/types/prompt_log_response.py
+++ b/src/humanloop/types/prompt_log_response.py
@@ -2,6 +2,8 @@
from __future__ import annotations
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
@@ -213,6 +215,7 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_log_response import AgentLogResponse # noqa: E402
from .evaluator_log_response import EvaluatorLogResponse # noqa: E402
from .flow_log_response import FlowLogResponse # noqa: E402
from .tool_log_response import ToolLogResponse # noqa: E402
diff --git a/src/humanloop/types/prompt_response.py b/src/humanloop/types/prompt_response.py
index 07f4755d..786617f3 100644
--- a/src/humanloop/types/prompt_response.py
+++ b/src/humanloop/types/prompt_response.py
@@ -10,9 +10,11 @@
from .model_providers import ModelProviders
from .prompt_response_stop import PromptResponseStop
from .response_format import ResponseFormat
-from .reasoning_effort import ReasoningEffort
+from .prompt_response_reasoning_effort import PromptResponseReasoningEffort
from .tool_function import ToolFunction
from .linked_tool_response import LinkedToolResponse
+import typing_extensions
+from ..core.serialization import FieldMetadata
from .environment_response import EnvironmentResponse
import datetime as dt
from .user_response import UserResponse
@@ -120,9 +122,9 @@ class PromptResponse(UncheckedBaseModel):
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
"""
- reasoning_effort: typing.Optional[ReasoningEffort] = pydantic.Field(default=None)
+ reasoning_effort: typing.Optional[PromptResponseReasoningEffort] = pydantic.Field(default=None)
"""
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which specifies the maximum token budget.
"""
tools: typing.Optional[typing.List[ToolFunction]] = pydantic.Field(default=None)
@@ -170,6 +172,13 @@ class PromptResponse(UncheckedBaseModel):
Name of the Prompt.
"""
+ schema_: typing_extensions.Annotated[
+ typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema")
+ ] = pydantic.Field(default=None)
+ """
+ The JSON schema for the Prompt.
+ """
+
version_id: str = pydantic.Field()
"""
Unique identifier for the specific Prompt Version. If no query params provided, the default deployed Prompt Version is returned.
@@ -214,6 +223,11 @@ class PromptResponse(UncheckedBaseModel):
Aggregation of Evaluator results for the Prompt Version.
"""
+ content: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The serialized kernel for the Prompt. Corresponds to the .prompt file.
+ """
+
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
else:
@@ -224,6 +238,8 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402
+from .agent_response import AgentResponse # noqa: E402
from .evaluator_response import EvaluatorResponse # noqa: E402
from .flow_response import FlowResponse # noqa: E402
from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402
diff --git a/src/humanloop/types/prompt_response_reasoning_effort.py b/src/humanloop/types/prompt_response_reasoning_effort.py
new file mode 100644
index 00000000..e136637f
--- /dev/null
+++ b/src/humanloop/types/prompt_response_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .open_ai_reasoning_effort import OpenAiReasoningEffort
+
+PromptResponseReasoningEffort = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/types/reasoning_effort.py b/src/humanloop/types/reasoning_effort.py
deleted file mode 100644
index da0a0354..00000000
--- a/src/humanloop/types/reasoning_effort.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-ReasoningEffort = typing.Union[typing.Literal["high", "medium", "low"], typing.Any]
diff --git a/src/humanloop/types/run_version_response.py b/src/humanloop/types/run_version_response.py
index d94b1178..770dc487 100644
--- a/src/humanloop/types/run_version_response.py
+++ b/src/humanloop/types/run_version_response.py
@@ -5,5 +5,6 @@
from .tool_response import ToolResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
+from .agent_response import AgentResponse
-RunVersionResponse = typing.Union[PromptResponse, ToolResponse, EvaluatorResponse, FlowResponse]
+RunVersionResponse = typing.Union[PromptResponse, ToolResponse, EvaluatorResponse, FlowResponse, AgentResponse]
diff --git a/src/humanloop/types/tool_call_response.py b/src/humanloop/types/tool_call_response.py
new file mode 100644
index 00000000..55bf2712
--- /dev/null
+++ b/src/humanloop/types/tool_call_response.py
@@ -0,0 +1,168 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
+from .evaluator_response import EvaluatorResponse
+from .flow_response import FlowResponse
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse
+from .prompt_response import PromptResponse
+from .tool_response import ToolResponse
+from .version_deployment_response import VersionDeploymentResponse
+from .version_id_response import VersionIdResponse
+from .agent_log_response import AgentLogResponse
+from .evaluator_log_response import EvaluatorLogResponse
+from .flow_log_response import FlowLogResponse
+from .prompt_log_response import PromptLogResponse
+from .tool_log_response import ToolLogResponse
+import typing
+import datetime as dt
+import pydantic
+from .log_status import LogStatus
+from .log_response import LogResponse
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class ToolCallResponse(UncheckedBaseModel):
+ """
+ Response model for a Tool call.
+ """
+
+ start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ When the logged event started.
+ """
+
+ end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ When the logged event ended.
+ """
+
+ tool: ToolResponse = pydantic.Field()
+ """
+ Tool used to generate the Log.
+ """
+
+ output: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ User defined timestamp for when the log was created.
+ """
+
+ error: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Captured log and debug statements.
+ """
+
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Raw request sent to the provider.
+ """
+
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Raw response received from the provider.
+ """
+
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ The inputs passed to the prompt template.
+ """
+
+ source: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifies where the model was called from.
+ """
+
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Any additional metadata to record.
+ """
+
+ log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
+ """
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+ """
+
+ source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ """
+
+ trace_parent_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The ID of the parent Log to nest this Log under in a Trace.
+ """
+
+ batches: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
+ """
+ Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations.
+ """
+
+ user: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ End-user ID related to the Log.
+ """
+
+ environment: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The name of the Environment the Log is associated to.
+ """
+
+ save: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ Whether the request/response payloads will be stored on Humanloop.
+ """
+
+ log_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifier for the Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ """
+
+ id: str = pydantic.Field()
+ """
+ ID of the log.
+ """
+
+ evaluator_logs: typing.List[EvaluatorLogResponse] = pydantic.Field()
+ """
+ List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
+ """
+
+ trace_flow_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifier for the Flow that the Trace belongs to.
+ """
+
+ trace_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ ID of the Trace containing the Tool Call Log.
+ """
+
+ trace_children: typing.Optional[typing.List[LogResponse]] = pydantic.Field(default=None)
+ """
+ Logs nested under this Log in the Trace.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
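The `IS_PYDANTIC_V2` branch at the bottom is the same version-bridging idiom used throughout these models: pydantic v2 reads `model_config`, while v1 falls back to an inner `Config` class with equivalent settings. A standalone sketch of the pattern:

```python
import typing

import pydantic

# pydantic exposes its version string in both major versions.
IS_PYDANTIC_V2 = pydantic.VERSION.startswith("2.")


class ExampleModel(pydantic.BaseModel):
    name: str

    if IS_PYDANTIC_V2:
        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)
    else:

        class Config:
            frozen = True
            smart_union = True
            extra = pydantic.Extra.allow
```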
diff --git a/src/humanloop/types/tool_log_response.py b/src/humanloop/types/tool_log_response.py
index 1b6081c3..251223af 100644
--- a/src/humanloop/types/tool_log_response.py
+++ b/src/humanloop/types/tool_log_response.py
@@ -2,6 +2,8 @@
from __future__ import annotations
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
@@ -13,6 +15,7 @@
import datetime as dt
import pydantic
from .log_status import LogStatus
+from .chat_message import ChatMessage
from ..core.pydantic_utilities import IS_PYDANTIC_V2
from ..core.pydantic_utilities import update_forward_refs
@@ -152,6 +155,11 @@ class ToolLogResponse(UncheckedBaseModel):
Tool used to generate the Log.
"""
+ output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
+ """
+ The message returned by the Tool.
+ """
+
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
else:
@@ -162,6 +170,7 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_log_response import AgentLogResponse # noqa: E402
from .evaluator_log_response import EvaluatorLogResponse # noqa: E402
from .flow_log_response import FlowLogResponse # noqa: E402
from .prompt_log_response import PromptLogResponse # noqa: E402
diff --git a/src/humanloop/types/tool_response.py b/src/humanloop/types/tool_response.py
index 0b835918..70537215 100644
--- a/src/humanloop/types/tool_response.py
+++ b/src/humanloop/types/tool_response.py
@@ -152,6 +152,8 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402
+from .agent_response import AgentResponse # noqa: E402
from .evaluator_response import EvaluatorResponse # noqa: E402
from .flow_response import FlowResponse # noqa: E402
from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402
diff --git a/src/humanloop/types/version_deployment_response.py b/src/humanloop/types/version_deployment_response.py
index e2e82d9f..0db57d69 100644
--- a/src/humanloop/types/version_deployment_response.py
+++ b/src/humanloop/types/version_deployment_response.py
@@ -36,6 +36,8 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402
+from .agent_response import AgentResponse # noqa: E402
from .evaluator_response import EvaluatorResponse # noqa: E402
from .flow_response import FlowResponse # noqa: E402
from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402
diff --git a/src/humanloop/types/version_deployment_response_file.py b/src/humanloop/types/version_deployment_response_file.py
index e0f73573..4fadcff0 100644
--- a/src/humanloop/types/version_deployment_response_file.py
+++ b/src/humanloop/types/version_deployment_response_file.py
@@ -10,6 +10,7 @@
from .tool_response import ToolResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
+ from .agent_response import AgentResponse
VersionDeploymentResponseFile = typing.Union[
- "PromptResponse", "ToolResponse", DatasetResponse, "EvaluatorResponse", "FlowResponse"
+ "PromptResponse", "ToolResponse", DatasetResponse, "EvaluatorResponse", "FlowResponse", "AgentResponse"
]
diff --git a/src/humanloop/types/version_id_response.py b/src/humanloop/types/version_id_response.py
index 877851a9..e3f5dc27 100644
--- a/src/humanloop/types/version_id_response.py
+++ b/src/humanloop/types/version_id_response.py
@@ -30,6 +30,8 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402
+from .agent_response import AgentResponse # noqa: E402
from .evaluator_response import EvaluatorResponse # noqa: E402
from .flow_response import FlowResponse # noqa: E402
from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402
diff --git a/src/humanloop/types/version_id_response_version.py b/src/humanloop/types/version_id_response_version.py
index 2f56346c..b1cbd45d 100644
--- a/src/humanloop/types/version_id_response_version.py
+++ b/src/humanloop/types/version_id_response_version.py
@@ -10,6 +10,7 @@
from .tool_response import ToolResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
+ from .agent_response import AgentResponse
VersionIdResponseVersion = typing.Union[
- "PromptResponse", "ToolResponse", DatasetResponse, "EvaluatorResponse", "FlowResponse"
+ "PromptResponse", "ToolResponse", DatasetResponse, "EvaluatorResponse", "FlowResponse", "AgentResponse"
]
From 3f3e7d244240390ec210bd0bc17a4807ec6d8584 Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Thu, 24 Apr 2025 14:59:42 +0100
Subject: [PATCH 02/39] set up initial test for sync operation
---
poetry.lock | 126 +++++++++++++++++++++++++++------
pyproject.toml | 5 +-
src/humanloop/client.py | 3 +
src/humanloop/sync/__init__.py | 0
tests/conftest.py | 5 +-
tests/sync/test_sync.py | 95 +++++++++++++++++++++++++
6 files changed, 212 insertions(+), 22 deletions(-)
create mode 100644 src/humanloop/sync/__init__.py
create mode 100644 tests/sync/test_sync.py
diff --git a/poetry.lock b/poetry.lock
index b3099902..e6f81eda 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.8.5 and should not be changed by hand.
+# This file is automatically @generated by Poetry 2.1.2 and should not be changed by hand.
[[package]]
name = "annotated-types"
@@ -6,6 +6,7 @@ version = "0.7.0"
description = "Reusable constraint types to use with typing.Annotated"
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"},
{file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"},
@@ -17,6 +18,7 @@ version = "0.50.0"
description = "The official Python library for the anthropic API"
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "anthropic-0.50.0-py3-none-any.whl", hash = "sha256:defbd79327ca2fa61fd7b9eb2f1627dfb1f69c25d49288c52e167ddb84574f80"},
{file = "anthropic-0.50.0.tar.gz", hash = "sha256:42175ec04ce4ff2fa37cd436710206aadff546ee99d70d974699f59b49adc66f"},
@@ -41,6 +43,7 @@ version = "4.9.0"
description = "High level compatibility layer for multiple asynchronous event loop implementations"
optional = false
python-versions = ">=3.9"
+groups = ["main", "dev"]
files = [
{file = "anyio-4.9.0-py3-none-any.whl", hash = "sha256:9f76d541cad6e36af7beb62e978876f3b41e3e04f2c1fbf0884604c0a9c4d93c"},
{file = "anyio-4.9.0.tar.gz", hash = "sha256:673c0c244e15788651a4ff38710fea9675823028a6f08a5eda409e0c9840a028"},
@@ -54,7 +57,7 @@ typing_extensions = {version = ">=4.5", markers = "python_version < \"3.13\""}
[package.extras]
doc = ["Sphinx (>=8.2,<9.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx_rtd_theme"]
-test = ["anyio[trio]", "blockbuster (>=1.5.23)", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21)"]
+test = ["anyio[trio]", "blockbuster (>=1.5.23)", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "trustme", "truststore (>=0.9.1) ; python_version >= \"3.10\"", "uvloop (>=0.21) ; platform_python_implementation == \"CPython\" and platform_system != \"Windows\" and python_version < \"3.14\""]
trio = ["trio (>=0.26.1)"]
[[package]]
@@ -63,18 +66,19 @@ version = "25.3.0"
description = "Classes Without Boilerplate"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "attrs-25.3.0-py3-none-any.whl", hash = "sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3"},
{file = "attrs-25.3.0.tar.gz", hash = "sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b"},
]
[package.extras]
-benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
-cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
-dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
+benchmark = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"]
+cov = ["cloudpickle ; platform_python_implementation == \"CPython\"", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"]
+dev = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"]
docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier"]
-tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
-tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"]
+tests = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"]
+tests-mypy = ["mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\""]
[[package]]
name = "certifi"
@@ -82,6 +86,7 @@ version = "2025.4.26"
description = "Python package for providing Mozilla's CA Bundle."
optional = false
python-versions = ">=3.6"
+groups = ["main", "dev"]
files = [
{file = "certifi-2025.4.26-py3-none-any.whl", hash = "sha256:30350364dfe371162649852c63336a15c70c6510c2ad5015b21c2345311805f3"},
{file = "certifi-2025.4.26.tar.gz", hash = "sha256:0a816057ea3cdefcef70270d2c515e4506bbc954f417fa5ade2021213bb8f0c6"},
@@ -93,6 +98,7 @@ version = "3.4.1"
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
optional = false
python-versions = ">=3.7"
+groups = ["main", "dev"]
files = [
{file = "charset_normalizer-3.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de"},
{file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176"},
@@ -194,6 +200,7 @@ version = "5.15.0"
description = ""
optional = false
python-versions = "<4.0,>=3.9"
+groups = ["dev"]
files = [
{file = "cohere-5.15.0-py3-none-any.whl", hash = "sha256:22ff867c2a6f2fc2b585360c6072f584f11f275ef6d9242bac24e0fa2df1dfb5"},
{file = "cohere-5.15.0.tar.gz", hash = "sha256:e802d4718ddb0bb655654382ebbce002756a3800faac30296cde7f1bdc6ff2cc"},
@@ -216,10 +223,12 @@ version = "0.4.6"
description = "Cross-platform colored terminal text."
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
+groups = ["main", "dev"]
files = [
{file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
{file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
]
+markers = {main = "platform_system == \"Windows\"", dev = "platform_system == \"Windows\" or sys_platform == \"win32\""}
[[package]]
name = "deepdiff"
@@ -227,6 +236,7 @@ version = "8.4.2"
description = "Deep Difference and Search of any Python object/data. Recreate objects by adding adding deltas to each other."
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "deepdiff-8.4.2-py3-none-any.whl", hash = "sha256:7e39e5b26f3747c54f9d0e8b9b29daab670c3100166b77cc0185d5793121b099"},
{file = "deepdiff-8.4.2.tar.gz", hash = "sha256:5c741c0867ebc7fcb83950ad5ed958369c17f424e14dee32a11c56073f4ee92a"},
@@ -245,6 +255,7 @@ version = "1.2.18"
description = "Python @deprecated decorator to deprecate old python classes, functions or methods."
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7"
+groups = ["main"]
files = [
{file = "Deprecated-1.2.18-py2.py3-none-any.whl", hash = "sha256:bd5011788200372a32418f888e326a09ff80d0214bd961147cfed01b5c018eec"},
{file = "deprecated-1.2.18.tar.gz", hash = "sha256:422b6f6d859da6f2ef57857761bfb392480502a64c3028ca9bbe86085d72115d"},
@@ -254,7 +265,7 @@ files = [
wrapt = ">=1.10,<2"
[package.extras]
-dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "setuptools", "tox"]
+dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "setuptools ; python_version >= \"3.12\"", "tox"]
[[package]]
name = "distro"
@@ -262,6 +273,7 @@ version = "1.9.0"
description = "Distro - an OS platform information API"
optional = false
python-versions = ">=3.6"
+groups = ["main", "dev"]
files = [
{file = "distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2"},
{file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"},
@@ -273,6 +285,8 @@ version = "1.2.2"
description = "Backport of PEP 654 (exception groups)"
optional = false
python-versions = ">=3.7"
+groups = ["main", "dev"]
+markers = "python_version < \"3.11\""
files = [
{file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"},
{file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"},
@@ -287,6 +301,7 @@ version = "1.10.0"
description = "Fast read/write of AVRO files"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "fastavro-1.10.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1a9fe0672d2caf0fe54e3be659b13de3cad25a267f2073d6f4b9f8862acc31eb"},
{file = "fastavro-1.10.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:86dd0410770e0c99363788f0584523709d85e57bb457372ec5c285a482c17fe6"},
@@ -333,6 +348,7 @@ version = "3.18.0"
description = "A platform independent file lock."
optional = false
python-versions = ">=3.9"
+groups = ["main", "dev"]
files = [
{file = "filelock-3.18.0-py3-none-any.whl", hash = "sha256:c401f4f8377c4464e6db25fff06205fd89bdd83b65eb0488ed1b160f780e21de"},
{file = "filelock-3.18.0.tar.gz", hash = "sha256:adbc88eabb99d2fec8c9c1b229b171f18afa655400173ddc653d5d01501fb9f2"},
@@ -341,7 +357,7 @@ files = [
[package.extras]
docs = ["furo (>=2024.8.6)", "sphinx (>=8.1.3)", "sphinx-autodoc-typehints (>=3)"]
testing = ["covdefaults (>=2.3)", "coverage (>=7.6.10)", "diff-cover (>=9.2.1)", "pytest (>=8.3.4)", "pytest-asyncio (>=0.25.2)", "pytest-cov (>=6)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.28.1)"]
-typing = ["typing-extensions (>=4.12.2)"]
+typing = ["typing-extensions (>=4.12.2) ; python_version < \"3.11\""]
[[package]]
name = "fsspec"
@@ -349,6 +365,7 @@ version = "2025.3.2"
description = "File-system specification"
optional = false
python-versions = ">=3.9"
+groups = ["main", "dev"]
files = [
{file = "fsspec-2025.3.2-py3-none-any.whl", hash = "sha256:2daf8dc3d1dfa65b6aa37748d112773a7a08416f6c70d96b264c96476ecaf711"},
{file = "fsspec-2025.3.2.tar.gz", hash = "sha256:e52c77ef398680bbd6a98c0e628fbc469491282981209907bbc8aea76a04fdc6"},
@@ -388,6 +405,7 @@ version = "0.23.1"
description = "The official Python library for the groq API"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "groq-0.23.1-py3-none-any.whl", hash = "sha256:05fa38c3d0ad03c19c6185f98f6a73901c2a463e844fd067b79f7b05c8346946"},
{file = "groq-0.23.1.tar.gz", hash = "sha256:952e34895f9bfb78ab479e495d77b32180262e5c42f531ce3a1722d6e5a04dfb"},
@@ -407,6 +425,7 @@ version = "0.16.0"
description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1"
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86"},
{file = "h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1"},
@@ -418,6 +437,7 @@ version = "1.0.9"
description = "A minimal low-level HTTP client."
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55"},
{file = "httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8"},
@@ -439,6 +459,7 @@ version = "0.28.1"
description = "The next generation HTTP client."
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"},
{file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"},
@@ -451,7 +472,7 @@ httpcore = "==1.*"
idna = "*"
[package.extras]
-brotli = ["brotli", "brotlicffi"]
+brotli = ["brotli ; platform_python_implementation == \"CPython\"", "brotlicffi ; platform_python_implementation != \"CPython\""]
cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"]
http2 = ["h2 (>=3,<5)"]
socks = ["socksio (==1.*)"]
@@ -463,6 +484,7 @@ version = "0.4.0"
description = "Consume Server-Sent Event (SSE) messages with HTTPX."
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "httpx-sse-0.4.0.tar.gz", hash = "sha256:1e81a3a3070ce322add1d3529ed42eb5f70817f45ed6ec915ab753f961139721"},
{file = "httpx_sse-0.4.0-py3-none-any.whl", hash = "sha256:f329af6eae57eaa2bdfd962b42524764af68075ea87370a2de920af5341e318f"},
@@ -474,6 +496,7 @@ version = "0.30.2"
description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub"
optional = false
python-versions = ">=3.8.0"
+groups = ["main", "dev"]
files = [
{file = "huggingface_hub-0.30.2-py3-none-any.whl", hash = "sha256:68ff05969927058cfa41df4f2155d4bb48f5f54f719dd0390103eefa9b191e28"},
{file = "huggingface_hub-0.30.2.tar.gz", hash = "sha256:9a7897c5b6fd9dad3168a794a8998d6378210f5b9688d0dfc180b1a228dc2466"},
@@ -509,6 +532,7 @@ version = "3.10"
description = "Internationalized Domain Names in Applications (IDNA)"
optional = false
python-versions = ">=3.6"
+groups = ["main", "dev"]
files = [
{file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"},
{file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"},
@@ -523,6 +547,7 @@ version = "8.6.1"
description = "Read metadata from Python packages"
optional = false
python-versions = ">=3.9"
+groups = ["main"]
files = [
{file = "importlib_metadata-8.6.1-py3-none-any.whl", hash = "sha256:02a89390c1e15fdfdc0d7c6b25cb3e62650d0494005c97d6f148bf5b9787525e"},
{file = "importlib_metadata-8.6.1.tar.gz", hash = "sha256:310b41d755445d74569f993ccfc22838295d9fe005425094fad953d7f15c8580"},
@@ -532,12 +557,12 @@ files = [
zipp = ">=3.20"
[package.extras]
-check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"]
+check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""]
cover = ["pytest-cov"]
doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
enabler = ["pytest-enabler (>=2.2)"]
perf = ["ipython"]
-test = ["flufl.flake8", "importlib_resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"]
+test = ["flufl.flake8", "importlib_resources (>=1.3) ; python_version < \"3.9\"", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"]
type = ["pytest-mypy"]
[[package]]
@@ -546,6 +571,7 @@ version = "2.1.0"
description = "brain-dead simple config-ini parsing"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760"},
{file = "iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7"},
@@ -557,6 +583,7 @@ version = "0.9.0"
description = "Fast iterable JSON parser."
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "jiter-0.9.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:816ec9b60fdfd1fec87da1d7ed46c66c44ffec37ab2ef7de5b147b2fce3fd5ad"},
{file = "jiter-0.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9b1d3086f8a3ee0194ecf2008cf81286a5c3e540d977fa038ff23576c023c0ea"},
@@ -642,6 +669,7 @@ version = "4.23.0"
description = "An implementation of JSON Schema validation for Python"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "jsonschema-4.23.0-py3-none-any.whl", hash = "sha256:fbadb6f8b144a8f8cf9f0b89ba94501d143e50411a1278633f56a7acf7fd5566"},
{file = "jsonschema-4.23.0.tar.gz", hash = "sha256:d71497fef26351a33265337fa77ffeb82423f3ea21283cd9467bb03999266bc4"},
@@ -663,6 +691,7 @@ version = "2025.4.1"
description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "jsonschema_specifications-2025.4.1-py3-none-any.whl", hash = "sha256:4653bffbd6584f7de83a67e0d620ef16900b390ddc7939d56684d6c81e33f1af"},
{file = "jsonschema_specifications-2025.4.1.tar.gz", hash = "sha256:630159c9f4dbea161a6a2205c3011cc4f18ff381b189fff48bb39b9bf26ae608"},
@@ -677,6 +706,7 @@ version = "5.1.0"
description = "Python extension for MurmurHash (MurmurHash3), a set of fast and robust hash functions."
optional = false
python-versions = ">=3.9"
+groups = ["main"]
files = [
{file = "mmh3-5.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:eaf4ac5c6ee18ca9232238364d7f2a213278ae5ca97897cafaa123fcc7bb8bec"},
{file = "mmh3-5.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:48f9aa8ccb9ad1d577a16104834ac44ff640d8de8c0caed09a2300df7ce8460a"},
@@ -775,6 +805,7 @@ version = "1.0.1"
description = "Optional static typing for Python"
optional = false
python-versions = ">=3.7"
+groups = ["dev"]
files = [
{file = "mypy-1.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:71a808334d3f41ef011faa5a5cd8153606df5fc0b56de5b2e89566c8093a0c9a"},
{file = "mypy-1.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:920169f0184215eef19294fa86ea49ffd4635dedfdea2b57e45cb4ee85d5ccaf"},
@@ -821,6 +852,7 @@ version = "1.1.0"
description = "Type system extensions for programs checked with the mypy type checker."
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505"},
{file = "mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558"},
@@ -832,6 +864,7 @@ version = "1.26.4"
description = "Fundamental package for array computing in Python"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "numpy-1.26.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0"},
{file = "numpy-1.26.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a"},
@@ -877,6 +910,7 @@ version = "1.76.0"
description = "The official Python library for the openai API"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "openai-1.76.0-py3-none-any.whl", hash = "sha256:a712b50e78cf78e6d7b2a8f69c4978243517c2c36999756673e07a14ce37dc0a"},
{file = "openai-1.76.0.tar.gz", hash = "sha256:fd2bfaf4608f48102d6b74f9e11c5ecaa058b60dad9c36e409c12477dfd91fb2"},
@@ -903,6 +937,7 @@ version = "1.32.1"
description = "OpenTelemetry Python API"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "opentelemetry_api-1.32.1-py3-none-any.whl", hash = "sha256:bbd19f14ab9f15f0e85e43e6a958aa4cb1f36870ee62b7fd205783a112012724"},
{file = "opentelemetry_api-1.32.1.tar.gz", hash = "sha256:a5be71591694a4d9195caf6776b055aa702e964d961051a0715d05f8632c32fb"},
@@ -918,6 +953,7 @@ version = "0.53b1"
description = "Instrumentation Tools & Auto Instrumentation for OpenTelemetry Python"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "opentelemetry_instrumentation-0.53b1-py3-none-any.whl", hash = "sha256:c07850cecfbc51e8b357f56d5886ae5ccaa828635b220d0f5e78f941ea9a83ca"},
{file = "opentelemetry_instrumentation-0.53b1.tar.gz", hash = "sha256:0e69ca2c75727e8a300de671c4a2ec0e86e63a8e906beaa5d6c9f5228e8687e5"},
@@ -935,6 +971,7 @@ version = "0.39.4"
description = "OpenTelemetry Anthropic instrumentation"
optional = false
python-versions = "<4,>=3.9"
+groups = ["main"]
files = [
{file = "opentelemetry_instrumentation_anthropic-0.39.4-py3-none-any.whl", hash = "sha256:f3bebc66b5bfdb83fb6a238a15afbe81f690b3f5314cee76ecf8e35121711972"},
{file = "opentelemetry_instrumentation_anthropic-0.39.4.tar.gz", hash = "sha256:15a48d201c97db791b0a1d5e284956178e1d33923ce1c1b90a0735101b83a1a6"},
@@ -952,6 +989,7 @@ version = "0.39.4"
description = "OpenTelemetry Bedrock instrumentation"
optional = false
python-versions = "<4,>=3.9"
+groups = ["main"]
files = [
{file = "opentelemetry_instrumentation_bedrock-0.39.4-py3-none-any.whl", hash = "sha256:2e74d78b28f7d3928f13826477428aab2ea81e689a851514dda6bf787d0e43f3"},
{file = "opentelemetry_instrumentation_bedrock-0.39.4.tar.gz", hash = "sha256:78a988e58e72a11e29cdce4ddb8cfb790315c22d2e84539066fba8bc2c29da8e"},
@@ -971,6 +1009,7 @@ version = "0.39.4"
description = "OpenTelemetry Cohere instrumentation"
optional = false
python-versions = "<4,>=3.9"
+groups = ["main"]
files = [
{file = "opentelemetry_instrumentation_cohere-0.39.4-py3-none-any.whl", hash = "sha256:8408963b1fe1362ab84dd77723a98e54575bc71bf88e25d8252a8de94939773a"},
{file = "opentelemetry_instrumentation_cohere-0.39.4.tar.gz", hash = "sha256:0c1c209801dba0238119977e240acd05501a14a39850961c11effe47e4738780"},
@@ -988,6 +1027,7 @@ version = "0.39.4"
description = "OpenTelemetry Groq instrumentation"
optional = false
python-versions = "<4,>=3.9"
+groups = ["main"]
files = [
{file = "opentelemetry_instrumentation_groq-0.39.4-py3-none-any.whl", hash = "sha256:631e0d2ada72f498721bc06be2bcf68ac656d3fac56180e6656bc7d7e53febc4"},
{file = "opentelemetry_instrumentation_groq-0.39.4.tar.gz", hash = "sha256:8ff5dd2e904af2128c9dd4e79d08264421750ca855731b7a983d6962df7244ca"},
@@ -1005,6 +1045,7 @@ version = "0.39.4"
description = "OpenTelemetry OpenAI instrumentation"
optional = false
python-versions = "<4,>=3.9"
+groups = ["main"]
files = [
{file = "opentelemetry_instrumentation_openai-0.39.4-py3-none-any.whl", hash = "sha256:94568157e29cb1e0780333b4c3eef42ae6cebb9dbf17383c2b8abcd1fd453bb8"},
{file = "opentelemetry_instrumentation_openai-0.39.4.tar.gz", hash = "sha256:6eaba7ddfe051fed9e33faccc580f38e8ca0da465e34a5a5848bfccfae5b4e21"},
@@ -1023,6 +1064,7 @@ version = "0.39.4"
description = "OpenTelemetry Replicate instrumentation"
optional = false
python-versions = "<4,>=3.9"
+groups = ["main"]
files = [
{file = "opentelemetry_instrumentation_replicate-0.39.4-py3-none-any.whl", hash = "sha256:4be73ca3af3afb2444b115618a001842503bea8f4ea0a640f70958d52a420b23"},
{file = "opentelemetry_instrumentation_replicate-0.39.4.tar.gz", hash = "sha256:7683ea3314e68aa2db3a0146a6778790ba64e04bc7e92254014b752c2e7bad40"},
@@ -1040,6 +1082,7 @@ version = "1.32.1"
description = "OpenTelemetry Python Proto"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "opentelemetry_proto-1.32.1-py3-none-any.whl", hash = "sha256:fe56df31033ab0c40af7525f8bf4c487313377bbcfdf94184b701a8ccebc800e"},
{file = "opentelemetry_proto-1.32.1.tar.gz", hash = "sha256:bc6385ccf87768f029371535312071a2d09e6c9ebf119ac17dbc825a6a56ba53"},
@@ -1054,6 +1097,7 @@ version = "1.32.1"
description = "OpenTelemetry Python SDK"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "opentelemetry_sdk-1.32.1-py3-none-any.whl", hash = "sha256:bba37b70a08038613247bc42beee5a81b0ddca422c7d7f1b097b32bf1c7e2f17"},
{file = "opentelemetry_sdk-1.32.1.tar.gz", hash = "sha256:8ef373d490961848f525255a42b193430a0637e064dd132fd2a014d94792a092"},
@@ -1070,6 +1114,7 @@ version = "0.53b1"
description = "OpenTelemetry Semantic Conventions"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "opentelemetry_semantic_conventions-0.53b1-py3-none-any.whl", hash = "sha256:21df3ed13f035f8f3ea42d07cbebae37020367a53b47f1ebee3b10a381a00208"},
{file = "opentelemetry_semantic_conventions-0.53b1.tar.gz", hash = "sha256:4c5a6fede9de61211b2e9fc1e02e8acacce882204cd770177342b6a3be682992"},
@@ -1085,6 +1130,7 @@ version = "0.4.3"
description = "OpenTelemetry Semantic Conventions Extension for Large Language Models"
optional = false
python-versions = "<4,>=3.9"
+groups = ["main"]
files = [
{file = "opentelemetry_semantic_conventions_ai-0.4.3-py3-none-any.whl", hash = "sha256:9ff60bbf38c8a891c20a355b4ca1948380361e27412c3ead264de0d050fa2570"},
{file = "opentelemetry_semantic_conventions_ai-0.4.3.tar.gz", hash = "sha256:761a68a7e99436dfc53cfe1f99507316aa0114ac480f0c42743b9320b7c94831"},
@@ -1096,6 +1142,7 @@ version = "5.4.0"
description = "Orderly set"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "orderly_set-5.4.0-py3-none-any.whl", hash = "sha256:f0192a7f9ae3385b587b71688353fae491d1ca45878496eb71ea118be1623639"},
{file = "orderly_set-5.4.0.tar.gz", hash = "sha256:c8ff5ba824abe4eebcbbdd3f646ff3648ad0dd52239319d90056d8d30b6cccdd"},
@@ -1107,6 +1154,7 @@ version = "25.0"
description = "Core utilities for Python packages"
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484"},
{file = "packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f"},
@@ -1118,6 +1166,7 @@ version = "2.2.3"
description = "Powerful data structures for data analysis, time series, and statistics"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "pandas-2.2.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1948ddde24197a0f7add2bdc4ca83bf2b1ef84a1bc8ccffd95eda17fd836ecb5"},
{file = "pandas-2.2.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:381175499d3802cde0eabbaf6324cce0c4f5d52ca6f8c377c29ad442f50f6348"},
@@ -1204,6 +1253,7 @@ version = "1.20.2"
description = "parse() is the opposite of format()"
optional = false
python-versions = "*"
+groups = ["main", "dev"]
files = [
{file = "parse-1.20.2-py2.py3-none-any.whl", hash = "sha256:967095588cb802add9177d0c0b6133b5ba33b1ea9007ca800e526f42a85af558"},
{file = "parse-1.20.2.tar.gz", hash = "sha256:b41d604d16503c79d81af5165155c0b20f6c8d6c559efa66b4b695c3e5a0a0ce"},
@@ -1215,6 +1265,7 @@ version = "0.6.4"
description = "Simplifies to build parse types based on the parse module"
optional = false
python-versions = "!=3.0.*,!=3.1.*,>=2.7"
+groups = ["dev"]
files = [
{file = "parse_type-0.6.4-py2.py3-none-any.whl", hash = "sha256:83d41144a82d6b8541127bf212dd76c7f01baff680b498ce8a4d052a7a5bce4c"},
{file = "parse_type-0.6.4.tar.gz", hash = "sha256:5e1ec10440b000c3f818006033372939e693a9ec0176f446d9303e4db88489a6"},
@@ -1225,9 +1276,9 @@ parse = {version = ">=1.18.0", markers = "python_version >= \"3.0\""}
six = ">=1.15"
[package.extras]
-develop = ["build (>=0.5.1)", "coverage (>=4.4)", "pylint", "pytest (<5.0)", "pytest (>=5.0)", "pytest-cov", "pytest-html (>=1.19.0)", "ruff", "setuptools", "setuptools-scm", "tox (>=2.8,<4.0)", "twine (>=1.13.0)", "virtualenv (<20.22.0)", "virtualenv (>=20.0.0)", "wheel"]
+develop = ["build (>=0.5.1)", "coverage (>=4.4)", "pylint", "pytest (<5.0) ; python_version < \"3.0\"", "pytest (>=5.0) ; python_version >= \"3.0\"", "pytest-cov", "pytest-html (>=1.19.0)", "ruff ; python_version >= \"3.7\"", "setuptools", "setuptools-scm", "tox (>=2.8,<4.0)", "twine (>=1.13.0)", "virtualenv (<20.22.0) ; python_version <= \"3.6\"", "virtualenv (>=20.0.0) ; python_version > \"3.6\"", "wheel"]
docs = ["Sphinx (>=1.6)", "sphinx-bootstrap-theme (>=0.6.0)"]
-testing = ["pytest (<5.0)", "pytest (>=5.0)", "pytest-html (>=1.19.0)"]
+testing = ["pytest (<5.0) ; python_version < \"3.0\"", "pytest (>=5.0) ; python_version >= \"3.0\"", "pytest-html (>=1.19.0)"]
[[package]]
name = "pluggy"
@@ -1235,6 +1286,7 @@ version = "1.5.0"
description = "plugin and hook calling mechanisms for python"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"},
{file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"},
@@ -1250,6 +1302,7 @@ version = "5.29.4"
description = ""
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "protobuf-5.29.4-cp310-abi3-win32.whl", hash = "sha256:13eb236f8eb9ec34e63fc8b1d6efd2777d062fa6aaa68268fb67cf77f6839ad7"},
{file = "protobuf-5.29.4-cp310-abi3-win_amd64.whl", hash = "sha256:bcefcdf3976233f8a502d265eb65ea740c989bacc6c30a58290ed0e519eb4b8d"},
@@ -1270,6 +1323,7 @@ version = "19.0.1"
description = "Python library for Apache Arrow"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "pyarrow-19.0.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:fc28912a2dc924dddc2087679cc8b7263accc71b9ff025a1362b004711661a69"},
{file = "pyarrow-19.0.1-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:fca15aabbe9b8355800d923cc2e82c8ef514af321e18b437c3d782aa884eaeec"},
@@ -1324,6 +1378,7 @@ version = "2.11.3"
description = "Data validation using Python type hints"
optional = false
python-versions = ">=3.9"
+groups = ["main", "dev"]
files = [
{file = "pydantic-2.11.3-py3-none-any.whl", hash = "sha256:a082753436a07f9ba1289c6ffa01cd93db3548776088aa917cc43b63f68fa60f"},
{file = "pydantic-2.11.3.tar.gz", hash = "sha256:7471657138c16adad9322fe3070c0116dd6c3ad8d649300e3cbdfe91f4db4ec3"},
@@ -1337,7 +1392,7 @@ typing-inspection = ">=0.4.0"
[package.extras]
email = ["email-validator (>=2.0.0)"]
-timezone = ["tzdata"]
+timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows\""]
[[package]]
name = "pydantic-core"
@@ -1345,6 +1400,7 @@ version = "2.33.1"
description = "Core functionality for Pydantic validation and serialization"
optional = false
python-versions = ">=3.9"
+groups = ["main", "dev"]
files = [
{file = "pydantic_core-2.33.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3077cfdb6125cc8dab61b155fdd714663e401f0e6883f9632118ec12cf42df26"},
{file = "pydantic_core-2.33.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8ffab8b2908d152e74862d276cf5017c81a2f3719f14e8e3e8d6b83fda863927"},
@@ -1456,6 +1512,7 @@ version = "7.4.4"
description = "pytest: simple powerful testing with Python"
optional = false
python-versions = ">=3.7"
+groups = ["dev"]
files = [
{file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"},
{file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"},
@@ -1478,6 +1535,7 @@ version = "0.23.8"
description = "Pytest support for asyncio"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "pytest_asyncio-0.23.8-py3-none-any.whl", hash = "sha256:50265d892689a5faefb84df80819d1ecef566eb3549cf915dfb33569359d1ce2"},
{file = "pytest_asyncio-0.23.8.tar.gz", hash = "sha256:759b10b33a6dc61cce40a8bd5205e302978bbbcc00e279a8b61d9a6a3c82e4d3"},
@@ -1496,6 +1554,7 @@ version = "1.7.0"
description = "Adds the ability to retry flaky tests in CI environments"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "pytest_retry-1.7.0-py3-none-any.whl", hash = "sha256:a2dac85b79a4e2375943f1429479c65beb6c69553e7dae6b8332be47a60954f4"},
{file = "pytest_retry-1.7.0.tar.gz", hash = "sha256:f8d52339f01e949df47c11ba9ee8d5b362f5824dff580d3870ec9ae0057df80f"},
@@ -1513,6 +1572,7 @@ version = "2.9.0.post0"
description = "Extensions to the standard Python datetime module"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
+groups = ["dev"]
files = [
{file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"},
{file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"},
@@ -1527,6 +1587,7 @@ version = "1.1.0"
description = "Read key-value pairs from a .env file and set them as environment variables"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "python_dotenv-1.1.0-py3-none-any.whl", hash = "sha256:d7c01d9e2293916c18baf562d95698754b0dbbb5e74d457c45d4f6561fb9d55d"},
{file = "python_dotenv-1.1.0.tar.gz", hash = "sha256:41f90bc6f5f177fb41f53e87666db362025010eb28f60a01c9143bfa33a2b2d5"},
@@ -1541,6 +1602,7 @@ version = "2025.2"
description = "World timezone definitions, modern and historical"
optional = false
python-versions = "*"
+groups = ["dev"]
files = [
{file = "pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00"},
{file = "pytz-2025.2.tar.gz", hash = "sha256:360b9e3dbb49a209c21ad61809c7fb453643e048b38924c765813546746e81c3"},
@@ -1552,6 +1614,7 @@ version = "6.0.2"
description = "YAML parser and emitter for Python"
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"},
{file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"},
@@ -1614,6 +1677,7 @@ version = "0.36.2"
description = "JSON Referencing + Python"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "referencing-0.36.2-py3-none-any.whl", hash = "sha256:e8699adbbf8b5c7de96d8ffa0eb5c158b3beafce084968e2ea8bb08c6794dcd0"},
{file = "referencing-0.36.2.tar.gz", hash = "sha256:df2e89862cd09deabbdba16944cc3f10feb6b3e6f18e902f7cc25609a34775aa"},
@@ -1630,6 +1694,7 @@ version = "2024.11.6"
description = "Alternative regular expression module, to replace re."
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "regex-2024.11.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ff590880083d60acc0433f9c3f713c51f7ac6ebb9adf889c79a261ecf541aa91"},
{file = "regex-2024.11.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:658f90550f38270639e83ce492f27d2c8d2cd63805c65a13a14d36ca126753f0"},
@@ -1733,6 +1798,7 @@ version = "1.0.6"
description = "Python client for Replicate"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "replicate-1.0.6-py3-none-any.whl", hash = "sha256:d544f837dc7e9dc3b3c1df60a145c7d6f362d6719b719793a44a4be28837103d"},
{file = "replicate-1.0.6.tar.gz", hash = "sha256:b8a0f1649ed4146c3d624e22a418b8c6decce9346cffc110c90fde5995c46e60"},
@@ -1750,6 +1816,7 @@ version = "2.32.3"
description = "Python HTTP for Humans."
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"},
{file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"},
@@ -1771,6 +1838,7 @@ version = "0.24.0"
description = "Python bindings to Rust's persistent data structures (rpds)"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "rpds_py-0.24.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:006f4342fe729a368c6df36578d7a348c7c716be1da0a1a0f86e3021f8e98724"},
{file = "rpds_py-0.24.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2d53747da70a4e4b17f559569d5f9506420966083a31c5fbd84e764461c4444b"},
@@ -1894,6 +1962,7 @@ version = "0.5.7"
description = "An extremely fast Python linter and code formatter, written in Rust."
optional = false
python-versions = ">=3.7"
+groups = ["dev"]
files = [
{file = "ruff-0.5.7-py3-none-linux_armv6l.whl", hash = "sha256:548992d342fc404ee2e15a242cdbea4f8e39a52f2e7752d0e4cbe88d2d2f416a"},
{file = "ruff-0.5.7-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:00cc8872331055ee017c4f1071a8a31ca0809ccc0657da1d154a1d2abac5c0be"},
@@ -1921,6 +1990,7 @@ version = "1.17.0"
description = "Python 2 and 3 compatibility utilities"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
+groups = ["dev"]
files = [
{file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"},
{file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"},
@@ -1932,6 +2002,7 @@ version = "1.3.1"
description = "Sniff out which async library your code is running under"
optional = false
python-versions = ">=3.7"
+groups = ["main", "dev"]
files = [
{file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"},
{file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"},
@@ -1943,6 +2014,7 @@ version = "0.9.0"
description = "tiktoken is a fast BPE tokeniser for use with OpenAI's models"
optional = false
python-versions = ">=3.9"
+groups = ["main"]
files = [
{file = "tiktoken-0.9.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:586c16358138b96ea804c034b8acf3f5d3f0258bd2bc3b0227af4af5d622e382"},
{file = "tiktoken-0.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d9c59ccc528c6c5dd51820b3474402f69d9a9e1d656226848ad68a8d5b2e5108"},
@@ -1990,6 +2062,7 @@ version = "0.21.1"
description = ""
optional = false
python-versions = ">=3.9"
+groups = ["main", "dev"]
files = [
{file = "tokenizers-0.21.1-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:e78e413e9e668ad790a29456e677d9d3aa50a9ad311a40905d6861ba7692cf41"},
{file = "tokenizers-0.21.1-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:cd51cd0a91ecc801633829fcd1fda9cf8682ed3477c6243b9a095539de4aecf3"},
@@ -2022,6 +2095,8 @@ version = "2.2.1"
description = "A lil' TOML parser"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
+markers = "python_version < \"3.11\""
files = [
{file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"},
{file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"},
@@ -2063,6 +2138,7 @@ version = "4.67.1"
description = "Fast, Extensible Progress Meter"
optional = false
python-versions = ">=3.7"
+groups = ["main", "dev"]
files = [
{file = "tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2"},
{file = "tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2"},
@@ -2084,6 +2160,7 @@ version = "4.23.0.20241208"
description = "Typing stubs for jsonschema"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "types_jsonschema-4.23.0.20241208-py3-none-any.whl", hash = "sha256:87934bd9231c99d8eff94cacfc06ba668f7973577a9bd9e1f9de957c5737313e"},
{file = "types_jsonschema-4.23.0.20241208.tar.gz", hash = "sha256:e8b15ad01f290ecf6aea53f93fbdf7d4730e4600313e89e8a7f95622f7e87b7c"},
@@ -2098,6 +2175,7 @@ version = "5.29.1.20250403"
description = "Typing stubs for protobuf"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "types_protobuf-5.29.1.20250403-py3-none-any.whl", hash = "sha256:c71de04106a2d54e5b2173d0a422058fae0ef2d058d70cf369fb797bf61ffa59"},
{file = "types_protobuf-5.29.1.20250403.tar.gz", hash = "sha256:7ff44f15022119c9d7558ce16e78b2d485bf7040b4fadced4dd069bb5faf77a2"},
@@ -2109,6 +2187,7 @@ version = "2.9.0.20241206"
description = "Typing stubs for python-dateutil"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "types_python_dateutil-2.9.0.20241206-py3-none-any.whl", hash = "sha256:e248a4bc70a486d3e3ec84d0dc30eec3a5f979d6e7ee4123ae043eedbb987f53"},
{file = "types_python_dateutil-2.9.0.20241206.tar.gz", hash = "sha256:18f493414c26ffba692a72369fea7a154c502646301ebfe3d56a04b3767284cb"},
@@ -2120,6 +2199,7 @@ version = "2.32.0.20250328"
description = "Typing stubs for requests"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "types_requests-2.32.0.20250328-py3-none-any.whl", hash = "sha256:72ff80f84b15eb3aa7a8e2625fffb6a93f2ad5a0c20215fc1dcfa61117bcb2a2"},
{file = "types_requests-2.32.0.20250328.tar.gz", hash = "sha256:c9e67228ea103bd811c96984fac36ed2ae8da87a36a633964a21f199d60baf32"},
@@ -2134,6 +2214,7 @@ version = "4.13.2"
description = "Backported and Experimental Type Hints for Python 3.8+"
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "typing_extensions-4.13.2-py3-none-any.whl", hash = "sha256:a439e7c04b49fec3e5d3e2beaa21755cadbbdc391694e28ccdd36ca4a1408f8c"},
{file = "typing_extensions-4.13.2.tar.gz", hash = "sha256:e6c81219bd689f51865d9e372991c540bda33a0379d5573cddb9a3a23f7caaef"},
@@ -2145,6 +2226,7 @@ version = "0.4.0"
description = "Runtime typing introspection tools"
optional = false
python-versions = ">=3.9"
+groups = ["main", "dev"]
files = [
{file = "typing_inspection-0.4.0-py3-none-any.whl", hash = "sha256:50e72559fcd2a6367a19f7a7e610e6afcb9fac940c650290eed893d61386832f"},
{file = "typing_inspection-0.4.0.tar.gz", hash = "sha256:9765c87de36671694a67904bf2c96e395be9c6439bb6c87b5142569dcdd65122"},
@@ -2159,6 +2241,7 @@ version = "2025.2"
description = "Provider of IANA time zone data"
optional = false
python-versions = ">=2"
+groups = ["dev"]
files = [
{file = "tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8"},
{file = "tzdata-2025.2.tar.gz", hash = "sha256:b60a638fcc0daffadf82fe0f57e53d06bdec2f36c4df66280ae79bce6bd6f2b9"},
@@ -2170,13 +2253,14 @@ version = "2.4.0"
description = "HTTP library with thread-safe connection pooling, file post, and more."
optional = false
python-versions = ">=3.9"
+groups = ["main", "dev"]
files = [
{file = "urllib3-2.4.0-py3-none-any.whl", hash = "sha256:4e16665048960a0900c702d4a66415956a584919c03361cac9f1df5c5dd7e813"},
{file = "urllib3-2.4.0.tar.gz", hash = "sha256:414bc6535b787febd7567804cc015fee39daab8ad86268f1310a9250697de466"},
]
[package.extras]
-brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"]
+brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""]
h2 = ["h2 (>=4,<5)"]
socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"]
zstd = ["zstandard (>=0.18.0)"]
@@ -2187,6 +2271,7 @@ version = "1.17.2"
description = "Module for decorators, wrappers and monkey patching."
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "wrapt-1.17.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3d57c572081fed831ad2d26fd430d565b76aa277ed1d30ff4d40670b1c0dd984"},
{file = "wrapt-1.17.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b5e251054542ae57ac7f3fba5d10bfff615b6c2fb09abeb37d2f1463f841ae22"},
@@ -2275,20 +2360,21 @@ version = "3.21.0"
description = "Backport of pathlib-compatible object wrapper for zip files"
optional = false
python-versions = ">=3.9"
+groups = ["main"]
files = [
{file = "zipp-3.21.0-py3-none-any.whl", hash = "sha256:ac1bbe05fd2991f160ebce24ffbac5f6d11d83dc90891255885223d42b3cd931"},
{file = "zipp-3.21.0.tar.gz", hash = "sha256:2c9958f6430a2040341a52eb608ed6dd93ef4392e02ffe219417c1b28b5dd1f4"},
]
[package.extras]
-check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"]
+check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""]
cover = ["pytest-cov"]
doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
enabler = ["pytest-enabler (>=2.2)"]
-test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"]
+test = ["big-O", "importlib-resources ; python_version < \"3.9\"", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"]
type = ["pytest-mypy"]
[metadata]
-lock-version = "2.0"
+lock-version = "2.1"
python-versions = ">=3.9,<4"
-content-hash = "6b18fb6088ede49c2e52a1103a46481d57959171b5f2f6ee13cc3089a3804f5d"
+content-hash = "a504b0d639ca08283dd45b6af246f7e5f2a6ed5b26fb58e90af77d320ef2045a"
diff --git a/pyproject.toml b/pyproject.toml
index 73f2c3d4..9a81cf79 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,5 +1,8 @@
[project]
name = "humanloop"
+description = "The Humanloop Python Library"
+authors = []
+keywords = ["ai", "machine-learning", "llm", "sdk", "humanloop"]
[tool.poetry]
name = "humanloop"
@@ -54,7 +57,7 @@ pydantic = ">= 1.9.2"
pydantic-core = "^2.18.2"
typing_extensions = ">= 4.0.0"
-[tool.poetry.dev-dependencies]
+[tool.poetry.group.dev.dependencies]
mypy = "1.0.1"
pytest = "^7.4.0"
pytest-asyncio = "^0.23.5"
diff --git a/src/humanloop/client.py b/src/humanloop/client.py
index 2daa7769..a2510117 100644
--- a/src/humanloop/client.py
+++ b/src/humanloop/client.py
@@ -347,6 +347,9 @@ def agent():
path=path,
attributes=attributes,
)
+
+ def sync(self):
+ return "Hello world"
class AsyncHumanloop(AsyncBaseHumanloop):
diff --git a/src/humanloop/sync/__init__.py b/src/humanloop/sync/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/conftest.py b/tests/conftest.py
index 80e3b336..c3e35481 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -192,7 +192,10 @@ def api_keys() -> APIKeys:
@pytest.fixture(scope="session")
def humanloop_client(api_keys: APIKeys) -> Humanloop:
- return Humanloop(api_key=api_keys.humanloop)
+ return Humanloop(
+ api_key=api_keys.humanloop,
+ base_url="http://localhost:80/v5",
+ )
@pytest.fixture(scope="session", autouse=True)
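The fixture change above hard-codes a local gateway, which ties the suite to a running local stack. A hedged variant of the same fixture; the `HUMANLOOP_BASE_URL` variable is an assumption for illustration, not something the SDK reads:
```python
import os

import pytest
from humanloop import Humanloop

@pytest.fixture(scope="session")
def humanloop_client(api_keys) -> Humanloop:
    # Hypothetical override so the suite can also target other environments;
    # falls back to the local gateway used in this patch.
    return Humanloop(
        api_key=api_keys.humanloop,
        base_url=os.environ.get("HUMANLOOP_BASE_URL", "http://localhost:80/v5"),
    )
```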
diff --git a/tests/sync/test_sync.py b/tests/sync/test_sync.py
new file mode 100644
index 00000000..218d89a9
--- /dev/null
+++ b/tests/sync/test_sync.py
@@ -0,0 +1,95 @@
+import pytest
+from humanloop import Humanloop, FileType
+from pathlib import Path
+from typing import List, NamedTuple
+
+
+class SyncableFile(NamedTuple):
+ path: str
+ type: FileType
+ model: str
+ id: str = ""
+
+
+@pytest.fixture
+def test_file_structure(humanloop_client: Humanloop, get_test_path) -> List[SyncableFile]:
+ """Creates a predefined structure of files in Humanloop for testing sync"""
+ files: List[SyncableFile] = [
+ SyncableFile(
+ path="prompts/gpt-4",
+ type="prompt",
+ model="gpt-4",
+ ),
+ SyncableFile(
+ path="prompts/gpt-4o",
+ type="prompt",
+ model="gpt-4o",
+ ),
+ SyncableFile(
+ path="prompts/nested/complex/gpt-4o",
+ type="prompt",
+ model="gpt-4o",
+ ),
+ SyncableFile(
+ path="agents/gpt-4",
+ type="agent",
+ model="gpt-4",
+ ),
+ SyncableFile(
+ path="agents/gpt-4o",
+ type="agent",
+ model="gpt-4o",
+ ),
+ ]
+
+ # Create the files in Humanloop
+ created_files = []
+ for file in files:
+ full_path = get_test_path(file.path)
+ if file.type == "prompt":
+ response = humanloop_client.prompts.upsert(
+ path=full_path,
+ model=file.model,
+ )
+ elif file.type == "agent":
+ response = humanloop_client.agents.upsert(
+ path=full_path,
+ model=file.model,
+ )
+ created_files.append(SyncableFile(path=full_path, type=file.type, model=file.model, id=response.id))
+
+ return created_files
+
+
+@pytest.fixture
+def cleanup_local_files():
+ """Cleanup any locally synced files after tests"""
+ yield
+ # Clean up the local humanloop directory after tests
+ local_dir = Path("humanloop")
+ if local_dir.exists():
+ import shutil
+
+ shutil.rmtree(local_dir)
+
+
+def test_sync_basic(humanloop_client: Humanloop, test_file_structure: List[SyncableFile], cleanup_local_files):
+ """Test that humanloop.sync() correctly syncs remote files to local filesystem"""
+ # Run the sync
+ successful_files = humanloop_client.sync()
+
+ # Verify each file was synced correctly
+ for file in test_file_structure:
+ # Get the extension based on file type: .prompt, .agent
+ extension = f".{file.type}"
+
+ # The local path should mirror the remote path structure
+ local_path = Path("humanloop") / f"{file.path}{extension}"
+
+ # Basic assertions
+ assert local_path.exists(), f"Expected synced file at {local_path}"
+ assert local_path.parent.exists(), f"Expected directory at {local_path.parent}"
+
+ # Verify it's not empty
+ content = local_path.read_text()
+ assert content, f"File at {local_path} should not be empty"
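The assertions above rely on a simple remote-path to local-path mapping. A standalone sketch of that mapping (the helper name is illustrative):
```python
from pathlib import Path

def expected_local_path(remote_path: str, file_type: str) -> Path:
    # "prompts/gpt-4" with type "prompt" maps to humanloop/prompts/gpt-4.prompt
    return Path("humanloop") / f"{remote_path}.{file_type}"

assert expected_local_path("prompts/gpt-4", "prompt") == Path("humanloop/prompts/gpt-4.prompt")
assert expected_local_path("agents/gpt-4o", "agent") == Path("humanloop/agents/gpt-4o.agent")
```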
From 0834825159d624192121b0aec8ad3eb90e54d7bb Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Thu, 24 Apr 2025 15:05:07 +0100
Subject: [PATCH 03/39] fix type error in test
---
tests/sync/test_sync.py | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/tests/sync/test_sync.py b/tests/sync/test_sync.py
index 218d89a9..af5fd6b1 100644
--- a/tests/sync/test_sync.py
+++ b/tests/sync/test_sync.py
@@ -1,7 +1,7 @@
-import pytest
-from humanloop import Humanloop, FileType
+from typing import List, NamedTuple, Union
from pathlib import Path
-from typing import List, NamedTuple
+import pytest
+from humanloop import Humanloop, FileType, AgentResponse, PromptResponse
class SyncableFile(NamedTuple):
@@ -46,6 +46,7 @@ def test_file_structure(humanloop_client: Humanloop, get_test_path) -> List[Sync
created_files = []
for file in files:
full_path = get_test_path(file.path)
+ response: Union[AgentResponse, PromptResponse]
if file.type == "prompt":
response = humanloop_client.prompts.upsert(
path=full_path,
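Why the pre-declared annotation fixes the mypy error: without it, mypy pins the variable to the type of the first assignment and rejects the other branch. A self-contained sketch, with stub classes standing in for the real response models:
```python
from typing import Union

class PromptResponse: ...
class AgentResponse: ...

def upsert(kind: str) -> Union[PromptResponse, AgentResponse]:
    # Declaring the union up front lets both branch assignments type-check.
    response: Union[PromptResponse, AgentResponse]
    if kind == "prompt":
        response = PromptResponse()
    else:
        response = AgentResponse()
    return response
```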
From 4ff91803ddabcdf14615577b85c640c0b51da634 Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Thu, 24 Apr 2025 15:42:07 +0100
Subject: [PATCH 04/39] basic sync functionality
---
.fernignore | 1 +
src/humanloop/agents/raw_client.py | 2 +-
src/humanloop/client.py | 33 ++++++-
src/humanloop/prompts/raw_client.py | 2 +-
src/humanloop/sync/__init__.py | 3 +
src/humanloop/sync/sync_utils.py | 130 ++++++++++++++++++++++++++++
tests/conftest.py | 6 ++++--
7 files changed, 169 insertions(+), 8 deletions(-)
create mode 100644 src/humanloop/sync/sync_utils.py
diff --git a/.fernignore b/.fernignore
index 112f779b..e7ec8aee 100644
--- a/.fernignore
+++ b/.fernignore
@@ -13,6 +13,7 @@ mypy.ini
README.md
src/humanloop/decorators
src/humanloop/otel
+src/humanloop/sync
## Tests
diff --git a/src/humanloop/agents/raw_client.py b/src/humanloop/agents/raw_client.py
index 08e04bab..8fa886e0 100644
--- a/src/humanloop/agents/raw_client.py
+++ b/src/humanloop/agents/raw_client.py
@@ -1897,7 +1897,7 @@ def serialize(
)
try:
if 200 <= _response.status_code < 300:
- return HttpResponse(response=_response, data=None)
+ return HttpResponse(response=_response, data=_response.text)
if _response.status_code == 422:
raise UnprocessableEntityError(
typing.cast(
diff --git a/src/humanloop/client.py b/src/humanloop/client.py
index a2510117..e850de9c 100644
--- a/src/humanloop/client.py
+++ b/src/humanloop/client.py
@@ -24,6 +24,7 @@
from humanloop.otel.processor import HumanloopSpanProcessor
from humanloop.prompt_utils import populate_template
from humanloop.prompts.client import PromptsClient
+from humanloop.sync import sync
class ExtendedEvalsClient(EvaluationsClient):
@@ -82,8 +83,9 @@ class Humanloop(BaseHumanloop):
"""
See docstring of :class:`BaseHumanloop`.
- This class extends the base client with custom evaluation utilities
- and decorators for declaring Files in code.
+ This class extends the base client with custom evaluation utilities,
+ decorators for declaring Files in code, and utilities for syncing
+ files between Humanloop and the local filesystem.
"""
def __init__(
@@ -348,8 +350,31 @@ def agent():
attributes=attributes,
)
- def sync(self):
- return "Hello world"
+ def sync(self) -> List[str]:
+ """Sync prompt and agent files from Humanloop to local filesystem.
+
+ This method will:
+ 1. Fetch all prompt and agent files from your Humanloop workspace
+ 2. Save them to the local filesystem in a 'humanloop/' directory
+ 3. Maintain the same directory structure as in Humanloop
+ 4. Add appropriate file extensions (.prompt or .agent)
+
+ Currently only supports syncing prompt and agent files. Other file types will be skipped.
+
+ The files will be saved with the following structure:
+ ```
+ humanloop/
+ ├── prompts/
+ │ ├── my_prompt.prompt
+ │ └── nested/
+ │ └── another_prompt.prompt
+ └── agents/
+ └── my_agent.agent
+ ```
+
+ :return: List of successfully processed file paths
+ """
+ return sync(self)
class AsyncHumanloop(AsyncBaseHumanloop):
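A usage sketch for the new method; the API key is a placeholder, and the output layout is the one documented in the docstring above:
```python
from humanloop import Humanloop

client = Humanloop(api_key="hl_...")  # placeholder key
synced = client.sync()  # writes .prompt/.agent files under ./humanloop/
print(f"Synced {len(synced)} files")
```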
diff --git a/src/humanloop/prompts/raw_client.py b/src/humanloop/prompts/raw_client.py
index e0700ae7..b6ac7f09 100644
--- a/src/humanloop/prompts/raw_client.py
+++ b/src/humanloop/prompts/raw_client.py
@@ -1804,7 +1804,7 @@ def serialize(
)
try:
if 200 <= _response.status_code < 300:
- return HttpResponse(response=_response, data=None)
+ return HttpResponse(response=_response, data=_response.text)
if _response.status_code == 422:
raise UnprocessableEntityError(
typing.cast(
diff --git a/src/humanloop/sync/__init__.py b/src/humanloop/sync/__init__.py
index e69de29b..7b55e70e 100644
--- a/src/humanloop/sync/__init__.py
+++ b/src/humanloop/sync/__init__.py
@@ -0,0 +1,3 @@
+from humanloop.sync.sync_utils import sync
+
+__all__ = ["sync"]
diff --git a/src/humanloop/sync/sync_utils.py b/src/humanloop/sync/sync_utils.py
new file mode 100644
index 00000000..1e54abba
--- /dev/null
+++ b/src/humanloop/sync/sync_utils.py
@@ -0,0 +1,130 @@
+import os
+import logging
+from pathlib import Path
+import concurrent.futures
+from typing import List, TYPE_CHECKING, Union
+
+from humanloop.types import FileType, PromptResponse, AgentResponse
+from humanloop.core.api_error import ApiError
+
+if TYPE_CHECKING:
+ from humanloop.base_client import BaseHumanloop
+
+# Set up logging
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO)
+console_handler = logging.StreamHandler()
+formatter = logging.Formatter("%(message)s")
+console_handler.setFormatter(formatter)
+if not logger.hasHandlers():
+ logger.addHandler(console_handler)
+
+def _save_serialized_file(serialized_content: str, file_path: str, file_type: FileType) -> None:
+ """Save serialized file to local filesystem.
+
+ :param serialized_content: The content to save
+ :param file_path: The path where to save the file
+ :param file_type: The type of file (prompt or agent)
+ """
+ try:
+ # Create full path including humanloop/ prefix
+ full_path = Path("humanloop") / file_path
+ # Create directory if it doesn't exist
+ full_path.parent.mkdir(parents=True, exist_ok=True)
+
+ # Add file type extension
+ new_path = full_path.parent / f"{full_path.stem}.{file_type}"
+
+ # Write content to file
+ with open(new_path, "w") as f:
+ f.write(serialized_content)
+ logger.info(f"Syncing {file_type} {file_path}")
+ except Exception as e:
+ logger.error(f"Failed to sync {file_type} {file_path}: {str(e)}")
+ raise
+
+def _process_file(client: "BaseHumanloop", file: Union[PromptResponse, AgentResponse]) -> None:
+ """Process a single file by serializing and saving it.
+
+ Currently only supports prompt and agent files. Other file types will be skipped.
+
+ :param client: Humanloop client instance
+ :param file: The file to process (must be a PromptResponse or AgentResponse)
+ """
+ try:
+ # Serialize the file based on its type
+ try:
+ if file.type == "prompt":
+ serialized = client.prompts.serialize(id=file.id)
+ elif file.type == "agent":
+ serialized = client.agents.serialize(id=file.id)
+ else:
+ logger.warning(f"Skipping unsupported file type: {file.type}")
+ return
+ except ApiError as e:
+ # The SDK returns the YAML content in the error body when it can't parse the response as JSON
+ if e.status_code == 200:
+ serialized = e.body
+ else:
+ raise
+ except Exception as e:
+ logger.error(f"Failed to serialize {file.type} {file.id}: {str(e)}")
+ raise
+
+ # Save to local filesystem
+ _save_serialized_file(serialized, file.path, file.type)
+
+ except Exception as e:
+ logger.error(f"Error processing file {file.path}: {str(e)}")
+ raise
+
+def sync(client: "BaseHumanloop") -> List[str]:
+ """Sync prompt and agent files from Humanloop to local filesystem.
+
+ :param client: Humanloop client instance
+ :return: List of successfully processed file paths
+ """
+ successful_files = []
+ failed_files = []
+
+ # Create a thread pool for processing files
+ with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
+ futures = []
+ page = 1
+
+ while True:
+ try:
+ response = client.files.list_files(
+ type=["prompt", "agent"],
+ page=page
+ )
+
+ if len(response.records) == 0:
+ break
+
+ # Submit each file for processing
+ for file in response.records:
+ future = executor.submit(_process_file, client, file)
+ futures.append((file.path, future))
+
+ page += 1
+ except Exception as e:
+ logger.error(f"Failed to fetch page {page}: {str(e)}")
+ break
+
+ # Wait for all tasks to complete
+ for file_path, future in futures:
+ try:
+ future.result()
+ successful_files.append(file_path)
+ except Exception as e:
+ failed_files.append(file_path)
+ logger.error(f"Task failed for {file_path}: {str(e)}")
+
+ # Log summary
+ if successful_files:
+ logger.info(f"\nSynced {len(successful_files)} files")
+ if failed_files:
+ logger.error(f"Failed to sync {len(failed_files)} files")
+
+ return successful_files
\ No newline at end of file
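The ApiError branch above is the subtle part: the serialize endpoints return raw YAML, the generated client tries to parse the body as JSON, and on failure it raises ApiError even though the HTTP status is 200. The same workaround in isolation (the helper name is illustrative):
```python
from humanloop.core.api_error import ApiError

def fetch_serialized(client, file_id: str, file_type: str) -> str:
    # Mirrors the serialize workaround in _process_file above.
    try:
        if file_type == "prompt":
            return client.prompts.serialize(id=file_id)
        return client.agents.serialize(id=file_id)
    except ApiError as e:
        if e.status_code == 200:
            # Success: the body is the raw YAML the JSON parser rejected.
            return e.body
        raise
```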
diff --git a/tests/conftest.py b/tests/conftest.py
index c3e35481..272b0d3d 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -217,8 +217,10 @@ def directory_cleanup(directory_id: str, humanloop_client: Humanloop):
client = humanloop_client.evaluators # type: ignore [assignment]
elif file.type == "flow":
client = humanloop_client.flows # type: ignore [assignment]
- else:
- raise NotImplementedError(f"Unknown HL file type {file.type}")
+ elif file.type == "agent":
+ client = humanloop_client.agents # type: ignore [assignment]
+ else:
+ raise NotImplementedError(f"Unknown HL file type {file.type}")
client.delete(file_id)
for subdirectory in response.subdirectories:
From c68588723c05c66d396ebfda9796be362c44fa47 Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Thu, 24 Apr 2025 15:46:32 +0100
Subject: [PATCH 05/39] fix type error and formatting
---
src/humanloop/client.py | 14 ++++----
src/humanloop/sync/sync_utils.py | 59 +++++++++++++++++++-------------
2 files changed, 41 insertions(+), 32 deletions(-)
diff --git a/src/humanloop/client.py b/src/humanloop/client.py
index e850de9c..9dd8fb28 100644
--- a/src/humanloop/client.py
+++ b/src/humanloop/client.py
@@ -146,9 +146,7 @@ def __init__(
)
if opentelemetry_tracer is None:
- self._opentelemetry_tracer = self._tracer_provider.get_tracer(
- "humanloop.sdk"
- )
+ self._opentelemetry_tracer = self._tracer_provider.get_tracer("humanloop.sdk")
else:
self._opentelemetry_tracer = opentelemetry_tracer
@@ -349,18 +347,18 @@ def agent():
path=path,
attributes=attributes,
)
-
+
def sync(self) -> List[str]:
"""Sync prompt and agent files from Humanloop to local filesystem.
-
+
This method will:
1. Fetch all prompt and agent files from your Humanloop workspace
2. Save them to the local filesystem in a 'humanloop/' directory
3. Maintain the same directory structure as in Humanloop
4. Add appropriate file extensions (.prompt or .agent)
-
+
Currently only supports syncing prompt and agent files. Other file types will be skipped.
-
+
The files will be saved with the following structure:
```
humanloop/
@@ -371,7 +369,7 @@ def sync(self) -> List[str]:
└── agents/
└── my_agent.agent
```
-
+
:return: List of successfully processed file paths
"""
return sync(self)
diff --git a/src/humanloop/sync/sync_utils.py b/src/humanloop/sync/sync_utils.py
index 1e54abba..0e94260f 100644
--- a/src/humanloop/sync/sync_utils.py
+++ b/src/humanloop/sync/sync_utils.py
@@ -2,9 +2,9 @@
import logging
from pathlib import Path
import concurrent.futures
-from typing import List, TYPE_CHECKING, Union
+from typing import List, TYPE_CHECKING, Union, cast
-from humanloop.types import FileType, PromptResponse, AgentResponse
+from humanloop.types import FileType, PromptResponse, AgentResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse
from humanloop.core.api_error import ApiError
if TYPE_CHECKING:
@@ -19,9 +19,10 @@
if not logger.hasHandlers():
logger.addHandler(console_handler)
+
def _save_serialized_file(serialized_content: str, file_path: str, file_type: FileType) -> None:
"""Save serialized file to local filesystem.
-
+
:param serialized_content: The content to save
:param file_path: The path where to save the file
:param file_type: The type of file (prompt or agent)
@@ -31,10 +32,10 @@ def _save_serialized_file(serialized_content: str, file_path: str, file_type: Fi
full_path = Path("humanloop") / file_path
# Create directory if it doesn't exist
full_path.parent.mkdir(parents=True, exist_ok=True)
-
+
# Add file type extension
new_path = full_path.parent / f"{full_path.stem}.{file_type}"
-
+
# Write content to file
with open(new_path, "w") as f:
f.write(serialized_content)
@@ -43,15 +44,27 @@ def _save_serialized_file(serialized_content: str, file_path: str, file_type: Fi
logger.error(f"Failed to sync {file_type} {file_path}: {str(e)}")
raise
-def _process_file(client: "BaseHumanloop", file: Union[PromptResponse, AgentResponse]) -> None:
+
+def _process_file(client: "BaseHumanloop", file: Union[PromptResponse, AgentResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse]) -> None:
"""Process a single file by serializing and saving it.
-
+
Currently only supports prompt and agent files. Other file types will be skipped.
-
+
:param client: Humanloop client instance
:param file: The file to process (must be a PromptResponse or AgentResponse)
"""
try:
+ # Skip if not a prompt or agent
+ if file.type not in ["prompt", "agent"]:
+ logger.warning(f"Skipping unsupported file type: {file.type}")
+ return
+
+ # Cast to the correct type for type checking
+ if file.type == "prompt":
+ file = cast(PromptResponse, file)
+ elif file.type == "agent":
+ file = cast(AgentResponse, file)
+
# Serialize the file based on its type
try:
if file.type == "prompt":
@@ -70,48 +83,46 @@ def _process_file(client: "BaseHumanloop", file: Union[PromptResponse, AgentResp
except Exception as e:
logger.error(f"Failed to serialize {file.type} {file.id}: {str(e)}")
raise
-
+
# Save to local filesystem
_save_serialized_file(serialized, file.path, file.type)
-
+
except Exception as e:
logger.error(f"Error processing file {file.path}: {str(e)}")
raise
+
def sync(client: "BaseHumanloop") -> List[str]:
"""Sync prompt and agent files from Humanloop to local filesystem.
-
+
:param client: Humanloop client instance
:return: List of successfully processed file paths
"""
successful_files = []
failed_files = []
-
+
# Create a thread pool for processing files
with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
futures = []
page = 1
-
+
while True:
try:
- response = client.files.list_files(
- type=["prompt", "agent"],
- page=page
- )
-
+ response = client.files.list_files(type=["prompt", "agent"], page=page)
+
if len(response.records) == 0:
break
-
+
# Submit each file for processing
for file in response.records:
future = executor.submit(_process_file, client, file)
futures.append((file.path, future))
-
+
page += 1
except Exception as e:
logger.error(f"Failed to fetch page {page}: {str(e)}")
break
-
+
# Wait for all tasks to complete
for file_path, future in futures:
try:
@@ -120,11 +131,11 @@ def sync(client: "BaseHumanloop") -> List[str]:
except Exception as e:
failed_files.append(file_path)
logger.error(f"Task failed for {file_path}: {str(e)}")
-
+
# Log summary
if successful_files:
logger.info(f"\nSynced {len(successful_files)} files")
if failed_files:
logger.error(f"Failed to sync {len(failed_files)} files")
-
- return successful_files
\ No newline at end of file
+
+ return successful_files
From 67f17c8cbbd6cf558bd55ce0da1753273346cb0d Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Fri, 25 Apr 2025 14:39:55 +0100
Subject: [PATCH 06/39] refactor sync utils into SyncClient class
---
src/humanloop/client.py | 9 +-
src/humanloop/sync/__init__.py | 4 +-
src/humanloop/sync/sync_client.py | 161 ++++++++++++++++++++++++++++++
src/humanloop/sync/sync_utils.py | 141 --------------------------
4 files changed, 168 insertions(+), 147 deletions(-)
create mode 100644 src/humanloop/sync/sync_client.py
delete mode 100644 src/humanloop/sync/sync_utils.py
diff --git a/src/humanloop/client.py b/src/humanloop/client.py
index 9dd8fb28..26987bf4 100644
--- a/src/humanloop/client.py
+++ b/src/humanloop/client.py
@@ -24,7 +24,7 @@
from humanloop.otel.processor import HumanloopSpanProcessor
from humanloop.prompt_utils import populate_template
from humanloop.prompts.client import PromptsClient
-from humanloop.sync import sync
+from humanloop.sync.sync_client import SyncClient
class ExtendedEvalsClient(EvaluationsClient):
@@ -118,6 +118,7 @@ def __init__(
httpx_client=httpx_client,
)
+ self.sync_client = SyncClient(client=self)
eval_client = ExtendedEvalsClient(client_wrapper=self._client_wrapper)
eval_client.client = self
self.evaluations = eval_client
@@ -348,8 +349,8 @@ def agent():
attributes=attributes,
)
- def sync(self) -> List[str]:
- """Sync prompt and agent files from Humanloop to local filesystem.
+ def pull(self) -> List[str]:
+ """Pull prompt and agent files from Humanloop to local filesystem.
This method will:
1. Fetch all prompt and agent files from your Humanloop workspace
@@ -372,7 +373,7 @@ def sync(self) -> List[str]:
:return: List of successfully processed file paths
"""
- return sync(self)
+ return self.sync_client.pull()
class AsyncHumanloop(AsyncBaseHumanloop):
diff --git a/src/humanloop/sync/__init__.py b/src/humanloop/sync/__init__.py
index 7b55e70e..007659df 100644
--- a/src/humanloop/sync/__init__.py
+++ b/src/humanloop/sync/__init__.py
@@ -1,3 +1,3 @@
-from humanloop.sync.sync_utils import sync
+from humanloop.sync.sync_client import SyncClient
-__all__ = ["sync"]
+__all__ = ["SyncClient"]
diff --git a/src/humanloop/sync/sync_client.py b/src/humanloop/sync/sync_client.py
new file mode 100644
index 00000000..979ada1e
--- /dev/null
+++ b/src/humanloop/sync/sync_client.py
@@ -0,0 +1,161 @@
+import multiprocessing
+import os
+import logging
+from pathlib import Path
+import concurrent.futures
+from typing import List, TYPE_CHECKING, Union, cast, Optional
+
+from humanloop.types import FileType, PromptResponse, AgentResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse
+from humanloop.core.api_error import ApiError
+
+if TYPE_CHECKING:
+ from humanloop.base_client import BaseHumanloop
+
+# Set up logging
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO)
+console_handler = logging.StreamHandler()
+formatter = logging.Formatter("%(message)s")
+console_handler.setFormatter(formatter)
+if not logger.hasHandlers():
+ logger.addHandler(console_handler)
+
+class SyncClient:
+ """Client for managing synchronization between local filesystem and Humanloop."""
+
+ def __init__(
+ self,
+ client: "BaseHumanloop",
+ base_dir: str = "humanloop",
+ max_workers: Optional[int] = None
+ ):
+ """
+ Parameters
+ ----------
+ client: Humanloop client instance
+ base_dir: Base directory for synced files (default: "humanloop")
+ max_workers: Maximum number of worker threads (default: CPU count * 2)
+ """
+ self.client = client
+ self.base_dir = Path(base_dir)
+ self.max_workers = max_workers or multiprocessing.cpu_count() * 2
+
+ def _save_serialized_file(self, serialized_content: str, file_path: str, file_type: FileType) -> None:
+ """Save serialized file to local filesystem.
+
+ Args:
+ serialized_content: The content to save
+ file_path: The path where to save the file
+ file_type: The type of file (prompt or agent)
+ """
+ try:
+ # Create full path including base_dir prefix
+ full_path = self.base_dir / file_path
+ # Create directory if it doesn't exist
+ full_path.parent.mkdir(parents=True, exist_ok=True)
+
+ # Add file type extension
+ new_path = full_path.parent / f"{full_path.stem}.{file_type}"
+
+ # Write content to file
+ with open(new_path, "w") as f:
+ f.write(serialized_content)
+ logger.info(f"Syncing {file_type} {file_path}")
+ except Exception as e:
+ logger.error(f"Failed to sync {file_type} {file_path}: {str(e)}")
+ raise
+
+ def _process_file(
+ self,
+ file: Union[PromptResponse, AgentResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse]
+ ) -> None:
+ """Process a single file by serializing and saving it.
+
+ Args:
+ file: The file to process (must be a PromptResponse or AgentResponse)
+ """
+ try:
+ # Skip if not a prompt or agent
+ if file.type not in ["prompt", "agent"]:
+ logger.warning(f"Skipping unsupported file type: {file.type}")
+ return
+
+ # Cast to the correct type for type checking
+ if file.type == "prompt":
+ file = cast(PromptResponse, file)
+ elif file.type == "agent":
+ file = cast(AgentResponse, file)
+
+ # Serialize the file based on its type
+ try:
+ if file.type == "prompt":
+ serialized = self.client.prompts.serialize(id=file.id)
+ elif file.type == "agent":
+ serialized = self.client.agents.serialize(id=file.id)
+ else:
+ logger.warning(f"Skipping unsupported file type: {file.type}")
+ return
+ except ApiError as e:
+ # The SDK returns the YAML content in the error body when it can't parse as JSON
+ if e.status_code == 200:
+ serialized = e.body
+ else:
+ raise
+ except Exception as e:
+ logger.error(f"Failed to serialize {file.type} {file.id}: {str(e)}")
+ raise
+
+ # Save to local filesystem
+ self._save_serialized_file(serialized, file.path, file.type)
+
+ except Exception as e:
+ logger.error(f"Error processing file {file.path}: {str(e)}")
+ raise
+
+ def pull(self) -> List[str]:
+ """Sync prompt and agent files from Humanloop to local filesystem.
+
+ Returns:
+ List of successfully processed file paths
+ """
+ successful_files = []
+ failed_files = []
+
+ # Create a thread pool for processing files
+ with concurrent.futures.ThreadPoolExecutor(max_workers=self.max_workers) as executor:
+ futures = []
+ page = 1
+
+ while True:
+ try:
+ response = self.client.files.list_files(type=["prompt", "agent"], page=page)
+
+ if len(response.records) == 0:
+ break
+
+ # Submit each file for processing
+ for file in response.records:
+ future = executor.submit(self._process_file, file)
+ futures.append((file.path, future))
+
+ page += 1
+ except Exception as e:
+ logger.error(f"Failed to fetch page {page}: {str(e)}")
+ break
+
+ # Wait for all tasks to complete
+ for file_path, future in futures:
+ try:
+ future.result()
+ successful_files.append(file_path)
+ except Exception as e:
+ failed_files.append(file_path)
+ logger.error(f"Task failed for {file_path}: {str(e)}")
+
+ # Log summary
+ if successful_files:
+ logger.info(f"\nSynced {len(successful_files)} files")
+ if failed_files:
+ logger.error(f"Failed to sync {len(failed_files)} files")
+
+ return successful_files
\ No newline at end of file
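A hedged usage sketch of the new SyncClient (the API key and worker count are illustrative; constructor defaults are as defined above):
```
# Sketch only: drive the SyncClient introduced in this patch.
from humanloop import Humanloop
from humanloop.sync import SyncClient

hl = Humanloop(api_key="hl_...")  # placeholder key
syncer = SyncClient(client=hl, base_dir="humanloop", max_workers=8)
pulled = syncer.pull()  # list of successfully processed file paths
```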
diff --git a/src/humanloop/sync/sync_utils.py b/src/humanloop/sync/sync_utils.py
deleted file mode 100644
index 0e94260f..00000000
--- a/src/humanloop/sync/sync_utils.py
+++ /dev/null
@@ -1,141 +0,0 @@
-import os
-import logging
-from pathlib import Path
-import concurrent.futures
-from typing import List, TYPE_CHECKING, Union, cast
-
-from humanloop.types import FileType, PromptResponse, AgentResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse
-from humanloop.core.api_error import ApiError
-
-if TYPE_CHECKING:
- from humanloop.base_client import BaseHumanloop
-
-# Set up logging
-logger = logging.getLogger(__name__)
-logger.setLevel(logging.INFO)
-console_handler = logging.StreamHandler()
-formatter = logging.Formatter("%(message)s")
-console_handler.setFormatter(formatter)
-if not logger.hasHandlers():
- logger.addHandler(console_handler)
-
-
-def _save_serialized_file(serialized_content: str, file_path: str, file_type: FileType) -> None:
- """Save serialized file to local filesystem.
-
- :param serialized_content: The content to save
- :param file_path: The path where to save the file
- :param file_type: The type of file (prompt or agent)
- """
- try:
- # Create full path including humanloop/ prefix
- full_path = Path("humanloop") / file_path
- # Create directory if it doesn't exist
- full_path.parent.mkdir(parents=True, exist_ok=True)
-
- # Add file type extension
- new_path = full_path.parent / f"{full_path.stem}.{file_type}"
-
- # Write content to file
- with open(new_path, "w") as f:
- f.write(serialized_content)
- logger.info(f"Syncing {file_type} {file_path}")
- except Exception as e:
- logger.error(f"Failed to sync {file_type} {file_path}: {str(e)}")
- raise
-
-
-def _process_file(client: "BaseHumanloop", file: Union[PromptResponse, AgentResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse]) -> None:
- """Process a single file by serializing and saving it.
-
- Currently only supports prompt and agent files. Other file types will be skipped.
-
- :param client: Humanloop client instance
- :param file: The file to process (must be a PromptResponse or AgentResponse)
- """
- try:
- # Skip if not a prompt or agent
- if file.type not in ["prompt", "agent"]:
- logger.warning(f"Skipping unsupported file type: {file.type}")
- return
-
- # Cast to the correct type for type checking
- if file.type == "prompt":
- file = cast(PromptResponse, file)
- elif file.type == "agent":
- file = cast(AgentResponse, file)
-
- # Serialize the file based on its type
- try:
- if file.type == "prompt":
- serialized = client.prompts.serialize(id=file.id)
- elif file.type == "agent":
- serialized = client.agents.serialize(id=file.id)
- else:
- logger.warning(f"Skipping unsupported file type: {file.type}")
- return
- except ApiError as e:
- # The SDK returns the YAML content in the error body when it can't parse as JSON
- if e.status_code == 200:
- serialized = e.body
- else:
- raise
- except Exception as e:
- logger.error(f"Failed to serialize {file.type} {file.id}: {str(e)}")
- raise
-
- # Save to local filesystem
- _save_serialized_file(serialized, file.path, file.type)
-
- except Exception as e:
- logger.error(f"Error processing file {file.path}: {str(e)}")
- raise
-
-
-def sync(client: "BaseHumanloop") -> List[str]:
- """Sync prompt and agent files from Humanloop to local filesystem.
-
- :param client: Humanloop client instance
- :return: List of successfully processed file paths
- """
- successful_files = []
- failed_files = []
-
- # Create a thread pool for processing files
- with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
- futures = []
- page = 1
-
- while True:
- try:
- response = client.files.list_files(type=["prompt", "agent"], page=page)
-
- if len(response.records) == 0:
- break
-
- # Submit each file for processing
- for file in response.records:
- future = executor.submit(_process_file, client, file)
- futures.append((file.path, future))
-
- page += 1
- except Exception as e:
- logger.error(f"Failed to fetch page {page}: {str(e)}")
- break
-
- # Wait for all tasks to complete
- for file_path, future in futures:
- try:
- future.result()
- successful_files.append(file_path)
- except Exception as e:
- failed_files.append(file_path)
- logger.error(f"Task failed for {file_path}: {str(e)}")
-
- # Log summary
- if successful_files:
- logger.info(f"\nSynced {len(successful_files)} files")
- if failed_files:
- logger.error(f"Failed to sync {len(failed_files)} files")
-
- return successful_files
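The ApiError workaround carried into SyncClient is worth isolating (a sketch, assuming `hl` is the Humanloop client from the previous sketch; the serialize endpoint returning raw YAML in the error body is the behaviour described in the comments above):
```
# Sketch only: the status-200 ApiError workaround used by SyncClient above.
from humanloop.core.api_error import ApiError

try:
    serialized = hl.prompts.serialize(id="pr_...")  # placeholder file id
except ApiError as e:
    if e.status_code == 200:
        serialized = e.body  # endpoint returned raw YAML, not JSON
    else:
        raise
```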
From 0b12a30a1c5f3a929a5dd5cf7bf5e09d8c0d497d Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Fri, 25 Apr 2025 15:36:45 +0100
Subject: [PATCH 07/39] add client overloads for the call method to use local
 files at a specified path for agents and prompts
---
src/humanloop/client.py | 14 +++++++++++-
src/humanloop/overload.py | 46 +++++++++++++++++++++++++++++++++++++--
tests/sync/test_sync.py | 4 ++--
3 files changed, 59 insertions(+), 5 deletions(-)
diff --git a/src/humanloop/client.py b/src/humanloop/client.py
index 26987bf4..28ddcd52 100644
--- a/src/humanloop/client.py
+++ b/src/humanloop/client.py
@@ -13,7 +13,7 @@
from humanloop.evals.types import Dataset, Evaluator, EvaluatorCheck, File
from humanloop.base_client import AsyncBaseHumanloop, BaseHumanloop
-from humanloop.overload import overload_call, overload_log
+from humanloop.overload import overload_call, overload_log, overload_call_with_local_files
from humanloop.decorators.flow import flow as flow_decorator_factory
from humanloop.decorators.prompt import prompt_decorator_factory
from humanloop.decorators.tool import tool_decorator_factory as tool_decorator_factory
@@ -99,6 +99,7 @@ def __init__(
httpx_client: typing.Optional[httpx.Client] = None,
opentelemetry_tracer_provider: Optional[TracerProvider] = None,
opentelemetry_tracer: Optional[Tracer] = None,
+ use_local_files: bool = False,
):
"""
Extends the base client with custom evaluation utilities and
@@ -118,6 +119,7 @@ def __init__(
httpx_client=httpx_client,
)
+ self.use_local_files = use_local_files
self.sync_client = SyncClient(client=self)
eval_client = ExtendedEvalsClient(client_wrapper=self._client_wrapper)
eval_client.client = self
@@ -128,6 +130,16 @@ def __init__(
# and the @flow decorator providing the trace_id
self.prompts = overload_log(client=self.prompts)
self.prompts = overload_call(client=self.prompts)
+ self.prompts = overload_call_with_local_files(
+ client=self.prompts,
+ use_local_files=self.use_local_files,
+ file_type="prompt"
+ )
+ self.agents = overload_call_with_local_files(
+ client=self.agents,
+ use_local_files=self.use_local_files,
+ file_type="agent"
+ )
self.flows = overload_log(client=self.flows)
self.tools = overload_log(client=self.tools)
diff --git a/src/humanloop/overload.py b/src/humanloop/overload.py
index b0c83215..32bce735 100644
--- a/src/humanloop/overload.py
+++ b/src/humanloop/overload.py
@@ -1,8 +1,8 @@
import inspect
import logging
import types
-from typing import TypeVar, Union
-
+from typing import TypeVar, Union, Literal
+from pathlib import Path
from humanloop.context import (
get_decorator_context,
get_evaluation_context,
@@ -13,6 +13,7 @@
from humanloop.evaluators.client import EvaluatorsClient
from humanloop.flows.client import FlowsClient
from humanloop.prompts.client import PromptsClient
+from humanloop.agents.client import AgentsClient
from humanloop.tools.client import ToolsClient
from humanloop.types.create_evaluator_log_response import CreateEvaluatorLogResponse
from humanloop.types.create_flow_log_response import CreateFlowLogResponse
@@ -112,6 +113,7 @@ def _overload_call(self, **kwargs) -> PromptCallResponse:
}
try:
+ logger.info(f"Calling inner overload")
response = self._call(**kwargs)
except Exception as e:
# Re-raising as HumanloopDecoratorError so the decorators don't catch it
@@ -122,3 +124,43 @@ def _overload_call(self, **kwargs) -> PromptCallResponse:
# Replace the original log method with the overloaded one
client.call = types.MethodType(_overload_call, client) # type: ignore [assignment]
return client
+
+def overload_call_with_local_files(
+ client: Union[PromptsClient, AgentsClient],
+ use_local_files: bool,
+ file_type: Literal["prompt", "agent"]
+) -> Union[PromptsClient, AgentsClient]:
+ """Overload call to handle local files when use_local_files is True.
+
+ Args:
+ client: The client to overload (PromptsClient or AgentsClient)
+ use_local_files: Whether to use local files
+ file_type: Type of file ("prompt" or "agent")
+ """
+ original_call = client._call if hasattr(client, '_call') else client.call
+
+ def _overload_call(self, **kwargs) -> PromptCallResponse:
+ if use_local_files and "path" in kwargs:
+ try:
+ # Construct path to local file
+ local_path = Path("humanloop") / kwargs["path"]
+ # Add appropriate extension
+ local_path = local_path.parent / f"{local_path.stem}.{file_type}"
+
+ if local_path.exists():
+ # Read the file content
+ with open(local_path) as f:
+ file_content = f.read()
+
+ kwargs[file_type] = file_content # "prompt" or "agent"
+
+ logger.debug(f"Using local file content from {local_path}")
+ else:
+ logger.warning(f"Local file not found: {local_path}, falling back to API")
+ except Exception as e:
+ logger.error(f"Error reading local file: {e}, falling back to API")
+
+ return original_call(**kwargs)
+
+ client.call = types.MethodType(_overload_call, client)
+ return client
\ No newline at end of file
diff --git a/tests/sync/test_sync.py b/tests/sync/test_sync.py
index af5fd6b1..520a1979 100644
--- a/tests/sync/test_sync.py
+++ b/tests/sync/test_sync.py
@@ -74,10 +74,10 @@ def cleanup_local_files():
shutil.rmtree(local_dir)
-def test_sync_basic(humanloop_client: Humanloop, test_file_structure: List[SyncableFile], cleanup_local_files):
+def test_pull_basic(humanloop_client: Humanloop, test_file_structure: List[SyncableFile], cleanup_local_files):
"""Test that humanloop.sync() correctly syncs remote files to local filesystem"""
# Run the sync
- successful_files = humanloop_client.sync()
+ successful_files = humanloop_client.pull()
# Verify each file was synced correctly
for file in test_file_structure:
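To illustrate the intended flow (a sketch; the path and inputs are made up): with use_local_files=True, a call that names a path first looks for humanloop/<path>.prompt (or .agent) and, if found, sends the serialized file content instead of resolving the version remotely, falling back to the API when the file is missing.
```
# Sketch only: exercising the local-file overload added in this patch.
from humanloop import Humanloop

client = Humanloop(api_key="hl_...", use_local_files=True)  # placeholder key

# If humanloop/marketing/tagline.prompt exists, its content is injected as
# the `prompt` kwarg; otherwise the call falls back to the remote version.
response = client.prompts.call(
    path="marketing/tagline",            # illustrative path
    inputs={"product": "solar panels"},  # illustrative inputs
)
```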
From 612046e795fef66e48db4aa5caba94fac547374d Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Mon, 28 Apr 2025 16:38:44 +0100
Subject: [PATCH 08/39] infer file type from the client passed into the call overload
---
src/humanloop/overload.py | 12 ++++++++++--
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/src/humanloop/overload.py b/src/humanloop/overload.py
index 32bce735..3caad33b 100644
--- a/src/humanloop/overload.py
+++ b/src/humanloop/overload.py
@@ -15,6 +15,7 @@
from humanloop.prompts.client import PromptsClient
from humanloop.agents.client import AgentsClient
from humanloop.tools.client import ToolsClient
+from humanloop.types import FileType
from humanloop.types.create_evaluator_log_response import CreateEvaluatorLogResponse
from humanloop.types.create_flow_log_response import CreateFlowLogResponse
from humanloop.types.create_prompt_log_response import CreatePromptLogResponse
@@ -128,7 +129,6 @@ def _overload_call(self, **kwargs) -> PromptCallResponse:
def overload_call_with_local_files(
client: Union[PromptsClient, AgentsClient],
use_local_files: bool,
- file_type: Literal["prompt", "agent"]
) -> Union[PromptsClient, AgentsClient]:
"""Overload call to handle local files when use_local_files is True.
@@ -138,6 +138,14 @@ def overload_call_with_local_files(
file_type: Type of file ("prompt" or "agent")
"""
original_call = client._call if hasattr(client, '_call') else client.call
+ # get file type from client type
+ file_type: FileType
+ if isinstance(client, PromptsClient):
+ file_type = "prompt"
+ elif isinstance(client, AgentsClient):
+ file_type = "agent"
+ else:
+ raise ValueError(f"Unsupported client type: {type(client)}")
def _overload_call(self, **kwargs) -> PromptCallResponse:
if use_local_files and "path" in kwargs:
@@ -152,7 +160,7 @@ def _overload_call(self, **kwargs) -> PromptCallResponse:
with open(local_path) as f:
file_content = f.read()
- kwargs[file_type] = file_content # "prompt" or "agent"
+ kwargs[file_type] = file_content # "prompt" or "agent" # TODO: raise warning if kernel passed in
logger.debug(f"Using local file content from {local_path}")
else:
From 48262a1dc58c4ef9c6ff4f410b2ba648e98fec18 Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Mon, 28 Apr 2025 16:45:28 +0100
Subject: [PATCH 09/39] simplify sync client to use updated list files endpoint
that includes serialized content
---
src/humanloop/client.py | 6 +-
src/humanloop/sync/sync_client.py | 106 +++++++++---------------------
2 files changed, 34 insertions(+), 78 deletions(-)
diff --git a/src/humanloop/client.py b/src/humanloop/client.py
index 28ddcd52..64eaef7c 100644
--- a/src/humanloop/client.py
+++ b/src/humanloop/client.py
@@ -132,13 +132,11 @@ def __init__(
self.prompts = overload_call(client=self.prompts)
self.prompts = overload_call_with_local_files(
client=self.prompts,
- use_local_files=self.use_local_files,
- file_type="prompt"
+ use_local_files=self.use_local_files
)
self.agents = overload_call_with_local_files(
client=self.agents,
- use_local_files=self.use_local_files,
- file_type="agent"
+ use_local_files=self.use_local_files
)
self.flows = overload_log(client=self.flows)
self.tools = overload_log(client=self.tools)
diff --git a/src/humanloop/sync/sync_client.py b/src/humanloop/sync/sync_client.py
index 979ada1e..97a82df1 100644
--- a/src/humanloop/sync/sync_client.py
+++ b/src/humanloop/sync/sync_client.py
@@ -65,53 +65,6 @@ def _save_serialized_file(self, serialized_content: str, file_path: str, file_ty
logger.error(f"Failed to sync {file_type} {file_path}: {str(e)}")
raise
- def _process_file(
- self,
- file: Union[PromptResponse, AgentResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse]
- ) -> None:
- """Process a single file by serializing and saving it.
-
- Args:
- file: The file to process (must be a PromptResponse or AgentResponse)
- """
- try:
- # Skip if not a prompt or agent
- if file.type not in ["prompt", "agent"]:
- logger.warning(f"Skipping unsupported file type: {file.type}")
- return
-
- # Cast to the correct type for type checking
- if file.type == "prompt":
- file = cast(PromptResponse, file)
- elif file.type == "agent":
- file = cast(AgentResponse, file)
-
- # Serialize the file based on its type
- try:
- if file.type == "prompt":
- serialized = self.client.prompts.serialize(id=file.id)
- elif file.type == "agent":
- serialized = self.client.agents.serialize(id=file.id)
- else:
- logger.warning(f"Skipping unsupported file type: {file.type}")
- return
- except ApiError as e:
- # The SDK returns the YAML content in the error body when it can't parse as JSON
- if e.status_code == 200:
- serialized = e.body
- else:
- raise
- except Exception as e:
- logger.error(f"Failed to serialize {file.type} {file.id}: {str(e)}")
- raise
-
- # Save to local filesystem
- self._save_serialized_file(serialized, file.path, file.type)
-
- except Exception as e:
- logger.error(f"Error processing file {file.path}: {str(e)}")
- raise
-
def pull(self) -> List[str]:
"""Sync prompt and agent files from Humanloop to local filesystem.
@@ -120,37 +73,42 @@ def pull(self) -> List[str]:
"""
successful_files = []
failed_files = []
+ page = 1
- # Create a thread pool for processing files
- with concurrent.futures.ThreadPoolExecutor(max_workers=self.max_workers) as executor:
- futures = []
- page = 1
-
- while True:
- try:
- response = self.client.files.list_files(type=["prompt", "agent"], page=page)
-
- if len(response.records) == 0:
- break
-
- # Submit each file for processing
- for file in response.records:
- future = executor.submit(self._process_file, file)
- futures.append((file.path, future))
+ while True:
+ try:
+ response = self.client.files.list_files(
+ type=["prompt", "agent"],
+ page=page,
+ include_content=True
+ )
- page += 1
- except Exception as e:
- logger.error(f"Failed to fetch page {page}: {str(e)}")
+ if len(response.records) == 0:
break
- # Wait for all tasks to complete
- for file_path, future in futures:
- try:
- future.result()
- successful_files.append(file_path)
- except Exception as e:
- failed_files.append(file_path)
- logger.error(f"Task failed for {file_path}: {str(e)}")
+ # Process each file
+ for file in response.records:
+ # Skip if not a prompt or agent
+ if file.type not in ["prompt", "agent"]:
+ logger.warning(f"Skipping unsupported file type: {file.type}")
+ continue
+
+ # Skip if no content
+ if not getattr(file, "content", None):
+ logger.warning(f"No content found for {file.type} {getattr(file, 'id', '')}")
+ continue
+
+ try:
+ self._save_serialized_file(file.content, file.path, file.type)
+ successful_files.append(file.path)
+ except Exception as e:
+ failed_files.append(file.path)
+ logger.error(f"Task failed for {file.path}: {str(e)}")
+
+ page += 1
+ except Exception as e:
+ logger.error(f"Failed to fetch page {page}: {str(e)}")
+ break
# Log summary
if successful_files:
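The simplification hinges on list_files returning serialized content inline; roughly (a sketch, assuming `client` is a configured Humanloop instance and the include_content flag shown in the hunk above):
```
# Sketch only: one page of the simplified pull loop.
response = client.files.list_files(
    type=["prompt", "agent"],
    page=1,
    include_content=True,  # records now carry their serialized content
)
for record in response.records:
    print(record.path, record.type, bool(getattr(record, "content", None)))
```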
From 2eb8ec14c083a037872d5a34670652c8cdfb79a3 Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Mon, 28 Apr 2025 16:48:06 +0100
Subject: [PATCH 10/39] update test fixture to use staging
---
tests/conftest.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tests/conftest.py b/tests/conftest.py
index 272b0d3d..20c8ae79 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -194,7 +194,7 @@ def api_keys() -> APIKeys:
def humanloop_client(api_keys: APIKeys) -> Humanloop:
return Humanloop(
api_key=api_keys.humanloop,
- base_url="http://localhost:80/v5",
+ base_url="https://neostaging.humanloop.ml/v5/",
)
From 152fbfa0d4ebef6fe9157699c0b0d16bee6de598 Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Tue, 29 Apr 2025 12:20:15 +0100
Subject: [PATCH 11/39] overload log operation to use local files when relevant
---
src/humanloop/client.py | 9 +++++----
src/humanloop/overload.py | 23 ++++++++++++++++++-----
tests/conftest.py | 2 +-
3 files changed, 24 insertions(+), 10 deletions(-)
diff --git a/src/humanloop/client.py b/src/humanloop/client.py
index 64eaef7c..d96d8dad 100644
--- a/src/humanloop/client.py
+++ b/src/humanloop/client.py
@@ -13,7 +13,7 @@
from humanloop.evals.types import Dataset, Evaluator, EvaluatorCheck, File
from humanloop.base_client import AsyncBaseHumanloop, BaseHumanloop
-from humanloop.overload import overload_call, overload_log, overload_call_with_local_files
+from humanloop.overload import overload_call, overload_log, overload_with_local_files
from humanloop.decorators.flow import flow as flow_decorator_factory
from humanloop.decorators.prompt import prompt_decorator_factory
from humanloop.decorators.tool import tool_decorator_factory as tool_decorator_factory
@@ -77,6 +77,7 @@ class ExtendedPromptsClient(PromptsClient):
"""
populate_template = staticmethod(populate_template) # type: ignore [assignment]
+ load = staticmethod(load)
class Humanloop(BaseHumanloop):
@@ -130,11 +131,11 @@ def __init__(
# and the @flow decorator providing the trace_id
self.prompts = overload_log(client=self.prompts)
self.prompts = overload_call(client=self.prompts)
- self.prompts = overload_call_with_local_files(
+ self.prompts = overload_with_local_files(
client=self.prompts,
use_local_files=self.use_local_files
)
- self.agents = overload_call_with_local_files(
+ self.agents = overload_with_local_files(
client=self.agents,
use_local_files=self.use_local_files
)
@@ -393,4 +394,4 @@ class AsyncHumanloop(AsyncBaseHumanloop):
TODO: Add custom evaluation utilities for async case.
"""
- pass
+ pass
\ No newline at end of file
diff --git a/src/humanloop/overload.py b/src/humanloop/overload.py
index 3caad33b..a550c92a 100644
--- a/src/humanloop/overload.py
+++ b/src/humanloop/overload.py
@@ -126,7 +126,7 @@ def _overload_call(self, **kwargs) -> PromptCallResponse:
client.call = types.MethodType(_overload_call, client) # type: ignore [assignment]
return client
-def overload_call_with_local_files(
+def overload_with_local_files(
client: Union[PromptsClient, AgentsClient],
use_local_files: bool,
) -> Union[PromptsClient, AgentsClient]:
@@ -138,6 +138,7 @@ def overload_call_with_local_files(
file_type: Type of file ("prompt" or "agent")
"""
original_call = client._call if hasattr(client, '_call') else client.call
+ original_log = client._log if hasattr(client, '_log') else client.log
# get file type from client type
file_type: FileType
if isinstance(client, PromptsClient):
@@ -147,11 +148,11 @@ def overload_call_with_local_files(
else:
raise ValueError(f"Unsupported client type: {type(client)}")
- def _overload_call(self, **kwargs) -> PromptCallResponse:
+ def _overload(self, function_name: str, **kwargs) -> PromptCallResponse:
if use_local_files and "path" in kwargs:
try:
# Construct path to local file
- local_path = Path("humanloop") / kwargs["path"]
+ local_path = Path("humanloop") / kwargs["path"] # FLAG: ensure that when passing the path back to remote, it's using forward slashes
# Add appropriate extension
local_path = local_path.parent / f"{local_path.stem}.{file_type}"
@@ -160,7 +161,7 @@ def _overload_call(self, **kwargs) -> PromptCallResponse:
with open(local_path) as f:
file_content = f.read()
- kwargs[file_type] = file_content # "prompt" or "agent" # TODO: raise warning if kernel passed in
+ kwargs[file_type] = file_content # "prompt" or "agent" # TODO: raise warning if kernel passed in
logger.debug(f"Using local file content from {local_path}")
else:
@@ -168,7 +169,19 @@ def _overload_call(self, **kwargs) -> PromptCallResponse:
except Exception as e:
logger.error(f"Error reading local file: {e}, falling back to API")
- return original_call(**kwargs)
+ if function_name == "call":
+ return original_call(**kwargs)
+ elif function_name == "log":
+ return original_log(**kwargs)
+ else:
+ raise ValueError(f"Unsupported function name: {function_name}")
+
+ def _overload_call(self, **kwargs) -> PromptCallResponse:
+ return _overload(self, "call", **kwargs)
+
+ def _overload_log(self, **kwargs) -> PromptCallResponse:
+ return _overload(self, "log", **kwargs)
client.call = types.MethodType(_overload_call, client)
+ client.log = types.MethodType(_overload_log, client)
return client
\ No newline at end of file
diff --git a/tests/conftest.py b/tests/conftest.py
index 20c8ae79..fa213133 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -194,7 +194,7 @@ def api_keys() -> APIKeys:
def humanloop_client(api_keys: APIKeys) -> Humanloop:
return Humanloop(
api_key=api_keys.humanloop,
- base_url="https://neostaging.humanloop.ml/v5/",
+ base_url="http://localhost:80/v5/",
)
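With this patch the local-file substitution applies to log() as well as call(); a sketch (the path and payload are illustrative):
```
# Sketch only: log() now also resolves humanloop/<path>.prompt when present.
from humanloop import Humanloop

client = Humanloop(api_key="hl_...", use_local_files=True)  # placeholder key

log = client.prompts.log(
    path="support/classifier",                # resolved against ./humanloop/
    inputs={"ticket": "Where is my order?"},  # illustrative payload
    output="shipping",
)
```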
From c453a6c2c884ad7d7cb7d4e2c33b88dc4a86e740 Mon Sep 17 00:00:00 2001
From: fern-api <115122769+fern-api[bot]@users.noreply.github.com>
Date: Tue, 29 Apr 2025 11:25:21 +0000
Subject: [PATCH 12/39] Release 0.8.36
---
poetry.lock | 76 +-
pyproject.toml | 2 +-
reference.md | 5977 +++++++++++++----
src/humanloop/__init__.py | 237 +-
src/humanloop/agents/__init__.py | 49 +
src/humanloop/agents/client.py | 3210 +++++++++
src/humanloop/agents/raw_client.py | 3891 +++++++++++
src/humanloop/agents/requests/__init__.py | 25 +
.../requests/agent_log_request_agent.py | 6 +
.../requests/agent_log_request_tool_choice.py | 8 +
.../agent_request_reasoning_effort.py | 6 +
.../agents/requests/agent_request_stop.py | 5 +
.../agents/requests/agent_request_template.py | 6 +
.../requests/agent_request_tools_item.py | 7 +
.../requests/agents_call_request_agent.py | 6 +
.../agents_call_request_tool_choice.py | 8 +
.../agents_call_stream_request_agent.py | 6 +
.../agents_call_stream_request_tool_choice.py | 8 +
src/humanloop/agents/types/__init__.py | 25 +
.../agents/types/agent_log_request_agent.py | 6 +
.../types/agent_log_request_tool_choice.py | 8 +
.../types/agent_request_reasoning_effort.py | 6 +
.../agents/types/agent_request_stop.py | 5 +
.../agents/types/agent_request_template.py | 6 +
.../agents/types/agent_request_tools_item.py | 7 +
.../agents/types/agents_call_request_agent.py | 6 +
.../types/agents_call_request_tool_choice.py | 8 +
.../types/agents_call_stream_request_agent.py | 6 +
.../agents_call_stream_request_tool_choice.py | 8 +
src/humanloop/base_client.py | 4 +
src/humanloop/core/client_wrapper.py | 4 +-
src/humanloop/files/client.py | 22 +-
src/humanloop/files/raw_client.py | 34 +-
...th_files_retrieve_by_path_post_response.py | 8 +-
...th_files_retrieve_by_path_post_response.py | 3 +-
src/humanloop/flows/client.py | 8 +-
src/humanloop/logs/client.py | 4 +-
src/humanloop/prompts/__init__.py | 16 +
src/humanloop/prompts/client.py | 267 +-
src/humanloop/prompts/raw_client.py | 335 +-
src/humanloop/prompts/requests/__init__.py | 8 +
.../requests/prompt_log_request_prompt.py | 6 +
.../prompt_request_reasoning_effort.py | 6 +
.../requests/prompts_call_request_prompt.py | 6 +
.../prompts_call_stream_request_prompt.py | 6 +
src/humanloop/prompts/types/__init__.py | 8 +
.../types/prompt_log_request_prompt.py | 6 +
.../types/prompt_request_reasoning_effort.py | 6 +
.../types/prompts_call_request_prompt.py | 6 +
.../prompts_call_stream_request_prompt.py | 6 +
src/humanloop/requests/__init__.py | 88 +-
src/humanloop/requests/agent_call_response.py | 202 +
.../agent_call_response_tool_choice.py | 8 +
.../requests/agent_call_stream_response.py | 19 +
.../agent_call_stream_response_payload.py | 8 +
.../requests/agent_continue_response.py | 202 +
.../agent_continue_response_tool_choice.py | 8 +
.../agent_continue_stream_response.py | 19 +
.../agent_continue_stream_response_payload.py | 8 +
src/humanloop/requests/agent_inline_tool.py | 13 +
.../requests/agent_kernel_request.py | 112 +
.../agent_kernel_request_reasoning_effort.py | 6 +
.../requests/agent_kernel_request_stop.py | 5 +
.../requests/agent_kernel_request_template.py | 6 +
.../agent_kernel_request_tools_item.py | 7 +
.../requests/agent_linked_file_request.py | 13 +
.../requests/agent_linked_file_response.py | 19 +
.../agent_linked_file_response_file.py | 21 +
src/humanloop/requests/agent_log_response.py | 201 +
.../agent_log_response_tool_choice.py | 8 +
.../requests/agent_log_stream_response.py | 87 +
src/humanloop/requests/agent_response.py | 242 +
.../agent_response_reasoning_effort.py | 6 +
src/humanloop/requests/agent_response_stop.py | 5 +
.../requests/agent_response_template.py | 6 +
.../requests/agent_response_tools_item.py | 10 +
.../anthropic_redacted_thinking_content.py | 12 +
.../requests/anthropic_thinking_content.py | 17 +
src/humanloop/requests/chat_message.py | 6 +
.../requests/chat_message_thinking_item.py | 7 +
.../requests/create_agent_log_response.py | 31 +
src/humanloop/requests/dataset_response.py | 5 +
...arents_and_children_response_files_item.py | 8 +-
src/humanloop/requests/evaluator_response.py | 5 +
.../file_environment_response_file.py | 8 +-
.../file_environment_variable_request.py | 15 +
src/humanloop/requests/flow_response.py | 5 +
src/humanloop/requests/linked_file_request.py | 10 +
src/humanloop/requests/list_agents.py | 12 +
src/humanloop/requests/log_response.py | 7 +-
src/humanloop/requests/log_stream_response.py | 7 +
.../requests/paginated_data_agent_response.py | 12 +
..._response_flow_response_agent_response.py} | 8 +-
...w_response_agent_response_records_item.py} | 14 +-
.../requests/populate_template_response.py | 16 +-
...late_template_response_reasoning_effort.py | 6 +
.../requests/prompt_kernel_request.py | 12 +-
.../prompt_kernel_request_reasoning_effort.py | 6 +
src/humanloop/requests/prompt_response.py | 16 +-
.../prompt_response_reasoning_effort.py | 6 +
.../requests/run_version_response.py | 3 +-
src/humanloop/requests/tool_call_response.py | 146 +
src/humanloop/requests/tool_log_response.py | 6 +
.../version_deployment_response_file.py | 8 +-
.../requests/version_id_response_version.py | 8 +-
src/humanloop/tools/client.py | 523 +-
src/humanloop/tools/raw_client.py | 765 ++-
src/humanloop/types/__init__.py | 96 +-
src/humanloop/types/agent_call_response.py | 224 +
.../types/agent_call_response_tool_choice.py | 8 +
.../types/agent_call_stream_response.py | 44 +
.../agent_call_stream_response_payload.py | 8 +
.../types/agent_continue_response.py | 224 +
.../agent_continue_response_tool_choice.py | 8 +
.../types/agent_continue_stream_response.py | 44 +
.../agent_continue_stream_response_payload.py | 8 +
src/humanloop/types/agent_inline_tool.py | 23 +
src/humanloop/types/agent_kernel_request.py | 122 +
.../agent_kernel_request_reasoning_effort.py | 6 +
.../types/agent_kernel_request_stop.py | 5 +
.../types/agent_kernel_request_template.py | 6 +
.../types/agent_kernel_request_tools_item.py | 7 +
.../types/agent_linked_file_request.py | 23 +
.../types/agent_linked_file_response.py | 39 +
.../types/agent_linked_file_response_file.py | 16 +
src/humanloop/types/agent_log_response.py | 224 +
.../types/agent_log_response_tool_choice.py | 8 +
.../types/agent_log_stream_response.py | 98 +
src/humanloop/types/agent_response.py | 265 +
.../types/agent_response_reasoning_effort.py | 6 +
src/humanloop/types/agent_response_stop.py | 5 +
.../types/agent_response_template.py | 6 +
.../types/agent_response_tools_item.py | 10 +
.../anthropic_redacted_thinking_content.py | 23 +
.../types/anthropic_thinking_content.py | 28 +
src/humanloop/types/chat_message.py | 6 +
.../types/chat_message_thinking_item.py | 7 +
.../types/create_agent_log_response.py | 42 +
src/humanloop/types/dataset_response.py | 9 +
...tory_with_parents_and_children_response.py | 2 +
...arents_and_children_response_files_item.py | 3 +-
src/humanloop/types/evaluatee_response.py | 2 +
.../types/evaluation_evaluator_response.py | 2 +
.../types/evaluation_log_response.py | 3 +
src/humanloop/types/evaluation_response.py | 2 +
.../types/evaluation_run_response.py | 2 +
.../types/evaluation_runs_response.py | 2 +
src/humanloop/types/evaluator_log_response.py | 3 +
src/humanloop/types/evaluator_response.py | 11 +
src/humanloop/types/event_type.py | 21 +
.../types/file_environment_response.py | 2 +
.../types/file_environment_response_file.py | 3 +-
.../file_environment_variable_request.py | 27 +
src/humanloop/types/file_type.py | 2 +-
src/humanloop/types/files_tool_type.py | 2 +-
src/humanloop/types/flow_log_response.py | 3 +
src/humanloop/types/flow_response.py | 11 +
src/humanloop/types/linked_file_request.py | 21 +
src/humanloop/types/list_agents.py | 31 +
src/humanloop/types/list_evaluators.py | 2 +
src/humanloop/types/list_flows.py | 2 +
src/humanloop/types/list_prompts.py | 2 +
src/humanloop/types/list_tools.py | 2 +
src/humanloop/types/log_response.py | 5 +-
src/humanloop/types/log_stream_response.py | 7 +
src/humanloop/types/model_providers.py | 2 +-
.../types/monitoring_evaluator_response.py | 2 +
src/humanloop/types/on_agent_call_enum.py | 5 +
.../types/open_ai_reasoning_effort.py | 5 +
.../types/paginated_data_agent_response.py | 31 +
.../paginated_data_evaluation_log_response.py | 3 +
.../paginated_data_evaluator_response.py | 2 +
.../types/paginated_data_flow_response.py | 2 +
.../types/paginated_data_log_response.py | 3 +
.../types/paginated_data_prompt_response.py | 2 +
.../types/paginated_data_tool_response.py | 2 +
..._response_flow_response_agent_response.py} | 12 +-
...w_response_agent_response_records_item.py} | 7 +-
.../types/paginated_evaluation_response.py | 2 +
.../types/populate_template_response.py | 22 +-
...late_template_response_reasoning_effort.py | 6 +
src/humanloop/types/prompt_call_response.py | 2 +
src/humanloop/types/prompt_kernel_request.py | 12 +-
.../prompt_kernel_request_reasoning_effort.py | 6 +
src/humanloop/types/prompt_log_response.py | 3 +
src/humanloop/types/prompt_response.py | 22 +-
.../types/prompt_response_reasoning_effort.py | 6 +
src/humanloop/types/reasoning_effort.py | 5 -
src/humanloop/types/run_version_response.py | 3 +-
src/humanloop/types/tool_call_response.py | 168 +
src/humanloop/types/tool_log_response.py | 9 +
src/humanloop/types/tool_response.py | 2 +
.../types/version_deployment_response.py | 2 +
.../types/version_deployment_response_file.py | 3 +-
src/humanloop/types/version_id_response.py | 2 +
.../types/version_id_response_version.py | 3 +-
196 files changed, 17928 insertions(+), 1683 deletions(-)
create mode 100644 src/humanloop/agents/__init__.py
create mode 100644 src/humanloop/agents/client.py
create mode 100644 src/humanloop/agents/raw_client.py
create mode 100644 src/humanloop/agents/requests/__init__.py
create mode 100644 src/humanloop/agents/requests/agent_log_request_agent.py
create mode 100644 src/humanloop/agents/requests/agent_log_request_tool_choice.py
create mode 100644 src/humanloop/agents/requests/agent_request_reasoning_effort.py
create mode 100644 src/humanloop/agents/requests/agent_request_stop.py
create mode 100644 src/humanloop/agents/requests/agent_request_template.py
create mode 100644 src/humanloop/agents/requests/agent_request_tools_item.py
create mode 100644 src/humanloop/agents/requests/agents_call_request_agent.py
create mode 100644 src/humanloop/agents/requests/agents_call_request_tool_choice.py
create mode 100644 src/humanloop/agents/requests/agents_call_stream_request_agent.py
create mode 100644 src/humanloop/agents/requests/agents_call_stream_request_tool_choice.py
create mode 100644 src/humanloop/agents/types/__init__.py
create mode 100644 src/humanloop/agents/types/agent_log_request_agent.py
create mode 100644 src/humanloop/agents/types/agent_log_request_tool_choice.py
create mode 100644 src/humanloop/agents/types/agent_request_reasoning_effort.py
create mode 100644 src/humanloop/agents/types/agent_request_stop.py
create mode 100644 src/humanloop/agents/types/agent_request_template.py
create mode 100644 src/humanloop/agents/types/agent_request_tools_item.py
create mode 100644 src/humanloop/agents/types/agents_call_request_agent.py
create mode 100644 src/humanloop/agents/types/agents_call_request_tool_choice.py
create mode 100644 src/humanloop/agents/types/agents_call_stream_request_agent.py
create mode 100644 src/humanloop/agents/types/agents_call_stream_request_tool_choice.py
create mode 100644 src/humanloop/prompts/requests/prompt_log_request_prompt.py
create mode 100644 src/humanloop/prompts/requests/prompt_request_reasoning_effort.py
create mode 100644 src/humanloop/prompts/requests/prompts_call_request_prompt.py
create mode 100644 src/humanloop/prompts/requests/prompts_call_stream_request_prompt.py
create mode 100644 src/humanloop/prompts/types/prompt_log_request_prompt.py
create mode 100644 src/humanloop/prompts/types/prompt_request_reasoning_effort.py
create mode 100644 src/humanloop/prompts/types/prompts_call_request_prompt.py
create mode 100644 src/humanloop/prompts/types/prompts_call_stream_request_prompt.py
create mode 100644 src/humanloop/requests/agent_call_response.py
create mode 100644 src/humanloop/requests/agent_call_response_tool_choice.py
create mode 100644 src/humanloop/requests/agent_call_stream_response.py
create mode 100644 src/humanloop/requests/agent_call_stream_response_payload.py
create mode 100644 src/humanloop/requests/agent_continue_response.py
create mode 100644 src/humanloop/requests/agent_continue_response_tool_choice.py
create mode 100644 src/humanloop/requests/agent_continue_stream_response.py
create mode 100644 src/humanloop/requests/agent_continue_stream_response_payload.py
create mode 100644 src/humanloop/requests/agent_inline_tool.py
create mode 100644 src/humanloop/requests/agent_kernel_request.py
create mode 100644 src/humanloop/requests/agent_kernel_request_reasoning_effort.py
create mode 100644 src/humanloop/requests/agent_kernel_request_stop.py
create mode 100644 src/humanloop/requests/agent_kernel_request_template.py
create mode 100644 src/humanloop/requests/agent_kernel_request_tools_item.py
create mode 100644 src/humanloop/requests/agent_linked_file_request.py
create mode 100644 src/humanloop/requests/agent_linked_file_response.py
create mode 100644 src/humanloop/requests/agent_linked_file_response_file.py
create mode 100644 src/humanloop/requests/agent_log_response.py
create mode 100644 src/humanloop/requests/agent_log_response_tool_choice.py
create mode 100644 src/humanloop/requests/agent_log_stream_response.py
create mode 100644 src/humanloop/requests/agent_response.py
create mode 100644 src/humanloop/requests/agent_response_reasoning_effort.py
create mode 100644 src/humanloop/requests/agent_response_stop.py
create mode 100644 src/humanloop/requests/agent_response_template.py
create mode 100644 src/humanloop/requests/agent_response_tools_item.py
create mode 100644 src/humanloop/requests/anthropic_redacted_thinking_content.py
create mode 100644 src/humanloop/requests/anthropic_thinking_content.py
create mode 100644 src/humanloop/requests/chat_message_thinking_item.py
create mode 100644 src/humanloop/requests/create_agent_log_response.py
create mode 100644 src/humanloop/requests/file_environment_variable_request.py
create mode 100644 src/humanloop/requests/linked_file_request.py
create mode 100644 src/humanloop/requests/list_agents.py
create mode 100644 src/humanloop/requests/log_stream_response.py
create mode 100644 src/humanloop/requests/paginated_data_agent_response.py
rename src/humanloop/requests/{paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py => paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py} (65%)
rename src/humanloop/requests/{paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py => paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py} (58%)
create mode 100644 src/humanloop/requests/populate_template_response_reasoning_effort.py
create mode 100644 src/humanloop/requests/prompt_kernel_request_reasoning_effort.py
create mode 100644 src/humanloop/requests/prompt_response_reasoning_effort.py
create mode 100644 src/humanloop/requests/tool_call_response.py
create mode 100644 src/humanloop/types/agent_call_response.py
create mode 100644 src/humanloop/types/agent_call_response_tool_choice.py
create mode 100644 src/humanloop/types/agent_call_stream_response.py
create mode 100644 src/humanloop/types/agent_call_stream_response_payload.py
create mode 100644 src/humanloop/types/agent_continue_response.py
create mode 100644 src/humanloop/types/agent_continue_response_tool_choice.py
create mode 100644 src/humanloop/types/agent_continue_stream_response.py
create mode 100644 src/humanloop/types/agent_continue_stream_response_payload.py
create mode 100644 src/humanloop/types/agent_inline_tool.py
create mode 100644 src/humanloop/types/agent_kernel_request.py
create mode 100644 src/humanloop/types/agent_kernel_request_reasoning_effort.py
create mode 100644 src/humanloop/types/agent_kernel_request_stop.py
create mode 100644 src/humanloop/types/agent_kernel_request_template.py
create mode 100644 src/humanloop/types/agent_kernel_request_tools_item.py
create mode 100644 src/humanloop/types/agent_linked_file_request.py
create mode 100644 src/humanloop/types/agent_linked_file_response.py
create mode 100644 src/humanloop/types/agent_linked_file_response_file.py
create mode 100644 src/humanloop/types/agent_log_response.py
create mode 100644 src/humanloop/types/agent_log_response_tool_choice.py
create mode 100644 src/humanloop/types/agent_log_stream_response.py
create mode 100644 src/humanloop/types/agent_response.py
create mode 100644 src/humanloop/types/agent_response_reasoning_effort.py
create mode 100644 src/humanloop/types/agent_response_stop.py
create mode 100644 src/humanloop/types/agent_response_template.py
create mode 100644 src/humanloop/types/agent_response_tools_item.py
create mode 100644 src/humanloop/types/anthropic_redacted_thinking_content.py
create mode 100644 src/humanloop/types/anthropic_thinking_content.py
create mode 100644 src/humanloop/types/chat_message_thinking_item.py
create mode 100644 src/humanloop/types/create_agent_log_response.py
create mode 100644 src/humanloop/types/event_type.py
create mode 100644 src/humanloop/types/file_environment_variable_request.py
create mode 100644 src/humanloop/types/linked_file_request.py
create mode 100644 src/humanloop/types/list_agents.py
create mode 100644 src/humanloop/types/log_stream_response.py
create mode 100644 src/humanloop/types/on_agent_call_enum.py
create mode 100644 src/humanloop/types/open_ai_reasoning_effort.py
create mode 100644 src/humanloop/types/paginated_data_agent_response.py
rename src/humanloop/types/{paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py => paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py} (76%)
rename src/humanloop/types/{paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py => paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py} (63%)
create mode 100644 src/humanloop/types/populate_template_response_reasoning_effort.py
create mode 100644 src/humanloop/types/prompt_kernel_request_reasoning_effort.py
create mode 100644 src/humanloop/types/prompt_response_reasoning_effort.py
delete mode 100644 src/humanloop/types/reasoning_effort.py
create mode 100644 src/humanloop/types/tool_call_response.py
diff --git a/poetry.lock b/poetry.lock
index 4ce5d536..b3099902 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -78,13 +78,13 @@ tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"]
[[package]]
name = "certifi"
-version = "2025.1.31"
+version = "2025.4.26"
description = "Python package for providing Mozilla's CA Bundle."
optional = false
python-versions = ">=3.6"
files = [
- {file = "certifi-2025.1.31-py3-none-any.whl", hash = "sha256:ca78db4565a652026a4db2bcdf68f2fb589ea80d0be70e03929ed730746b84fe"},
- {file = "certifi-2025.1.31.tar.gz", hash = "sha256:3d5da6925056f6f18f119200434a4780a94263f10d1c21d032a6f6b2baa20651"},
+ {file = "certifi-2025.4.26-py3-none-any.whl", hash = "sha256:30350364dfe371162649852c63336a15c70c6510c2ad5015b21c2345311805f3"},
+ {file = "certifi-2025.4.26.tar.gz", hash = "sha256:0a816057ea3cdefcef70270d2c515e4506bbc954f417fa5ade2021213bb8f0c6"},
]
[[package]]
@@ -384,13 +384,13 @@ tqdm = ["tqdm"]
[[package]]
name = "groq"
-version = "0.23.0"
+version = "0.23.1"
description = "The official Python library for the groq API"
optional = false
python-versions = ">=3.8"
files = [
- {file = "groq-0.23.0-py3-none-any.whl", hash = "sha256:039817a6b75d70f129f0591f8c79d3f7655dcf728b709fe5f08cfeadb1d9cc19"},
- {file = "groq-0.23.0.tar.gz", hash = "sha256:426e1d89df5791b34fa3f2eb827aec38490b9b2de5a44bbba6161cf5282ea5c9"},
+ {file = "groq-0.23.1-py3-none-any.whl", hash = "sha256:05fa38c3d0ad03c19c6185f98f6a73901c2a463e844fd067b79f7b05c8346946"},
+ {file = "groq-0.23.1.tar.gz", hash = "sha256:952e34895f9bfb78ab479e495d77b32180262e5c42f531ce3a1722d6e5a04dfb"},
]
[package.dependencies]
@@ -403,29 +403,29 @@ typing-extensions = ">=4.10,<5"
[[package]]
name = "h11"
-version = "0.14.0"
+version = "0.16.0"
description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"},
- {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"},
+ {file = "h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86"},
+ {file = "h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1"},
]
[[package]]
name = "httpcore"
-version = "1.0.8"
+version = "1.0.9"
description = "A minimal low-level HTTP client."
optional = false
python-versions = ">=3.8"
files = [
- {file = "httpcore-1.0.8-py3-none-any.whl", hash = "sha256:5254cf149bcb5f75e9d1b2b9f729ea4a4b883d1ad7379fc632b727cec23674be"},
- {file = "httpcore-1.0.8.tar.gz", hash = "sha256:86e94505ed24ea06514883fd44d2bc02d90e77e7979c8eb71b90f41d364a1bad"},
+ {file = "httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55"},
+ {file = "httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8"},
]
[package.dependencies]
certifi = "*"
-h11 = ">=0.13,<0.15"
+h11 = ">=0.16"
[package.extras]
asyncio = ["anyio (>=4.0,<5.0)"]
@@ -873,13 +873,13 @@ files = [
[[package]]
name = "openai"
-version = "1.75.0"
+version = "1.76.0"
description = "The official Python library for the openai API"
optional = false
python-versions = ">=3.8"
files = [
- {file = "openai-1.75.0-py3-none-any.whl", hash = "sha256:fe6f932d2ded3b429ff67cc9ad118c71327db32eb9d32dd723de3acfca337125"},
- {file = "openai-1.75.0.tar.gz", hash = "sha256:fb3ea907efbdb1bcfd0c44507ad9c961afd7dce3147292b54505ecfd17be8fd1"},
+ {file = "openai-1.76.0-py3-none-any.whl", hash = "sha256:a712b50e78cf78e6d7b2a8f69c4978243517c2c36999756673e07a14ce37dc0a"},
+ {file = "openai-1.76.0.tar.gz", hash = "sha256:fd2bfaf4608f48102d6b74f9e11c5ecaa058b60dad9c36e409c12477dfd91fb2"},
]
[package.dependencies]
@@ -931,13 +931,13 @@ wrapt = ">=1.0.0,<2.0.0"
[[package]]
name = "opentelemetry-instrumentation-anthropic"
-version = "0.39.2"
+version = "0.39.4"
description = "OpenTelemetry Anthropic instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_anthropic-0.39.2-py3-none-any.whl", hash = "sha256:e1bfed6f4e140e0e35d19d44281c968970004467ccc1f40a07233618f798809c"},
- {file = "opentelemetry_instrumentation_anthropic-0.39.2.tar.gz", hash = "sha256:a0dab35b4bc8561623b8f503220846a6b5ad07cd7d3277eeaf5e865d57c6e266"},
+ {file = "opentelemetry_instrumentation_anthropic-0.39.4-py3-none-any.whl", hash = "sha256:f3bebc66b5bfdb83fb6a238a15afbe81f690b3f5314cee76ecf8e35121711972"},
+ {file = "opentelemetry_instrumentation_anthropic-0.39.4.tar.gz", hash = "sha256:15a48d201c97db791b0a1d5e284956178e1d33923ce1c1b90a0735101b83a1a6"},
]
[package.dependencies]
@@ -948,13 +948,13 @@ opentelemetry-semantic-conventions-ai = "0.4.3"
[[package]]
name = "opentelemetry-instrumentation-bedrock"
-version = "0.39.2"
+version = "0.39.4"
description = "OpenTelemetry Bedrock instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_bedrock-0.39.2-py3-none-any.whl", hash = "sha256:dca1b2c5d0c74f41254c6de39fed51167357469159f9453cd9815143a213a1c8"},
- {file = "opentelemetry_instrumentation_bedrock-0.39.2.tar.gz", hash = "sha256:ffe79fa8302dde69c5df86e602288ab48d31bdf3dffe6846cbe6a75cc0bb6385"},
+ {file = "opentelemetry_instrumentation_bedrock-0.39.4-py3-none-any.whl", hash = "sha256:2e74d78b28f7d3928f13826477428aab2ea81e689a851514dda6bf787d0e43f3"},
+ {file = "opentelemetry_instrumentation_bedrock-0.39.4.tar.gz", hash = "sha256:78a988e58e72a11e29cdce4ddb8cfb790315c22d2e84539066fba8bc2c29da8e"},
]
[package.dependencies]
@@ -967,13 +967,13 @@ tokenizers = ">=0.13.0"
[[package]]
name = "opentelemetry-instrumentation-cohere"
-version = "0.39.2"
+version = "0.39.4"
description = "OpenTelemetry Cohere instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_cohere-0.39.2-py3-none-any.whl", hash = "sha256:a71e289231c3ddbe67dd32c0ed8df8b55367ab594410f2cff82f27784268cba5"},
- {file = "opentelemetry_instrumentation_cohere-0.39.2.tar.gz", hash = "sha256:7a7e441d2c8c862e8ba84170bcaef81c5d5e63b42243b7dcc887541a71c90e15"},
+ {file = "opentelemetry_instrumentation_cohere-0.39.4-py3-none-any.whl", hash = "sha256:8408963b1fe1362ab84dd77723a98e54575bc71bf88e25d8252a8de94939773a"},
+ {file = "opentelemetry_instrumentation_cohere-0.39.4.tar.gz", hash = "sha256:0c1c209801dba0238119977e240acd05501a14a39850961c11effe47e4738780"},
]
[package.dependencies]
@@ -984,13 +984,13 @@ opentelemetry-semantic-conventions-ai = "0.4.3"
[[package]]
name = "opentelemetry-instrumentation-groq"
-version = "0.39.2"
+version = "0.39.4"
description = "OpenTelemetry Groq instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_groq-0.39.2-py3-none-any.whl", hash = "sha256:0a19571ef86ce46b18e3c5402d321b620c8d5257bc968e8d7073c8937a376970"},
- {file = "opentelemetry_instrumentation_groq-0.39.2.tar.gz", hash = "sha256:b28a2220f24d8fbea12dc4452ef5812e7ba67c6824b4e62278c3b3ada2248acc"},
+ {file = "opentelemetry_instrumentation_groq-0.39.4-py3-none-any.whl", hash = "sha256:631e0d2ada72f498721bc06be2bcf68ac656d3fac56180e6656bc7d7e53febc4"},
+ {file = "opentelemetry_instrumentation_groq-0.39.4.tar.gz", hash = "sha256:8ff5dd2e904af2128c9dd4e79d08264421750ca855731b7a983d6962df7244ca"},
]
[package.dependencies]
@@ -1001,13 +1001,13 @@ opentelemetry-semantic-conventions-ai = "0.4.3"
[[package]]
name = "opentelemetry-instrumentation-openai"
-version = "0.39.2"
+version = "0.39.4"
description = "OpenTelemetry OpenAI instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_openai-0.39.2-py3-none-any.whl", hash = "sha256:a9016e577a8c11cdfc6d79ebb84ed5f6dcacb59d709d250e40b3d08f9d4c25a2"},
- {file = "opentelemetry_instrumentation_openai-0.39.2.tar.gz", hash = "sha256:25cf133fa3b623f123d953c9d637e6529a1790cd2898bf4d6a50c5bffe260821"},
+ {file = "opentelemetry_instrumentation_openai-0.39.4-py3-none-any.whl", hash = "sha256:94568157e29cb1e0780333b4c3eef42ae6cebb9dbf17383c2b8abcd1fd453bb8"},
+ {file = "opentelemetry_instrumentation_openai-0.39.4.tar.gz", hash = "sha256:6eaba7ddfe051fed9e33faccc580f38e8ca0da465e34a5a5848bfccfae5b4e21"},
]
[package.dependencies]
@@ -1019,13 +1019,13 @@ tiktoken = ">=0.6.0,<1"
[[package]]
name = "opentelemetry-instrumentation-replicate"
-version = "0.39.2"
+version = "0.39.4"
description = "OpenTelemetry Replicate instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_replicate-0.39.2-py3-none-any.whl", hash = "sha256:778ec5a2bf7767b7377ece0dec66dc2d02f1ea8ca3f8037c96c7b6695c56b8db"},
- {file = "opentelemetry_instrumentation_replicate-0.39.2.tar.gz", hash = "sha256:6b9ddbf89d844ffc3725925af04fbee3a0f7a6d19d6050fb9c72bb8dd2eca7eb"},
+ {file = "opentelemetry_instrumentation_replicate-0.39.4-py3-none-any.whl", hash = "sha256:4be73ca3af3afb2444b115618a001842503bea8f4ea0a640f70958d52a420b23"},
+ {file = "opentelemetry_instrumentation_replicate-0.39.4.tar.gz", hash = "sha256:7683ea3314e68aa2db3a0146a6778790ba64e04bc7e92254014b752c2e7bad40"},
]
[package.dependencies]
@@ -1729,13 +1729,13 @@ files = [
[[package]]
name = "replicate"
-version = "1.0.4"
+version = "1.0.6"
description = "Python client for Replicate"
optional = false
python-versions = ">=3.8"
files = [
- {file = "replicate-1.0.4-py3-none-any.whl", hash = "sha256:f568f6271ff715067901b6094c23c37373bbcfd7de0ff9b85e9c9ead567e09e7"},
- {file = "replicate-1.0.4.tar.gz", hash = "sha256:f718601863ef1f419aa7dcdab1ea8770ba5489b571b86edf840cd506d68758ef"},
+ {file = "replicate-1.0.6-py3-none-any.whl", hash = "sha256:d544f837dc7e9dc3b3c1df60a145c7d6f362d6719b719793a44a4be28837103d"},
+ {file = "replicate-1.0.6.tar.gz", hash = "sha256:b8a0f1649ed4146c3d624e22a418b8c6decce9346cffc110c90fde5995c46e60"},
]
[package.dependencies]
diff --git a/pyproject.toml b/pyproject.toml
index ad96beec..73f2c3d4 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -3,7 +3,7 @@ name = "humanloop"
[tool.poetry]
name = "humanloop"
-version = "0.8.35"
+version = "0.8.36"
description = ""
readme = "README.md"
authors = []
diff --git a/reference.md b/reference.md
index 27a32c92..4ec04c0e 100644
--- a/reference.md
+++ b/reference.md
@@ -56,7 +56,7 @@ client.prompts.log(
messages=[{"role": "user", "content": "What really happened at Roswell?"}],
inputs={"person": "Trump"},
created_at=datetime.datetime.fromisoformat(
- "2024-07-18 21:29:35.178000+00:00",
+ "2024-07-18 23:29:35.178000+00:00",
),
provider_latency=6.5931549072265625,
output_message={
@@ -202,7 +202,12 @@ Controls how the model uses tools. The following options are supported:
-
-**prompt:** `typing.Optional[PromptKernelRequestParams]` — Details of your Prompt. A new Prompt version will be created if the provided details are new.
+**prompt:** `typing.Optional[PromptLogRequestPromptParams]`
+
+The prompt configuration to use. Two formats are supported:
+- A `'PromptKernelRequest'` object containing the prompt configuration
+- A string containing a serialized .prompt file
+A new Prompt version will be created if the provided details are new.
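Both formats go through the same parameter; a minimal sketch (the path, model settings, file name, and message contents here are illustrative, not part of this patch):

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Format 1: a PromptKernelRequest-style dict with the prompt configuration.
client.prompts.log(
    path="example/my-prompt",
    prompt={"model": "gpt-4o", "temperature": 0.7},
    messages=[{"role": "user", "content": "Hello"}],
    output_message={"role": "assistant", "content": "Hi there!"},
)

# Format 2: the contents of a serialized .prompt file, passed as a string.
with open("my-prompt.prompt") as f:
    client.prompts.log(
        path="example/my-prompt",
        prompt=f.read(),
        messages=[{"role": "user", "content": "Hello"}],
        output_message={"role": "assistant", "content": "Hi there!"},
    )
```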
@@ -752,7 +757,12 @@ Controls how the model uses tools. The following options are supported:
-
-**prompt:** `typing.Optional[PromptKernelRequestParams]` — Details of your Prompt. A new Prompt version will be created if the provided details are new.
+**prompt:** `typing.Optional[PromptsCallStreamRequestPromptParams]`
+
+The prompt configuration to use. Two formats are supported:
+- A `'PromptKernelRequest'` object containing the prompt configuration
+- A string containing a serialized .prompt file
+A new Prompt version will be created if the provided details are new.
@@ -1026,7 +1036,12 @@ Controls how the model uses tools. The following options are supported:
-
-**prompt:** `typing.Optional[PromptKernelRequestParams]` — Details of your Prompt. A new Prompt version will be created if the provided details are new.
+**prompt:** `typing.Optional[PromptsCallRequestPromptParams]`
+
+The prompt configuration to use. Two formats are supported:
+- A `'PromptKernelRequest'` object containing the prompt configuration
+- A string containing a serialized .prompt file
+A new Prompt version will be created if the provided details are new.
@@ -1501,7 +1516,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
-
-**reasoning_effort:** `typing.Optional[ReasoningEffort]` — Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+**reasoning_effort:** `typing.Optional[PromptRequestReasoningEffortParams]` — Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which specifies the maximum token budget.
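A sketch of the two shapes, assuming the surrounding upsert endpoint; the paths and model names are illustrative:

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# OpenAI reasoning models take an OpenAIReasoningEffort enum value.
client.prompts.upsert(
    path="example/openai-reasoner",
    model="o3-mini",
    reasoning_effort="medium",
)

# Anthropic reasoning models take an integer maximum token budget instead.
client.prompts.upsert(
    path="example/anthropic-reasoner",
    model="claude-3-7-sonnet-latest",
    reasoning_effort=1024,
)
```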
@@ -2518,8 +2533,7 @@ client.prompts.update_monitoring(
-## Tools
-client.tools.log(...)
+client.prompts.serialize(...)
-
@@ -2531,15 +2545,13 @@ client.prompts.update_monitoring(
-
-Log to a Tool.
+Serialize a Prompt to the .prompt file format.
-You can use query parameters `version_id`, or `environment`, to target
-an existing version of the Tool. Otherwise the default deployed version will be chosen.
+Useful for storing the Prompt with your code in a version control system,
+or for editing with an AI tool.
-Instead of targeting an existing version explicitly, you can instead pass in
-Tool details in the request body. In this case, we will check if the details correspond
-to an existing version of the Tool, if not we will create a new version. This is helpful
-in the case where you are storing or deriving your Tool details in code.
+By default, the deployed version of the Prompt is returned. Use the query parameters
+`version_id` or `environment` to target a specific version of the Prompt.
@@ -2559,24 +2571,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.tools.log(
- path="math-tool",
- tool={
- "function": {
- "name": "multiply",
- "description": "Multiply two numbers",
- "parameters": {
- "type": "object",
- "properties": {
- "a": {"type": "number"},
- "b": {"type": "number"},
- },
- "required": ["a", "b"],
- },
- }
- },
- inputs={"a": 5, "b": 7},
- output="35",
+client.prompts.serialize(
+ id="id",
)
```
@@ -2593,7 +2589,7 @@ client.tools.log(
-
-**version_id:** `typing.Optional[str]` — A specific Version ID of the Tool to log to.
+**id:** `str` — Unique identifier for Prompt.
@@ -2601,7 +2597,7 @@ client.tools.log(
-
-**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Prompt to retrieve.
@@ -2609,7 +2605,7 @@ client.tools.log(
-
-**path:** `typing.Optional[str]` — Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
@@ -2617,31 +2613,72 @@ client.tools.log(
-
-**id:** `typing.Optional[str]` — ID for an existing Tool.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+client.prompts.deserialize(...)
-
-**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
-
+#### 📝 Description
+
+
+-
+
+
+-
+
+Deserialize a Prompt from the .prompt file format.
+
+This returns a subset of the attributes required by a Prompt.
+This subset defines the Prompt version (e.g. `model`, `temperature`, etc.).
+
+
+#### 🔌 Usage
+
-
-**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
-
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.prompts.deserialize(
+ prompt="prompt",
+)
+
+```
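Together with `serialize` above, this supports a simple round trip. A sketch, assuming `serialize` returns the .prompt text and using a placeholder Prompt ID:

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Serialize the deployed Prompt version and store it next to your code.
serialized = client.prompts.serialize(id="pr_123abc")  # placeholder Prompt ID
with open("my-prompt.prompt", "w") as f:
    f.write(serialized)

# Later, recover the version-defining attributes (model, temperature, ...).
with open("my-prompt.prompt") as f:
    kernel = client.prompts.deserialize(prompt=f.read())
```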
+
+
+#### ⚙️ Parameters
+
-
-**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+-
+
+**prompt:** `str` — Serialized Prompt in the .prompt file format.
@@ -2649,15 +2686,78 @@ client.tools.log(
-
-**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+## Tools
+client.tools.call(...)
-
-**error:** `typing.Optional[str]` — Error message if the log is an error.
+#### 📝 Description
+
+
+-
+
+
+-
+
+Call a Tool.
+
+Calling a Tool with inputs runs the tool's source code and logs the result and metadata to Humanloop.
+
+You can use the query parameters `version_id` or `environment` to target
+an existing version of the Tool. Otherwise, the default deployed version will be chosen.
+
+Instead of targeting an existing version explicitly, you can pass in
+Tool details in the request body. In this case, we will check if the details correspond
+to an existing version of the Tool. If they do not, we will create a new version. This is helpful
+in the case where you are storing or deriving your Tool details in code.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.tools.call()
+
+```
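A sketch of a more concrete call that targets a Tool by path; the `inputs` argument is an assumption mirroring the `tools.log` examples, not taken from this patch:

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Run the Tool's source code with concrete inputs and log the result.
result = client.tools.call(
    path="math-tool",
    inputs={"a": 5, "b": 7},
)
```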
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Tool to call.
@@ -2665,7 +2765,7 @@ client.tools.log(
-
-**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
+**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to call.
@@ -2673,7 +2773,7 @@ client.tools.log(
-
-**stdout:** `typing.Optional[str]` — Captured log and debug statements.
+**path:** `typing.Optional[str]` — Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
@@ -2681,7 +2781,7 @@ client.tools.log(
-
-**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider.
+**id:** `typing.Optional[str]` — ID for an existing Tool.
@@ -2689,7 +2789,7 @@ client.tools.log(
-
-**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received the provider.
+**tool:** `typing.Optional[ToolKernelRequestParams]` — Details of your Tool. A new Tool version will be created if the provided details are new.
@@ -2721,7 +2821,7 @@ client.tools.log(
-
-**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
@@ -2729,7 +2829,7 @@ client.tools.log(
-
-**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
@@ -2737,7 +2837,7 @@ client.tools.log(
-
-**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
+**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
@@ -2745,7 +2845,7 @@ client.tools.log(
-
-**user:** `typing.Optional[str]` — End-user ID related to the Log.
+**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -2753,7 +2853,7 @@ client.tools.log(
-
-**tool_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to.
+**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
@@ -2761,7 +2861,7 @@ client.tools.log(
-
-**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
+**user:** `typing.Optional[str]` — End-user ID related to the Log.
@@ -2769,7 +2869,7 @@ client.tools.log(
-
-**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+**tool_call_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to.
@@ -2777,7 +2877,15 @@ client.tools.log(
-
-**tool:** `typing.Optional[ToolKernelRequestParams]` — Details of your Tool. A new Tool version will be created if the provided details are new.
+**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
+
+
+
+
+
+-
+
+**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
@@ -2797,7 +2905,7 @@ client.tools.log(
-client.tools.update(...)
+client.tools.log(...)
-
@@ -2809,9 +2917,15 @@ client.tools.log(
-
-Update a Log.
+Log to a Tool.
-Update the details of a Log with the given ID.
+You can use the query parameters `version_id` or `environment` to target
+an existing version of the Tool. Otherwise, the default deployed version will be chosen.
+
+Instead of targeting an existing version explicitly, you can pass in
+Tool details in the request body. In this case, we will check if the details correspond
+to an existing version of the Tool; if not, we will create a new version. This is helpful
+in the case where you are storing or deriving your Tool details in code.
@@ -2831,9 +2945,24 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.tools.update(
- id="id",
- log_id="log_id",
+client.tools.log(
+ path="math-tool",
+ tool={
+ "function": {
+ "name": "multiply",
+ "description": "Multiply two numbers",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "a": {"type": "number"},
+ "b": {"type": "number"},
+ },
+ "required": ["a", "b"],
+ },
+ }
+ },
+ inputs={"a": 5, "b": 7},
+ output="35",
)
```
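To target an existing version rather than passing Tool details inline, the documented `version_id` query parameter can be used. A sketch with placeholder values:

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

client.tools.log(
    version_id="tv_012jkl",  # placeholder Version ID of the Tool to log to
    inputs={"a": 5, "b": 7},
    output="35",
)
```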
@@ -2850,7 +2979,7 @@ client.tools.update(
-
-**id:** `str` — Unique identifier for Prompt.
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Tool to log to.
@@ -2858,7 +2987,7 @@ client.tools.update(
-
-**log_id:** `str` — Unique identifier for the Log.
+**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
@@ -2866,7 +2995,7 @@ client.tools.update(
-
-**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+**path:** `typing.Optional[str]` — Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
@@ -2874,7 +3003,7 @@ client.tools.update(
-
-**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
+**id:** `typing.Optional[str]` — ID for an existing Tool.
@@ -2882,7 +3011,7 @@ client.tools.update(
-
-**error:** `typing.Optional[str]` — Error message if the log is an error.
+**tool:** `typing.Optional[ToolKernelRequestParams]` — Details of your Tool. A new Tool version will be created if the provided details are new.
@@ -2890,7 +3019,7 @@ client.tools.update(
-
-**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
+**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
@@ -2898,7 +3027,7 @@ client.tools.update(
-
-**stdout:** `typing.Optional[str]` — Captured log and debug statements.
+**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
@@ -2906,7 +3035,7 @@ client.tools.update(
-
-**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider.
+**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
@@ -2914,7 +3043,7 @@ client.tools.update(
-
-**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received the provider.
+**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
@@ -2922,7 +3051,7 @@ client.tools.update(
-
-**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
+**error:** `typing.Optional[str]` — Error message if the log is an error.
@@ -2930,7 +3059,7 @@ client.tools.update(
-
-**source:** `typing.Optional[str]` — Identifies where the model was called from.
+**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
@@ -2938,7 +3067,7 @@ client.tools.update(
-
-**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
+**stdout:** `typing.Optional[str]` — Captured log and debug statements.
@@ -2946,7 +3075,7 @@ client.tools.update(
-
-**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
+**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider.
@@ -2954,7 +3083,7 @@ client.tools.update(
-
-**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
+**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received from the provider.
@@ -2962,7 +3091,7 @@ client.tools.update(
-
-**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the Tool.
@@ -2970,74 +3099,31 @@ client.tools.update(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**source:** `typing.Optional[str]` — Identifies where the model was called from.
-
-
+
+-
+**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
+
-
-client.tools.list(...)
-
-#### 📝 Description
-
-
--
+**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+
+
-
-Get a list of all Tools.
-
-
-
-
-
-#### 🔌 Usage
-
-
--
-
-
--
-
-```python
-from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-response = client.tools.list(
- size=1,
-)
-for item in response:
- yield item
-# alternatively, you can paginate page-by-page
-for page in response.iter_pages():
- yield page
-
-```
-
-
-
-
-
-#### ⚙️ Parameters
-
-
--
-
-
--
-
-**page:** `typing.Optional[int]` — Page offset for pagination.
+**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -3045,7 +3131,7 @@ for page in response.iter_pages():
-
-**size:** `typing.Optional[int]` — Page size for pagination. Number of Tools to fetch.
+**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
@@ -3053,7 +3139,7 @@ for page in response.iter_pages():
-
-**name:** `typing.Optional[str]` — Case-insensitive filter for Tool name.
+**user:** `typing.Optional[str]` — End-user ID related to the Log.
@@ -3061,7 +3147,7 @@ for page in response.iter_pages():
-
-**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Tool. This filter matches against both email address and name of users.
+**tool_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to.
@@ -3069,7 +3155,7 @@ for page in response.iter_pages():
-
-**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Tools by
+**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
@@ -3077,7 +3163,7 @@ for page in response.iter_pages():
-
-**order:** `typing.Optional[SortOrder]` — Direction to sort by.
+**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
@@ -3097,7 +3183,7 @@ for page in response.iter_pages():
-client.tools.upsert(...)
+client.tools.update(...)
-
@@ -3109,13 +3195,9 @@ for page in response.iter_pages():
-
-Create a Tool or update it with a new version if it already exists.
-
-Tools are identified by the `ID` or their `path`. The name, description and parameters determine the versions of the Tool.
+Update a Log.
-You can provide `version_name` and `version_description` to identify and describe your versions.
-Version names must be unique within a Tool - attempting to create a version with a name
-that already exists will result in a 409 Conflict error.
+Update the details of a Log with the given ID.
@@ -3135,19 +3217,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.tools.upsert(
- path="math-tool",
- function={
- "name": "multiply",
- "description": "Multiply two numbers",
- "parameters": {
- "type": "object",
- "properties": {"a": {"type": "number"}, "b": {"type": "number"}},
- "required": ["a", "b"],
- },
- },
- version_name="math-tool-v1",
- version_description="Simple math tool that multiplies two numbers",
+client.tools.update(
+ id="id",
+ log_id="log_id",
)
```
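A common pattern combining `tools.log` and `tools.update` is two-phase logging via the `log_status` field documented below: create an `incomplete` Log, then mark it `complete` once the output is known. A sketch with placeholder IDs:

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Phase 1: create the Log without an output and mark it incomplete.
client.tools.log(
    path="math-tool",
    inputs={"a": 5, "b": 7},
    log_status="incomplete",
    log_id="log_123",  # supply your own Log ID so you can update it later
)

# Phase 2: fill in the output and mark the Log complete.
client.tools.update(
    id="tl_789ghi",   # placeholder Tool ID
    log_id="log_123",
    output="35",
    log_status="complete",
)
```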
@@ -3164,7 +3236,7 @@ client.tools.upsert(
-
-**path:** `typing.Optional[str]` — Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+**id:** `str` — Unique identifier for Tool.
@@ -3172,7 +3244,7 @@ client.tools.upsert(
-
-**id:** `typing.Optional[str]` — ID for an existing Tool.
+**log_id:** `str` — Unique identifier for the Log.
@@ -3180,7 +3252,7 @@ client.tools.upsert(
-
-**function:** `typing.Optional[ToolFunctionParams]` — Callable function specification of the Tool shown to the model for tool calling.
+**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
@@ -3188,7 +3260,7 @@ client.tools.upsert(
-
-**source_code:** `typing.Optional[str]` — Code source of the Tool.
+**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
@@ -3196,7 +3268,7 @@ client.tools.upsert(
-
-**setup_values:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Values needed to setup the Tool, defined in JSON Schema format: https://json-schema.org/
+**error:** `typing.Optional[str]` — Error message if the log is an error.
@@ -3204,7 +3276,7 @@ client.tools.upsert(
-
-**attributes:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Additional fields to describe the Tool. Helpful to separate Tool versions from each other with details on how they were created or used.
+**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
@@ -3212,7 +3284,7 @@ client.tools.upsert(
-
-**tool_type:** `typing.Optional[FilesToolType]` — Type of Tool.
+**stdout:** `typing.Optional[str]` — Captured log and debug statements.
@@ -3220,7 +3292,7 @@ client.tools.upsert(
-
-**version_name:** `typing.Optional[str]` — Unique identifier for this Tool version. Each Tool can only have one version with a given name.
+**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider.
@@ -3228,7 +3300,7 @@ client.tools.upsert(
-
-**version_description:** `typing.Optional[str]` — Description of the Version.
+**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received from the provider.
@@ -3236,72 +3308,31 @@ client.tools.upsert(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the Tool.
-
-
-
-
-
-
-
-
-client.tools.get(...)
-
--
-
-#### 📝 Description
-
-
--
-
-Retrieve the Tool with the given ID.
-
-By default, the deployed version of the Tool is returned. Use the query parameters
-`version_id` or `environment` to target a specific version of the Tool.
-
-
+**source:** `typing.Optional[str]` — Identifies where the model was called from.
+
-#### 🔌 Usage
-
-
--
-
-
-```python
-from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.tools.get(
- id="tl_789ghi",
-)
-
-```
-
-
+**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
+
-#### ⚙️ Parameters
-
-
--
-
-
-**id:** `str` — Unique identifier for Tool.
+**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
@@ -3309,7 +3340,7 @@ client.tools.get(
-
-**version_id:** `typing.Optional[str]` — A specific Version ID of the Tool to retrieve.
+**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
@@ -3317,7 +3348,7 @@ client.tools.get(
-
-**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
+**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
@@ -3337,7 +3368,7 @@ client.tools.get(
-client.tools.delete(...)
+client.tools.list(...)
-
@@ -3349,7 +3380,7 @@ client.tools.get(
-
-Delete the Tool with the given ID.
+Get a list of all Tools.
@@ -3369,9 +3400,14 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.tools.delete(
- id="tl_789ghi",
+response = client.tools.list(
+ size=1,
)
+for item in response:
+    print(item)
+# alternatively, you can paginate page-by-page
+for page in response.iter_pages():
+    print(page)
```
@@ -3387,7 +3423,7 @@ client.tools.delete(
-
-**id:** `str` — Unique identifier for Tool.
+**page:** `typing.Optional[int]` — Page offset for pagination.
@@ -3395,70 +3431,23 @@ client.tools.delete(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**size:** `typing.Optional[int]` — Page size for pagination. Number of Tools to fetch.
-
-
-
-
-
-
-
-
-client.tools.move(...)
-
--
-
-#### 📝 Description
-
-
--
-
-
--
-
-Move the Tool to a different path or change the name.
-
-
-
-
-
-#### 🔌 Usage
-
-
--
-
-```python
-from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.tools.move(
- id="tl_789ghi",
- path="new directory/new name",
-)
-
-```
-
-
+**name:** `typing.Optional[str]` — Case-insensitive filter for Tool name.
+
-#### ⚙️ Parameters
-
-
--
-
-
-**id:** `str` — Unique identifier for Tool.
+**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Tool. This filter matches against both email address and name of users.
@@ -3466,7 +3455,7 @@ client.tools.move(
-
-**path:** `typing.Optional[str]` — Path of the Tool including the Tool name, which is used as a unique identifier.
+**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Tools by.
@@ -3474,7 +3463,7 @@ client.tools.move(
-
-**name:** `typing.Optional[str]` — Name of the Tool, which is used as a unique identifier.
+**order:** `typing.Optional[SortOrder]` — Direction to sort by.
@@ -3494,7 +3483,7 @@ client.tools.move(
-client.tools.list_versions(...)
+client.tools.upsert(...)
-
@@ -3506,7 +3495,13 @@ client.tools.move(
-
-Get a list of all the versions of a Tool.
+Create a Tool or update it with a new version if it already exists.
+
+Tools are identified by the `ID` or their `path`. The name, description and parameters determine the versions of the Tool.
+
+You can provide `version_name` and `version_description` to identify and describe your versions.
+Version names must be unique within a Tool - attempting to create a version with a name
+that already exists will result in a 409 Conflict error.
@@ -3526,8 +3521,19 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.tools.list_versions(
- id="tl_789ghi",
+client.tools.upsert(
+ path="math-tool",
+ function={
+ "name": "multiply",
+ "description": "Multiply two numbers",
+ "parameters": {
+ "type": "object",
+ "properties": {"a": {"type": "number"}, "b": {"type": "number"}},
+ "required": ["a", "b"],
+ },
+ },
+ version_name="math-tool-v1",
+ version_description="Simple math tool that multiplies two numbers",
)
```
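Since duplicate version names return a 409 Conflict, callers may want to handle that case. A sketch, assuming the Fern-generated `ApiError` class lives at `humanloop.core.api_error`:

```python
from humanloop import Humanloop
from humanloop.core.api_error import ApiError  # assumed location of the generated error class

client = Humanloop(api_key="YOUR_API_KEY")

try:
    client.tools.upsert(
        path="math-tool",
        function={
            "name": "multiply",
            "description": "Multiply two numbers",
            "parameters": {
                "type": "object",
                "properties": {"a": {"type": "number"}, "b": {"type": "number"}},
                "required": ["a", "b"],
            },
        },
        version_name="math-tool-v1",  # must be unique within the Tool
    )
except ApiError as err:
    if err.status_code == 409:
        print("A Tool version named 'math-tool-v1' already exists.")
    else:
        raise
```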
@@ -3544,7 +3550,7 @@ client.tools.list_versions(
-
-**id:** `str` — Unique identifier for the Tool.
+**path:** `typing.Optional[str]` — Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
@@ -3552,7 +3558,63 @@ client.tools.list_versions(
-
-**evaluator_aggregates:** `typing.Optional[bool]` — Whether to include Evaluator aggregate results for the versions in the response
+**id:** `typing.Optional[str]` — ID for an existing Tool.
+
+
+
+
+
+-
+
+**function:** `typing.Optional[ToolFunctionParams]` — Callable function specification of the Tool shown to the model for tool calling.
+
+
+
+
+
+-
+
+**source_code:** `typing.Optional[str]` — Code source of the Tool.
+
+
+
+
+
+-
+
+**setup_values:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Values needed to set up the Tool, defined in JSON Schema format: https://json-schema.org/
+
+
+
+
+
+-
+
+**attributes:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Additional fields to describe the Tool. Helpful to separate Tool versions from each other with details on how they were created or used.
+
+
+
+
+
+-
+
+**tool_type:** `typing.Optional[FilesToolType]` — Type of Tool.
+
+
+
+
+
+-
+
+**version_name:** `typing.Optional[str]` — Unique identifier for this Tool version. Each Tool can only have one version with a given name.
+
+
+
+
+
+-
+
+**version_description:** `typing.Optional[str]` — Description of the Version.
@@ -3572,7 +3634,7 @@ client.tools.list_versions(
-client.tools.delete_tool_version(...)
+client.tools.get(...)
-
@@ -3584,7 +3646,10 @@ client.tools.list_versions(
-
-Delete a version of the Tool.
+Retrieve the Tool with the given ID.
+
+By default, the deployed version of the Tool is returned. Use the query parameters
+`version_id` or `environment` to target a specific version of the Tool.
@@ -3604,9 +3669,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.tools.delete_tool_version(
- id="id",
- version_id="version_id",
+client.tools.get(
+ id="tl_789ghi",
)
```
@@ -3631,7 +3695,15 @@ client.tools.delete_tool_version(
-
-**version_id:** `str` — Unique identifier for the specific version of the Tool.
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Tool to retrieve.
+
+
+
+
+
+-
+
+**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
@@ -3651,7 +3723,7 @@ client.tools.delete_tool_version(
-client.tools.update_tool_version(...)
+client.tools.delete(...)
-
@@ -3663,7 +3735,7 @@ client.tools.delete_tool_version(
-
-Update the name or description of the Tool version.
+Delete the Tool with the given ID.
@@ -3683,9 +3755,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.tools.update_tool_version(
- id="id",
- version_id="version_id",
+client.tools.delete(
+ id="tl_789ghi",
)
```
@@ -3710,30 +3781,6 @@ client.tools.update_tool_version(
-
-**version_id:** `str` — Unique identifier for the specific version of the Tool.
-
-
-
-
-
--
-
-**name:** `typing.Optional[str]` — Name of the version.
-
-
-
-
-
--
-
-**description:** `typing.Optional[str]` — Description of the version.
-
-
-
-
-
--
-
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -3746,7 +3793,7 @@ client.tools.update_tool_version(
-client.tools.set_deployment(...)
+client.tools.move(...)
-
@@ -3758,10 +3805,7 @@ client.tools.update_tool_version(
-
-Deploy Tool to an Environment.
-
-Set the deployed version for the specified Environment. This Prompt
-will be used for calls made to the Tool in this Environment.
+Move the Tool to a different path or change the name.
@@ -3781,10 +3825,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.tools.set_deployment(
+client.tools.move(
id="tl_789ghi",
- environment_id="staging",
- version_id="tv_012jkl",
+ path="new directory/new name",
)
```
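Once deployed, calls that target the Environment resolve to this version. A sketch; the `inputs` argument on `tools.call` is an assumption mirroring `tools.log`:

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

client.tools.set_deployment(
    id="tl_789ghi",
    environment_id="staging",
    version_id="tv_012jkl",
)

# Subsequent calls targeting the Environment use the deployed version.
client.tools.call(
    id="tl_789ghi",
    environment="staging",
    inputs={"a": 5, "b": 7},  # assumed to mirror tools.log
)
```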
@@ -3809,7 +3852,7 @@ client.tools.set_deployment(
-
-**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
+**path:** `typing.Optional[str]` — Path of the Tool including the Tool name, which is used as a unique identifier.
@@ -3817,7 +3860,7 @@ client.tools.set_deployment(
-
-**version_id:** `str` — Unique identifier for the specific version of the Tool.
+**name:** `typing.Optional[str]` — Name of the Tool, which is used as a unique identifier.
@@ -3837,7 +3880,7 @@ client.tools.set_deployment(
-client.tools.remove_deployment(...)
+client.tools.list_versions(...)
-
@@ -3849,10 +3892,7 @@ client.tools.set_deployment(
-
-Remove deployed Tool from the Environment.
-
-Remove the deployed version for the specified Environment. This Tool
-will no longer be used for calls made to the Tool in this Environment.
+Get a list of all the versions of a Tool.
@@ -3872,9 +3912,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.tools.remove_deployment(
+client.tools.list_versions(
id="tl_789ghi",
- environment_id="staging",
)
```
@@ -3891,7 +3930,7 @@ client.tools.remove_deployment(
-
-**id:** `str` — Unique identifier for Tool.
+**id:** `str` — Unique identifier for the Tool.
@@ -3899,7 +3938,7 @@ client.tools.remove_deployment(
-
-**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
+**evaluator_aggregates:** `typing.Optional[bool]` — Whether to include Evaluator aggregate results for the versions in the response.
@@ -3919,7 +3958,7 @@ client.tools.remove_deployment(
-client.tools.list_environments(...)
+client.tools.delete_tool_version(...)
-
@@ -3931,7 +3970,7 @@ client.tools.remove_deployment(
-
-List all Environments and their deployed versions for the Tool.
+Delete a version of the Tool.
@@ -3951,8 +3990,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.tools.list_environments(
- id="tl_789ghi",
+client.tools.delete_tool_version(
+ id="id",
+ version_id="version_id",
)
```
@@ -3977,6 +4017,14 @@ client.tools.list_environments(
-
+**version_id:** `str` — Unique identifier for the specific version of the Tool.
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -3989,7 +4037,7 @@ client.tools.list_environments(
-client.tools.update_monitoring(...)
+client.tools.update_tool_version(...)
-
@@ -4001,10 +4049,7 @@ client.tools.list_environments(
-
-Activate and deactivate Evaluators for monitoring the Tool.
-
-An activated Evaluator will automatically be run on all new Logs
-within the Tool for monitoring purposes.
+Update the name or description of the Tool version.
@@ -4024,9 +4069,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.tools.update_monitoring(
- id="tl_789ghi",
- activate=[{"evaluator_version_id": "evv_1abc4308abd"}],
+client.tools.update_tool_version(
+ id="id",
+ version_id="version_id",
)
```
@@ -4043,7 +4088,7 @@ client.tools.update_monitoring(
-
-**id:** `str`
+**id:** `str` — Unique identifier for Tool.
@@ -4051,9 +4096,7 @@ client.tools.update_monitoring(
-
-**activate:** `typing.Optional[
- typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]
-]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+**version_id:** `str` — Unique identifier for the specific version of the Tool.
@@ -4061,9 +4104,15 @@ client.tools.update_monitoring(
-
-**deactivate:** `typing.Optional[
- typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]
-]` — Evaluators to deactivate. These will not be run on new Logs.
+**name:** `typing.Optional[str]` — Name of the version.
+
+
+
+
+
+-
+
+**description:** `typing.Optional[str]` — Description of the version.
@@ -4083,8 +4132,7 @@ client.tools.update_monitoring(
-## Datasets
-client.datasets.list(...)
+client.tools.set_deployment(...)
-
@@ -4096,7 +4144,10 @@ client.tools.update_monitoring(
-
-List all Datasets.
+Deploy Tool to an Environment.
+
+Set the deployed version for the specified Environment. This version
+will be used for calls made to the Tool in this Environment.
@@ -4116,14 +4167,11 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-response = client.datasets.list(
- size=1,
+client.tools.set_deployment(
+ id="tl_789ghi",
+ environment_id="staging",
+ version_id="tv_012jkl",
)
-for item in response:
- yield item
-# alternatively, you can paginate page-by-page
-for page in response.iter_pages():
- yield page
```
@@ -4139,7 +4187,7 @@ for page in response.iter_pages():
-
-**page:** `typing.Optional[int]` — Page offset for pagination.
+**id:** `str` — Unique identifier for Tool.
@@ -4147,7 +4195,7 @@ for page in response.iter_pages():
-
-**size:** `typing.Optional[int]` — Page size for pagination. Number of Datasets to fetch.
+**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
@@ -4155,7 +4203,7 @@ for page in response.iter_pages():
-
-**name:** `typing.Optional[str]` — Case-insensitive filter for Dataset name.
+**version_id:** `str` — Unique identifier for the specific version of the Tool.
@@ -4163,47 +4211,40 @@ for page in response.iter_pages():
-
-**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Dataset. This filter matches against both email address and name of users.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Datasets by
-
+
+client.tools.remove_deployment(...)
-
-**order:** `typing.Optional[SortOrder]` — Direction to sort by.
-
-
-
+#### 📝 Description
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
-
-
-
-
+
+-
+Remove deployed Tool from the Environment.
+Remove the deployed version for the specified Environment. This version
+will no longer be used for calls made to the Tool in this Environment.
+
+
-
-
-client.datasets.upsert(...)
-
--
-#### 📝 Description
+#### 🔌 Usage
-
@@ -4211,70 +4252,15 @@ for page in response.iter_pages():
-
-Create a Dataset or update it with a new version if it already exists.
-
-Datasets are identified by the `ID` or their `path`. The datapoints determine the versions of the Dataset.
-
-By default, the new Dataset version will be set to the list of Datapoints provided in
-the request. You can also create a new version by adding or removing Datapoints from an existing version
-by specifying `action` as `add` or `remove` respectively. In this case, you may specify
-the `version_id` or `environment` query parameters to identify the existing version to base
-the new version on. If neither is provided, the latest created version will be used.
-
-You can provide `version_name` and `version_description` to identify and describe your versions.
-Version names must be unique within a Dataset - attempting to create a version with a name
-that already exists will result in a 409 Conflict error.
-
-Humanloop also deduplicates Datapoints. If you try to add a Datapoint that already
-exists, it will be ignored. If you intentionally want to add a duplicate Datapoint,
-you can add a unique identifier to the Datapoint's inputs such as `{_dedupe_id: }`.
-
-
-
-
-
-#### 🔌 Usage
-
-
--
-
-
--
-
-```python
-from humanloop import Humanloop
+```python
+from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.datasets.upsert(
- path="datasets/support-queries",
- datapoints=[
- {
- "messages": [
- {
- "role": "user",
- "content": "How do i manage my organizations API keys?\n",
- }
- ],
- "target": {
- "response": 'Hey, thanks for your questions. Here are steps for how to achieve: 1. Log in to the Humanloop Dashboard \n\n2. Click on "Organization Settings."\n If you do not see this option, you might need to contact your organization admin to gain the necessary permissions.\n\n3. Within the settings or organization settings, select the option labeled "API Keys" on the left. Here you will be able to view and manage your API keys.\n\n4. You will see a list of existing API keys. You can perform various actions, such as:\n - **Generate New API Key:** Click on the "Generate New Key" button if you need a new API key.\n - **Revoke an API Key:** If you need to disable an existing key, find the key in the list and click the "Revoke" or "Delete" button.\n - **Copy an API Key:** If you need to use an existing key, you can copy it to your clipboard by clicking the "Copy" button next to the key.\n\n5. **Save and Secure API Keys:** Make sure to securely store any new or existing API keys you are using. Treat them like passwords and do not share them publicly.\n\nIf you encounter any issues or need further assistance, it might be helpful to engage with an engineer or your IT department to ensure you have the necessary permissions and support.\n\nWould you need help with anything else?'
- },
- },
- {
- "messages": [
- {
- "role": "user",
- "content": "Hey, can do I use my code evaluator for monitoring my legal-copilot prompt?",
- }
- ],
- "target": {
- "response": "Hey, thanks for your questions. Here are steps for how to achieve: 1. Navigate to your Prompt dashboard. \n 2. Select the `Monitoring` button on the top right of the Prompt dashboard \n 3. Within the model select the Version of the Evaluator you want to turn on for monitoring. \n\nWould you need help with anything else?"
- },
- },
- ],
- version_name="Initial version",
- version_description="Add two new questions and answers",
+client.tools.remove_deployment(
+ id="tl_789ghi",
+ environment_id="staging",
)
```
@@ -4291,7 +4277,7 @@ client.datasets.upsert(
-
-**datapoints:** `typing.Sequence[CreateDatapointRequestParams]` — The Datapoints to create this Dataset version with. Modify the `action` field to determine how these Datapoints are used.
+**id:** `str` — Unique identifier for Tool.
@@ -4299,7 +4285,7 @@ client.datasets.upsert(
-
-**version_id:** `typing.Optional[str]` — ID of the specific Dataset version to base the created Version on. Only used when `action` is `"add"` or `"remove"`.
+**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
@@ -4307,71 +4293,69 @@ client.datasets.upsert(
-
-**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed Version to base the created Version on. Only used when `action` is `"add"` or `"remove"`.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**include_datapoints:** `typing.Optional[bool]` — If set to `true`, include all Datapoints in the response. Defaults to `false`. Consider using the paginated List Datapoints endpoint instead.
-
+
+client.tools.list_environments(...)
-
-**path:** `typing.Optional[str]` — Path of the Dataset, including the name. This locates the Dataset in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
-
-
-
+#### 📝 Description
-
-**id:** `typing.Optional[str]` — ID for an existing Dataset.
-
+
+-
+
+List all Environments and their deployed versions for the Tool.
+
+
+#### 🔌 Usage
+
-
-**action:** `typing.Optional[UpdateDatesetAction]`
+
+-
-The action to take with the provided Datapoints.
+```python
+from humanloop import Humanloop
- - If `"set"`, the created version will only contain the Datapoints provided in this request.
- - If `"add"`, the created version will contain the Datapoints provided in this request in addition to the Datapoints in the target version.
- - If `"remove"`, the created version will contain the Datapoints in the target version except for the Datapoints provided in this request.
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.tools.list_environments(
+ id="tl_789ghi",
+)
-If `"add"` or `"remove"`, one of the `version_id` or `environment` query parameters may be provided.
-
+```
-
-
--
-
-**attributes:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Additional fields to describe the Dataset. Helpful to separate Dataset versions from each other with details on how they were created or used.
-
+#### ⚙️ Parameters
+
-
-**version_name:** `typing.Optional[str]` — Unique name for the Dataset version. Version names must be unique for a given Dataset.
-
-
-
-
-
-**version_description:** `typing.Optional[str]` — Description of the version, e.g., the changes made in this version.
+**id:** `str` — Unique identifier for Tool.
@@ -4391,7 +4375,7 @@ If `"add"` or `"remove"`, one of the `version_id` or `environment` query paramet
-client.datasets.get(...)
+client.tools.update_monitoring(...)
-
@@ -4403,15 +4387,10 @@ If `"add"` or `"remove"`, one of the `version_id` or `environment` query paramet
-
-Retrieve the Dataset with the given ID.
-
-Unless `include_datapoints` is set to `true`, the response will not include
-the Datapoints.
-Use the List Datapoints endpoint (`GET /{id}/datapoints`) to efficiently
-retrieve Datapoints for a large Dataset.
+Activate and deactivate Evaluators for monitoring the Tool.
-By default, the deployed version of the Dataset is returned. Use the query parameters
-`version_id` or `environment` to target a specific version of the Dataset.
+An activated Evaluator will automatically be run on all new Logs
+within the Tool for monitoring purposes.
@@ -4431,10 +4410,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.datasets.get(
- id="ds_b0baF1ca7652",
- version_id="dsv_6L78pqrdFi2xa",
- include_datapoints=True,
+client.tools.update_monitoring(
+ id="tl_789ghi",
+ activate=[{"evaluator_version_id": "evv_1abc4308abd"}],
)
```
@@ -4451,15 +4429,7 @@ client.datasets.get(
-
-**id:** `str` — Unique identifier for Dataset.
-
-
-
-
-
--
-
-**version_id:** `typing.Optional[str]` — A specific Version ID of the Dataset to retrieve.
+**id:** `str`
@@ -4467,7 +4437,9 @@ client.datasets.get(
-
-**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
+**activate:** `typing.Optional[
+ typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]
+]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
@@ -4475,7 +4447,9 @@ client.datasets.get(
-
-**include_datapoints:** `typing.Optional[bool]` — If set to `true`, include all Datapoints in the response. Defaults to `false`. Consider using the paginated List Datapoints endpoint instead.
+**deactivate:** `typing.Optional[
+ typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]
+]` — Evaluators to deactivate. These will not be run on new Logs.
@@ -4495,24 +4469,10 @@ client.datasets.get(
-client.datasets.delete(...)
-
--
-
-#### 📝 Description
-
-
--
-
+
client.tools.get_environment_variables(...)
-
-Delete the Dataset with the given ID.
-
-
-
-
-
#### 🔌 Usage
@@ -4527,7 +4487,7 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.datasets.delete(
+client.tools.get_environment_variables(
id="id",
)
@@ -4545,7 +4505,7 @@ client.datasets.delete(
-
-**id:** `str` — Unique identifier for Dataset.
+**id:** `str` — Unique identifier for File.
@@ -4565,7 +4525,7 @@ client.datasets.delete(
-client.datasets.move(...)
+client.tools.add_environment_variable(...)
-
@@ -4577,7 +4537,7 @@ client.datasets.delete(
-
-Move the Dataset to a different path or change the name.
+Add an environment variable to a Tool.
@@ -4597,8 +4557,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.datasets.move(
+client.tools.add_environment_variable(
id="id",
+ request=[{"name": "name", "value": "value"}],
)
```
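Paired with `delete_environment_variable` below, this gives a simple lifecycle; a sketch with an illustrative variable name and value:

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Attach an environment variable to the Tool...
client.tools.add_environment_variable(
    id="tl_789ghi",
    request=[{"name": "API_BASE", "value": "https://api.example.com"}],
)

# ...and remove it when it is no longer needed.
client.tools.delete_environment_variable(
    id="tl_789ghi",
    name="API_BASE",
)
```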
@@ -4615,15 +4576,7 @@ client.datasets.move(
-
-**id:** `str` — Unique identifier for Dataset.
-
-
-
-
-
--
-
-**path:** `typing.Optional[str]` — Path of the Dataset including the Dataset name, which is used as a unique identifier.
+**id:** `str` — Unique identifier for Tool.
@@ -4631,7 +4584,7 @@ client.datasets.move(
-
-**name:** `typing.Optional[str]` — Name of the Dataset, which is used as a unique identifier.
+**request:** `typing.Sequence[FileEnvironmentVariableRequestParams]`
@@ -4651,24 +4604,10 @@ client.datasets.move(
-client.datasets.list_datapoints(...)
-
--
-
-#### 📝 Description
-
-
--
-
+
client.tools.delete_environment_variable(...)
-
-List all Datapoints for the Dataset with the given ID.
-
-
-
-
-
#### 🔌 Usage
@@ -4683,15 +4622,10 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-response = client.datasets.list_datapoints(
- id="ds_b0baF1ca7652",
- size=1,
+client.tools.delete_environment_variable(
+ id="id",
+ name="name",
)
-for item in response:
- yield item
-# alternatively, you can paginate page-by-page
-for page in response.iter_pages():
- yield page
```
@@ -4707,31 +4641,7 @@ for page in response.iter_pages():
-
-**id:** `str` — Unique identifier for Dataset.
-
-
-
-
-
--
-
-**version_id:** `typing.Optional[str]` — A specific Version ID of the Dataset to retrieve.
-
-
-
-
-
--
-
-**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
-
-
-
-
-
--
-
-**page:** `typing.Optional[int]` — Page number for pagination.
+**id:** `str` — Unique identifier for File.
@@ -4739,7 +4649,7 @@ for page in response.iter_pages():
-
-**size:** `typing.Optional[int]` — Page size for pagination. Number of Datapoints to fetch.
+**name:** `str` — Name of the Environment Variable to delete.
@@ -4759,7 +4669,8 @@ for page in response.iter_pages():
-client.datasets.list_versions(...)
+## Datasets
+client.datasets.list(...)
-
@@ -4771,7 +4682,7 @@ for page in response.iter_pages():
-
-Get a list of the versions for a Dataset.
+List all Datasets.
@@ -4791,9 +4702,14 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.datasets.list_versions(
- id="ds_b0baF1ca7652",
+response = client.datasets.list(
+ size=1,
)
+for item in response:
+    print(item)
+# alternatively, you can paginate page-by-page
+for page in response.iter_pages():
+    print(page)
```
@@ -4809,7 +4725,7 @@ client.datasets.list_versions(
-
-**id:** `str` — Unique identifier for Dataset.
+**page:** `typing.Optional[int]` — Page offset for pagination.
@@ -4817,7 +4733,39 @@ client.datasets.list_versions(
-
-**include_datapoints:** `typing.Optional[ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints]` — If set to 'latest_saved', include datapoints for the latest saved version. Alternatively, 'latest_committed' (deprecated) includes datapoints for the latest committed version only.
+**size:** `typing.Optional[int]` — Page size for pagination. Number of Datasets to fetch.
+
+
+
+
+
+-
+
+**name:** `typing.Optional[str]` — Case-insensitive filter for Dataset name.
+
+
+
+
+
+-
+
+**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Dataset. This filter matches against both email address and name of users.
+
+
+
+
+
+-
+
+**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Datasets by
+
+
+
+
+
+-
+
+**order:** `typing.Optional[SortOrder]` — Direction to sort by.
@@ -4837,7 +4785,7 @@ client.datasets.list_versions(
-client.datasets.delete_dataset_version(...)
+client.datasets.upsert(...)
-
@@ -4849,7 +4797,23 @@ client.datasets.list_versions(
-
-Delete a version of the Dataset.
+Create a Dataset or update it with a new version if it already exists.
+
+Datasets are identified by the `ID` or their `path`. The datapoints determine the versions of the Dataset.
+
+By default, the new Dataset version will be set to the list of Datapoints provided in
+the request. You can also create a new version by adding or removing Datapoints from an existing version
+by specifying `action` as `add` or `remove` respectively. In this case, you may specify
+the `version_id` or `environment` query parameters to identify the existing version to base
+the new version on. If neither is provided, the latest created version will be used.
+
+You can provide `version_name` and `version_description` to identify and describe your versions.
+Version names must be unique within a Dataset - attempting to create a version with a name
+that already exists will result in a 409 Conflict error.
+
+Humanloop also deduplicates Datapoints. If you try to add a Datapoint that already
+exists, it will be ignored. If you intentionally want to add a duplicate Datapoint,
+you can add a unique identifier to the Datapoint's inputs such as `{_dedupe_id: <unique ID>}`.
@@ -4869,9 +4833,34 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.datasets.delete_dataset_version(
- id="id",
- version_id="version_id",
+client.datasets.upsert(
+ path="datasets/support-queries",
+ datapoints=[
+ {
+ "messages": [
+ {
+ "role": "user",
+ "content": "How do i manage my organizations API keys?\n",
+ }
+ ],
+ "target": {
+ "response": 'Hey, thanks for your questions. Here are steps for how to achieve: 1. Log in to the Humanloop Dashboard \n\n2. Click on "Organization Settings."\n If you do not see this option, you might need to contact your organization admin to gain the necessary permissions.\n\n3. Within the settings or organization settings, select the option labeled "API Keys" on the left. Here you will be able to view and manage your API keys.\n\n4. You will see a list of existing API keys. You can perform various actions, such as:\n - **Generate New API Key:** Click on the "Generate New Key" button if you need a new API key.\n - **Revoke an API Key:** If you need to disable an existing key, find the key in the list and click the "Revoke" or "Delete" button.\n - **Copy an API Key:** If you need to use an existing key, you can copy it to your clipboard by clicking the "Copy" button next to the key.\n\n5. **Save and Secure API Keys:** Make sure to securely store any new or existing API keys you are using. Treat them like passwords and do not share them publicly.\n\nIf you encounter any issues or need further assistance, it might be helpful to engage with an engineer or your IT department to ensure you have the necessary permissions and support.\n\nWould you need help with anything else?'
+ },
+ },
+ {
+ "messages": [
+ {
+ "role": "user",
+ "content": "Hey, can do I use my code evaluator for monitoring my legal-copilot prompt?",
+ }
+ ],
+ "target": {
+ "response": "Hey, thanks for your questions. Here are steps for how to achieve: 1. Navigate to your Prompt dashboard. \n 2. Select the `Monitoring` button on the top right of the Prompt dashboard \n 3. Within the model select the Version of the Evaluator you want to turn on for monitoring. \n\nWould you need help with anything else?"
+ },
+ },
+ ],
+ version_name="Initial version",
+ version_description="Add two new questions and answers",
)
```
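+
+Since `action` defaults to `"set"`, the example above replaces the version's Datapoints. A minimal sketch of extending an existing version instead; the Datapoint, version ID, and version name below are placeholders rather than values from a real workspace:
+
+```python
+client.datasets.upsert(
+    path="datasets/support-queries",
+    datapoints=[
+        {
+            "messages": [{"role": "user", "content": "How do I rotate an API key?"}],
+            "target": {"response": "You can rotate keys from your Organization Settings."},
+        }
+    ],
+    action="add",  # keep the base version's Datapoints and add these on top
+    version_id="dsv_6L78pqrdFi2xa",  # hypothetical existing version to base the new version on
+    version_name="add-rotation-question",
+)
+```
+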
@@ -4888,7 +4877,7 @@ client.datasets.delete_dataset_version(
-
-**id:** `str` — Unique identifier for Dataset.
+**datapoints:** `typing.Sequence[CreateDatapointRequestParams]` — The Datapoints to create this Dataset version with. Modify the `action` field to determine how these Datapoints are used.
@@ -4896,7 +4885,7 @@ client.datasets.delete_dataset_version(
-
-**version_id:** `str` — Unique identifier for the specific version of the Dataset.
+**version_id:** `typing.Optional[str]` — ID of the specific Dataset version to base the created Version on. Only used when `action` is `"add"` or `"remove"`.
@@ -4904,70 +4893,47 @@ client.datasets.delete_dataset_version(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed Version to base the created Version on. Only used when `action` is `"add"` or `"remove"`.
-
-
-
-
-
-
-
-client.datasets.update_dataset_version(...)
-
-#### 📝 Description
-
-
--
+**include_datapoints:** `typing.Optional[bool]` — If set to `true`, include all Datapoints in the response. Defaults to `false`. Consider using the paginated List Datapoints endpoint instead.
+
+
+
-
-Update the name or description of the Dataset version.
-
-
+**path:** `typing.Optional[str]` — Path of the Dataset, including the name. This locates the Dataset in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
-#### 🔌 Usage
-
-
-
--
-
-```python
-from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.datasets.update_dataset_version(
- id="id",
- version_id="version_id",
-)
-
-```
-
-
+**id:** `typing.Optional[str]` — ID for an existing Dataset.
+
-#### ⚙️ Parameters
-
-
-
--
+**action:** `typing.Optional[UpdateDatesetAction]`
-**id:** `str` — Unique identifier for Dataset.
+The action to take with the provided Datapoints.
+
+ - If `"set"`, the created version will only contain the Datapoints provided in this request.
+ - If `"add"`, the created version will contain the Datapoints provided in this request in addition to the Datapoints in the target version.
+ - If `"remove"`, the created version will contain the Datapoints in the target version except for the Datapoints provided in this request.
+
+If `"add"` or `"remove"`, one of the `version_id` or `environment` query parameters may be provided.
@@ -4975,7 +4941,7 @@ client.datasets.update_dataset_version(
-
-**version_id:** `str` — Unique identifier for the specific version of the Dataset.
+**attributes:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Additional fields to describe the Dataset. Helpful to separate Dataset versions from each other with details on how they were created or used.
@@ -4983,7 +4949,7 @@ client.datasets.update_dataset_version(
-
-**name:** `typing.Optional[str]` — Name of the version.
+**version_name:** `typing.Optional[str]` — Unique name for the Dataset version. Version names must be unique for a given Dataset.
@@ -4991,7 +4957,7 @@ client.datasets.update_dataset_version(
-
-**description:** `typing.Optional[str]` — Description of the version.
+**version_description:** `typing.Optional[str]` — Description of the version, e.g., the changes made in this version.
@@ -5011,7 +4977,7 @@ client.datasets.update_dataset_version(
-client.datasets.upload_csv(...)
+client.datasets.get(...)
-
@@ -5023,17 +4989,15 @@ client.datasets.update_dataset_version(
-
-Add Datapoints from a CSV file to a Dataset.
-
-This will create a new version of the Dataset with the Datapoints from the CSV file.
+Retrieve the Dataset with the given ID.
-If either `version_id` or `environment` is provided, the new version will be based on the specified version,
-with the Datapoints from the CSV file added to the existing Datapoints in the version.
-If neither `version_id` nor `environment` is provided, the new version will be based on the version
-of the Dataset that is deployed to the default Environment.
+Unless `include_datapoints` is set to `true`, the response will not include
+the Datapoints.
+Use the List Datapoints endpoint (`GET /{id}/datapoints`) to efficiently
+retrieve Datapoints for a large Dataset.
-You can optionally provide a name and description for the new version using `version_name`
-and `version_description` parameters.
+By default, the deployed version of the Dataset is returned. Use the query parameters
+`version_id` or `environment` to target a specific version of the Dataset.
@@ -5053,8 +5017,10 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.datasets.upload_csv(
- id="id",
+client.datasets.get(
+ id="ds_b0baF1ca7652",
+ version_id="dsv_6L78pqrdFi2xa",
+ include_datapoints=True,
)
```
@@ -5071,25 +5037,7 @@ client.datasets.upload_csv(
-
-**id:** `str` — Unique identifier for the Dataset
-
-
-
-
-
--
-
-**file:** `from __future__ import annotations
-
-core.File` — See core.File for more documentation
-
-
-
-
-
--
-
-**version_id:** `typing.Optional[str]` — ID of the specific Dataset version to base the created Version on.
+**id:** `str` — Unique identifier for Dataset.
@@ -5097,7 +5045,7 @@ core.File` — See core.File for more documentation
-
-**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed Version to base the created Version on.
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Dataset to retrieve.
@@ -5105,7 +5053,7 @@ core.File` — See core.File for more documentation
-
-**version_name:** `typing.Optional[str]` — Name for the new Dataset version.
+**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
@@ -5113,7 +5061,7 @@ core.File` — See core.File for more documentation
-
-**version_description:** `typing.Optional[str]` — Description for the new Dataset version.
+**include_datapoints:** `typing.Optional[bool]` — If set to `true`, include all Datapoints in the response. Defaults to `false`. Consider using the paginated List Datapoints endpoint instead.
@@ -5133,7 +5081,7 @@ core.File` — See core.File for more documentation
-client.datasets.set_deployment(...)
+client.datasets.delete(...)
-
@@ -5145,9 +5093,7 @@ core.File` — See core.File for more documentation
-
-Deploy Dataset to Environment.
-
-Set the deployed version for the specified Environment.
+Delete the Dataset with the given ID.
@@ -5167,10 +5113,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.datasets.set_deployment(
- id="ds_b0baF1ca7652",
- environment_id="staging",
- version_id="dsv_6L78pqrdFi2xa",
+client.datasets.delete(
+ id="id",
)
```
@@ -5195,22 +5139,6 @@ client.datasets.set_deployment(
-
-**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
-
-
-
-
-
--
-
-**version_id:** `str` — Unique identifier for the specific version of the Dataset.
-
-
-
-
-
--
-
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -5223,7 +5151,7 @@ client.datasets.set_deployment(
-client.datasets.remove_deployment(...)
+client.datasets.move(...)
-
@@ -5235,9 +5163,7 @@ client.datasets.set_deployment(
-
-Remove deployed Dataset from Environment.
-
-Remove the deployed version for the specified Environment.
+Move the Dataset to a different path or change the name.
@@ -5257,9 +5183,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.datasets.remove_deployment(
- id="ds_b0baF1ca7652",
- environment_id="staging",
+client.datasets.move(
+ id="id",
)
```
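+
+Since `path` and `name` are both optional, a sketch that actually relocates the Dataset; the path below is a placeholder:
+
+```python
+client.datasets.move(
+    id="ds_b0baF1ca7652",  # hypothetical Dataset ID
+    path="archived/support-queries",  # new path, including the new name
+)
+```
+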
@@ -5284,7 +5209,15 @@ client.datasets.remove_deployment(
-
-**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
+**path:** `typing.Optional[str]` — Path of the Dataset including the Dataset name, which is used as a unique identifier.
+
+
+
+
+
+-
+
+**name:** `typing.Optional[str]` — Name of the Dataset, which is used as a unique identifier.
@@ -5304,7 +5237,7 @@ client.datasets.remove_deployment(
-client.datasets.list_environments(...)
+client.datasets.list_datapoints(...)
-
@@ -5316,7 +5249,7 @@ client.datasets.remove_deployment(
-
-List all Environments and their deployed versions for the Dataset.
+List all Datapoints for the Dataset with the given ID.
@@ -5336,9 +5269,15 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.datasets.list_environments(
- id="id",
+response = client.datasets.list_datapoints(
+ id="ds_b0baF1ca7652",
+ size=1,
)
+for item in response:
+    print(item)
+# alternatively, you can paginate page-by-page
+for page in response.iter_pages():
+    print(page)
```
@@ -5362,6 +5301,38 @@ client.datasets.list_environments(
-
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Dataset to retrieve.
+
+
+
+
+
+-
+
+**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
+
+
+
+
+
+-
+
+**page:** `typing.Optional[int]` — Page number for pagination.
+
+
+
+
+
+-
+
+**size:** `typing.Optional[int]` — Page size for pagination. Number of Datapoints to fetch.
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -5374,8 +5345,7 @@ client.datasets.list_environments(
-## Evaluators
-client.evaluators.log(...)
+client.datasets.list_versions(...)
-
@@ -5387,9 +5357,7 @@ client.datasets.list_environments(
-
-Submit Evaluator judgment for an existing Log.
-
-Creates a new Log. The evaluated Log will be set as the parent of the created Log.
+Get a list of the versions for a Dataset.
@@ -5409,8 +5377,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.evaluators.log(
- parent_id="parent_id",
+client.datasets.list_versions(
+ id="ds_b0baF1ca7652",
)
```
@@ -5427,7 +5395,7 @@ client.evaluators.log(
-
-**parent_id:** `str` — Identifier of the evaluated Log. The newly created Log will have this one set as parent.
+**id:** `str` — Unique identifier for Dataset.
@@ -5435,7 +5403,7 @@ client.evaluators.log(
-
-**version_id:** `typing.Optional[str]` — ID of the Evaluator version to log against.
+**include_datapoints:** `typing.Optional[ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints]` — If set to 'latest_saved', include datapoints for the latest saved version. Alternatively, 'latest_committed' (deprecated) includes datapoints for the latest committed version only.
@@ -5443,103 +5411,70 @@ client.evaluators.log(
-
-**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
-
--
-
-**path:** `typing.Optional[str]` — Path of the Evaluator, including the name. This locates the Evaluator in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
-
-
--
-**id:** `typing.Optional[str]` — ID for an existing Evaluator.
-
+
+client.datasets.delete_dataset_version(...)
-
-**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
-
-
-
+#### 📝 Description
-
-**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
-
-
-
-
-
-**output:** `typing.Optional[str]` — Generated output from the LLM. Only populated for LLM Evaluator Logs.
-
+Delete a version of the Dataset.
-
-
--
-
-**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
-
+#### 🔌 Usage
+
-
-**error:** `typing.Optional[str]` — Error message if the log is an error.
-
-
-
-
-
-**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
-
-
-
+```python
+from humanloop import Humanloop
-
--
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.datasets.delete_dataset_version(
+ id="id",
+ version_id="version_id",
+)
-**stdout:** `typing.Optional[str]` — Captured log and debug statements.
-
+```
-
-
--
-
-**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider. Only populated for LLM Evaluator Logs.
-
+#### ⚙️ Parameters
+
-
-**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received the provider. Only populated for LLM Evaluator Logs.
-
-
-
-
-
-**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
+**id:** `str` — Unique identifier for Dataset.
@@ -5547,7 +5482,7 @@ client.evaluators.log(
-
-**source:** `typing.Optional[str]` — Identifies where the model was called from.
+**version_id:** `str` — Unique identifier for the specific version of the Dataset.
@@ -5555,71 +5490,70 @@ client.evaluators.log(
-
-**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
+
+client.datasets.update_dataset_version(...)
-
-**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
-
-
-
+#### 📝 Description
-
-**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
-
-
-
-
-
-**user:** `typing.Optional[str]` — End-user ID related to the Log.
-
+Update the name or description of the Dataset version.
+
+
+
+#### 🔌 Usage
-
-**create_evaluator_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to.
-
-
-
-
-
-**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
-
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.datasets.update_dataset_version(
+ id="id",
+ version_id="version_id",
+)
+
+```
+
+
+#### ⚙️ Parameters
+
-
-**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
-
-
-
-
-
-**output_message:** `typing.Optional[ChatMessageParams]` — The message returned by the LLM. Only populated for LLM Evaluator Logs.
+**id:** `str` — Unique identifier for Dataset.
@@ -5627,7 +5561,7 @@ client.evaluators.log(
-
-**judgment:** `typing.Optional[CreateEvaluatorLogRequestJudgmentParams]` — Evaluator assessment of the Log.
+**version_id:** `str` — Unique identifier for the specific version of the Dataset.
@@ -5635,7 +5569,7 @@ client.evaluators.log(
-
-**marked_completed:** `typing.Optional[bool]` — Whether the Log has been manually marked as completed by a user.
+**name:** `typing.Optional[str]` — Name of the version.
@@ -5643,7 +5577,7 @@ client.evaluators.log(
-
-**spec:** `typing.Optional[CreateEvaluatorLogRequestSpecParams]`
+**description:** `typing.Optional[str]` — Description of the version.
@@ -5663,7 +5597,7 @@ client.evaluators.log(
-client.evaluators.list(...)
+client.datasets.upload_csv(...)
-
@@ -5675,7 +5609,17 @@ client.evaluators.log(
-
-Get a list of all Evaluators.
+Add Datapoints from a CSV file to a Dataset.
+
+This will create a new version of the Dataset with the Datapoints from the CSV file.
+
+If either `version_id` or `environment` is provided, the new version will be based on the specified version,
+with the Datapoints from the CSV file added to the existing Datapoints in the version.
+If neither `version_id` nor `environment` is provided, the new version will be based on the version
+of the Dataset that is deployed to the default Environment.
+
+You can optionally provide a name and description for the new version using `version_name`
+and `version_description` parameters.
@@ -5695,14 +5639,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-response = client.evaluators.list(
- size=1,
+client.datasets.upload_csv(
+ id="id",
)
-for item in response:
- yield item
-# alternatively, you can paginate page-by-page
-for page in response.iter_pages():
- yield page
```
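+
+A fuller sketch that also passes the required `file` argument, assuming a local CSV; the file name, Dataset ID, and version name are placeholders:
+
+```python
+with open("support-queries.csv", "rb") as f:
+    client.datasets.upload_csv(
+        id="ds_b0baF1ca7652",  # hypothetical Dataset ID
+        file=f,  # CSV of Datapoints to add to the new version
+        version_name="csv-import-v1",
+    )
+```
+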
@@ -5718,7 +5657,7 @@ for page in response.iter_pages():
-
-**page:** `typing.Optional[int]` — Page offset for pagination.
+**id:** `str` — Unique identifier for the Dataset
@@ -5726,7 +5665,9 @@ for page in response.iter_pages():
-
-**size:** `typing.Optional[int]` — Page size for pagination. Number of Evaluators to fetch.
+**file:** `core.File` — See core.File for more documentation
@@ -5734,7 +5675,7 @@ for page in response.iter_pages():
-
-**name:** `typing.Optional[str]` — Case-insensitive filter for Evaluator name.
+**version_id:** `typing.Optional[str]` — ID of the specific Dataset version to base the created Version on.
@@ -5742,7 +5683,7 @@ for page in response.iter_pages():
-
-**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Evaluator. This filter matches against both email address and name of users.
+**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed Version to base the created Version on.
@@ -5750,7 +5691,7 @@ for page in response.iter_pages():
-
-**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Evaluators by
+**version_name:** `typing.Optional[str]` — Name for the new Dataset version.
@@ -5758,7 +5699,7 @@ for page in response.iter_pages():
-
-**order:** `typing.Optional[SortOrder]` — Direction to sort by.
+**version_description:** `typing.Optional[str]` — Description for the new Dataset version.
@@ -5778,7 +5719,7 @@ for page in response.iter_pages():
-client.evaluators.upsert(...)
+client.datasets.set_deployment(...)
-
@@ -5790,13 +5731,9 @@ for page in response.iter_pages():
-
-Create an Evaluator or update it with a new version if it already exists.
-
-Evaluators are identified by the `ID` or their `path`. The spec provided determines the version of the Evaluator.
+Deploy Dataset to Environment.
-You can provide `version_name` and `version_description` to identify and describe your versions.
-Version names must be unique within an Evaluator - attempting to create a version with a name
-that already exists will result in a 409 Conflict error.
+Set the deployed version for the specified Environment.
@@ -5816,19 +5753,13 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.evaluators.upsert(
- path="Shared Evaluators/Accuracy Evaluator",
- spec={
- "arguments_type": "target_required",
- "return_type": "number",
- "evaluator_type": "python",
- "code": "def evaluate(answer, target):\n return 0.5",
- },
- version_name="simple-evaluator",
- version_description="Simple evaluator that returns 0.5",
-)
-
-```
+client.datasets.set_deployment(
+ id="ds_b0baF1ca7652",
+ environment_id="staging",
+ version_id="dsv_6L78pqrdFi2xa",
+)
+
+```
@@ -5842,23 +5773,7 @@ client.evaluators.upsert(
-
-**spec:** `EvaluatorRequestSpecParams`
-
-
-
-
-
--
-
-**path:** `typing.Optional[str]` — Path of the Evaluator, including the name. This locates the Evaluator in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
-
-
-
-
-
--
-
-**id:** `typing.Optional[str]` — ID for an existing Evaluator.
+**id:** `str` — Unique identifier for Dataset.
@@ -5866,7 +5781,7 @@ client.evaluators.upsert(
-
-**version_name:** `typing.Optional[str]` — Unique name for the Evaluator version. Version names must be unique for a given Evaluator.
+**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
@@ -5874,7 +5789,7 @@ client.evaluators.upsert(
-
-**version_description:** `typing.Optional[str]` — Description of the version, e.g., the changes made in this version.
+**version_id:** `str` — Unique identifier for the specific version of the Dataset.
@@ -5894,7 +5809,7 @@ client.evaluators.upsert(
-client.evaluators.get(...)
+client.datasets.remove_deployment(...)
-
@@ -5906,10 +5821,9 @@ client.evaluators.upsert(
-
-Retrieve the Evaluator with the given ID.
+Remove deployed Dataset from Environment.
-By default, the deployed version of the Evaluator is returned. Use the query parameters
-`version_id` or `environment` to target a specific version of the Evaluator.
+Remove the deployed version for the specified Environment.
@@ -5929,8 +5843,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.evaluators.get(
- id="ev_890bcd",
+client.datasets.remove_deployment(
+ id="ds_b0baF1ca7652",
+ environment_id="staging",
)
```
@@ -5947,15 +5862,7 @@ client.evaluators.get(
-
-**id:** `str` — Unique identifier for Evaluator.
-
-
-
-
-
--
-
-**version_id:** `typing.Optional[str]` — A specific Version ID of the Evaluator to retrieve.
+**id:** `str` — Unique identifier for Dataset.
@@ -5963,7 +5870,7 @@ client.evaluators.get(
-
-**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
+**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
@@ -5983,7 +5890,7 @@ client.evaluators.get(
-client.evaluators.delete(...)
+client.datasets.list_environments(...)
-
@@ -5995,7 +5902,7 @@ client.evaluators.get(
-
-Delete the Evaluator with the given ID.
+List all Environments and their deployed versions for the Dataset.
@@ -6015,8 +5922,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.evaluators.delete(
- id="ev_890bcd",
+client.datasets.list_environments(
+ id="id",
)
```
@@ -6033,7 +5940,7 @@ client.evaluators.delete(
-
-**id:** `str` — Unique identifier for Evaluator.
+**id:** `str` — Unique identifier for Dataset.
@@ -6053,7 +5960,8 @@ client.evaluators.delete(
-client.evaluators.move(...)
+## Evaluators
+client.evaluators.log(...)
-
@@ -6065,7 +5973,9 @@ client.evaluators.delete(
-
-Move the Evaluator to a different path or change the name.
+Submit Evaluator judgment for an existing Log.
+
+Creates a new Log. The evaluated Log will be set as the parent of the created Log.
@@ -6085,9 +5995,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.evaluators.move(
- id="ev_890bcd",
- path="new directory/new name",
+client.evaluators.log(
+ parent_id="parent_id",
)
```
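+
+A sketch of a call that also records a judgment, assuming a number-valued Evaluator identified by `path`; the IDs are placeholders:
+
+```python
+client.evaluators.log(
+    parent_id="log_abc123",  # hypothetical ID of the evaluated Log
+    path="Shared Evaluators/Accuracy Evaluator",
+    judgment=0.5,  # assumes the Evaluator's return type is a number
+)
+```
+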
@@ -6104,7 +6013,7 @@ client.evaluators.move(
-
-**id:** `str` — Unique identifier for Evaluator.
+**parent_id:** `str` — Identifier of the evaluated Log. The newly created Log will have this one set as parent.
@@ -6112,7 +6021,7 @@ client.evaluators.move(
-
-**path:** `typing.Optional[str]` — Path of the Evaluator including the Evaluator name, which is used as a unique identifier.
+**version_id:** `typing.Optional[str]` — ID of the Evaluator version to log against.
@@ -6120,7 +6029,7 @@ client.evaluators.move(
-
-**name:** `typing.Optional[str]` — Name of the Evaluator, which is used as a unique identifier.
+**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
@@ -6128,69 +6037,3435 @@ client.evaluators.move(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**path:** `typing.Optional[str]` — Path of the Evaluator, including the name. This locates the Evaluator in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+
+-
+
+**id:** `typing.Optional[str]` — ID for an existing Evaluator.
+
+
+-
+**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
+
-
-client.evaluators.list_versions(...)
-
-#### 📝 Description
+**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
+
+
+
-
+**output:** `typing.Optional[str]` — Generated output from the LLM. Only populated for LLM Evaluator Logs.
+
+
+
+
-
-Get a list of all the versions of an Evaluator.
+**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
+
+
+
+-
+
+**error:** `typing.Optional[str]` — Error message if the log is an error.
+
-#### 🔌 Usage
+
+-
+
+**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
+
+
+
-
+**stdout:** `typing.Optional[str]` — Captured log and debug statements.
+
+
+
+
-
-```python
-from humanloop import Humanloop
+**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider. Only populated for LLM Evaluator Logs.
+
+
+
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.evaluators.list_versions(
- id="ev_890bcd",
-)
+
+-
-```
+**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received from the provider. Only populated for LLM Evaluator Logs.
+
+
+
+-
+
+**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
+
-#### ⚙️ Parameters
+
+-
+
+**source:** `typing.Optional[str]` — Identifies where the model was called from.
+
+
+
+
+
+-
+
+**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
+
+
+
+
+
+-
+
+**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+
+
+
+
+-
+
+**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+
+
+
+
+-
+
+**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
+
+
+
+
+
+-
+
+**user:** `typing.Optional[str]` — End-user ID related to the Log.
+
+
+
+
+
+-
+
+**create_evaluator_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to.
+
+
+
+
+
+-
+
+**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
+
+
+
+
+
+-
+
+**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+
+
+
+
+-
+
+**output_message:** `typing.Optional[ChatMessageParams]` — The message returned by the LLM. Only populated for LLM Evaluator Logs.
+
+
+
+
+
+-
+
+**judgment:** `typing.Optional[CreateEvaluatorLogRequestJudgmentParams]` — Evaluator assessment of the Log.
+
+
+
+
+
+-
+
+**marked_completed:** `typing.Optional[bool]` — Whether the Log has been manually marked as completed by a user.
+
+
+
+
+
+-
+
+**spec:** `typing.Optional[CreateEvaluatorLogRequestSpecParams]`
+
+
+
-
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.evaluators.list(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Get a list of all Evaluators.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+response = client.evaluators.list(
+ size=1,
+)
+for item in response:
+    print(item)
+# alternatively, you can paginate page-by-page
+for page in response.iter_pages():
+    print(page)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**page:** `typing.Optional[int]` — Page offset for pagination.
+
+
+
+
+
+-
+
+**size:** `typing.Optional[int]` — Page size for pagination. Number of Evaluators to fetch.
+
+
+
+
+
+-
+
+**name:** `typing.Optional[str]` — Case-insensitive filter for Evaluator name.
+
+
+
+
+
+-
+
+**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Evaluator. This filter matches against both email address and name of users.
+
+
+
+
+
+-
+
+**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Evaluators by
+
+
+
+
+
+-
+
+**order:** `typing.Optional[SortOrder]` — Direction to sort by.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.evaluators.upsert(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Create an Evaluator or update it with a new version if it already exists.
+
+Evaluators are identified by the `ID` or their `path`. The spec provided determines the version of the Evaluator.
+
+You can provide `version_name` and `version_description` to identify and describe your versions.
+Version names must be unique within an Evaluator - attempting to create a version with a name
+that already exists will result in a 409 Conflict error.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.evaluators.upsert(
+ path="Shared Evaluators/Accuracy Evaluator",
+ spec={
+ "arguments_type": "target_required",
+ "return_type": "number",
+ "evaluator_type": "python",
+ "code": "def evaluate(answer, target):\n return 0.5",
+ },
+ version_name="simple-evaluator",
+ version_description="Simple evaluator that returns 0.5",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**spec:** `EvaluatorRequestSpecParams`
+
+
+
+
+
+-
+
+**path:** `typing.Optional[str]` — Path of the Evaluator, including the name. This locates the Evaluator in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+
+
+
+
+-
+
+**id:** `typing.Optional[str]` — ID for an existing Evaluator.
+
+
+
+
+
+-
+
+**version_name:** `typing.Optional[str]` — Unique name for the Evaluator version. Version names must be unique for a given Evaluator.
+
+
+
+
+
+-
+
+**version_description:** `typing.Optional[str]` — Description of the version, e.g., the changes made in this version.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.evaluators.get(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Retrieve the Evaluator with the given ID.
+
+By default, the deployed version of the Evaluator is returned. Use the query parameters
+`version_id` or `environment` to target a specific version of the Evaluator.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.evaluators.get(
+ id="ev_890bcd",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Evaluator.
+
+
+
+
+
+-
+
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Evaluator to retrieve.
+
+
+
+
+
+-
+
+**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.evaluators.delete(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Delete the Evaluator with the given ID.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.evaluators.delete(
+ id="ev_890bcd",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Evaluator.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.evaluators.move(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Move the Evaluator to a different path or change the name.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.evaluators.move(
+ id="ev_890bcd",
+ path="new directory/new name",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Evaluator.
+
+
+
+
+
+-
+
+**path:** `typing.Optional[str]` — Path of the Evaluator including the Evaluator name, which is used as a unique identifier.
+
+
+
+
+
+-
+
+**name:** `typing.Optional[str]` — Name of the Evaluator, which is used as a unique identifier.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.evaluators.list_versions(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Get a list of all the versions of an Evaluator.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.evaluators.list_versions(
+ id="ev_890bcd",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for the Evaluator.
+
+
+
+
+
+-
+
+**evaluator_aggregates:** `typing.Optional[bool]` — Whether to include Evaluator aggregate results for the versions in the response
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.evaluators.delete_evaluator_version(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Delete a version of the Evaluator.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.evaluators.delete_evaluator_version(
+ id="id",
+ version_id="version_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Evaluator.
+
+
+
+
+
+-
+
+**version_id:** `str` — Unique identifier for the specific version of the Evaluator.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.evaluators.update_evaluator_version(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Update the name or description of the Evaluator version.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.evaluators.update_evaluator_version(
+ id="id",
+ version_id="version_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Evaluator.
+
+
+
+
+
+-
+
+**version_id:** `str` — Unique identifier for the specific version of the Evaluator.
+
+
+
+
+
+-
+
+**name:** `typing.Optional[str]` — Name of the version.
+
+
+
+
+
+-
+
+**description:** `typing.Optional[str]` — Description of the version.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.evaluators.set_deployment(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Deploy Evaluator to an Environment.
+
+Set the deployed version for the specified Environment. This Evaluator
+will be used for calls made to the Evaluator in this Environment.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.evaluators.set_deployment(
+ id="ev_890bcd",
+ environment_id="staging",
+ version_id="evv_012def",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Evaluator.
+
+
+
+
+
+-
+
+**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
+
+
+
+
+
+-
+
+**version_id:** `str` — Unique identifier for the specific version of the Evaluator.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.evaluators.remove_deployment(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Remove deployed Evaluator from the Environment.
+
+Remove the deployed version for the specified Environment. This Evaluator
+will no longer be used for calls made to the Evaluator in this Environment.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.evaluators.remove_deployment(
+ id="ev_890bcd",
+ environment_id="staging",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Evaluator.
+
+
+
+
+
+-
+
+**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.evaluators.list_environments(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+List all Environments and their deployed versions for the Evaluator.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.evaluators.list_environments(
+ id="ev_890bcd",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Evaluator.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.evaluators.update_monitoring(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Activate and deactivate Evaluators for monitoring the Evaluator.
+
+An activated Evaluator will automatically be run on all new Logs
+within the Evaluator for monitoring purposes.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.evaluators.update_monitoring(
+ id="id",
+)
+
+```
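+
+A sketch of activating one monitoring Evaluator and deactivating another, assuming the `{"evaluator_version_id": ...}` item shape for both lists; all IDs are placeholders:
+
+```python
+client.evaluators.update_monitoring(
+    id="ev_890bcd",
+    activate=[{"evaluator_version_id": "evv_012def"}],
+    deactivate=[{"evaluator_version_id": "evv_345ghi"}],
+)
+```
+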
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str`
+
+
+
+
+
+-
+
+**activate:** `typing.Optional[
+ typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]
+]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+
+
+
+
+
+-
+
+**deactivate:** `typing.Optional[
+ typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]
+]` — Evaluators to deactivate. These will not be run on new Logs.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## Flows
+client.flows.log(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Log to a Flow.
+
+You can use the query parameters `version_id` or `environment` to target
+an existing version of the Flow. Otherwise, the default deployed version will be chosen.
+
+If you create the Flow Log with a `log_status` of `incomplete`, you should later update it to `complete`
+in order to trigger Evaluators.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+import datetime
+
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.log(
+ id="fl_6o701g4jmcanPVHxdqD0O",
+ flow={
+ "attributes": {
+ "prompt": {
+ "template": "You are a helpful assistant helping with medical anamnesis",
+ "model": "gpt-4o",
+ "temperature": 0.8,
+ },
+ "tool": {
+ "name": "retrieval_tool_v3",
+ "description": "Retrieval tool for MedQA.",
+ "source_code": "def retrieval_tool(question: str) -> str:\n pass\n",
+ },
+ }
+ },
+ inputs={
+ "question": "Patient with a history of diabetes and hypertension presents with chest pain and shortness of breath."
+ },
+ output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.",
+ log_status="incomplete",
+ start_time=datetime.datetime.fromisoformat(
+ "2024-07-08 21:40:35+00:00",
+ ),
+ end_time=datetime.datetime.fromisoformat(
+ "2024-07-08 21:40:39+00:00",
+ ),
+)
+
+```
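+
+Because the Log above is created with `log_status="incomplete"`, child Logs can still be added to the trace. A hedged end-to-end sketch; the Prompt path and outputs are placeholders, and it assumes `trace_parent_id` is accepted by child Log endpoints such as `prompts.log`:
+
+```python
+trace = client.flows.log(
+    id="fl_6o701g4jmcanPVHxdqD0O",
+    inputs={"question": "..."},
+    log_status="incomplete",
+)
+client.prompts.log(
+    path="MedQA/Answer Prompt",  # hypothetical Prompt
+    trace_parent_id=trace.id,  # nest this Log under the Flow trace
+    output="...",
+)
+client.flows.update_log(
+    log_id=trace.id,
+    output="The patient is likely experiencing a myocardial infarction.",
+    log_status="complete",  # triggers monitoring Evaluators on the trace
+)
+```
+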
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Flow to log to.
+
+
+
+
+
+-
+
+**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
+
+
+
+
+
+-
+
+**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — List of chat messages that were used as an input to the Flow.
+
+
+
+
+
+-
+
+**output_message:** `typing.Optional[ChatMessageParams]` — The output message returned by this Flow.
+
+
+
+
+
+-
+
+**run_id:** `typing.Optional[str]` — Unique identifier for the Run to associate the Log to.
+
+
+
+
+
+-
+
+**path:** `typing.Optional[str]` — Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+
+
+
+
+-
+
+**id:** `typing.Optional[str]` — ID for an existing Flow.
+
+
+
+
+
+-
+
+**start_time:** `typing.Optional[dt.datetime]` — The start time of the Trace. Will be updated if a child Log with an earlier start time is added.
+
+
+
+
+
+-
+
+**end_time:** `typing.Optional[dt.datetime]` — The end time of the Trace. Will be updated if a child Log with a later end time is added.
+
+
+
+
+
+-
+
+**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+
+
+
+
+-
+
+**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
+
+
+
+
+
+-
+
+**error:** `typing.Optional[str]` — Error message if the log is an error.
+
+
+
+
+
+-
+
+**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
+
+
+
+
+
+-
+
+**stdout:** `typing.Optional[str]` — Captured log and debug statements.
+
+
+
+
+
+-
+
+**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider.
+
+
+
+
+
+-
+
+**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received from the provider.
+
+
+
+
+
+-
+
+**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
+
+
+
+
+
+-
+
+**source:** `typing.Optional[str]` — Identifies where the model was called from.
+
+
+
+
+
+-
+
+**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
+
+
+
+
+
+-
+
+**log_status:** `typing.Optional[LogStatus]` — Status of the Flow Log. When a Log is updated from `incomplete` to `complete`, it becomes available to Monitoring Evaluators. Flow Logs cannot have an unspecified status: they must start as `incomplete` to allow children to be added. Provide `complete` if you don't intend to add children to the trace.
+
+
+
+
+
+-
+
+**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+
+
+
+
+-
+
+**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
+
+
+
+
+
+-
+
+**user:** `typing.Optional[str]` — End-user ID related to the Log.
+
+
+
+
+
+-
+
+**flow_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to.
+
+
+
+
+
+-
+
+**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
+
+
+
+
+
+-
+
+**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+
+
+
+
+-
+
+**flow:** `typing.Optional[FlowKernelRequestParams]` — Flow used to generate the Trace.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.update_log(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Update the status, inputs, output of a Flow Log.
+
+Marking a Flow Log as complete will trigger any monitoring Evaluators to run.
+Inputs and output (or error) must be provided in order to mark it as complete.
+
+The `end_time` attribute of the Log will be set to match the time the Log is marked as complete.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.update_log(
+ log_id="medqa_experiment_0001",
+ inputs={
+ "question": "Patient with a history of diabetes and normal tension presents with chest pain and shortness of breath."
+ },
+ output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.",
+ log_status="complete",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**log_id:** `str` — Unique identifier of the Flow Log.
+
+
+
+
+
+-
+
+**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — List of chat messages that were used as an input to the Flow.
+
+
+
+
+
+-
+
+**output_message:** `typing.Optional[ChatMessageParams]` — The output message returned by this Flow.
+
+
+
+
+
+-
+
+**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the Flow Log.
+
+
+
+
+
+-
+
+**output:** `typing.Optional[str]` — The output of the Flow Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`.
+
+
+
+
+
+-
+
+**error:** `typing.Optional[str]` — The error message of the Flow Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`.
+
+
+
+
+
+-
+
+**log_status:** `typing.Optional[LogStatus]` — Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.get(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Retrieve the Flow with the given ID.
+
+By default, the deployed version of the Flow is returned. Use the query parameters
+`version_id` or `environment` to target a specific version of the Flow.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.get(
+ id="fl_6o701g4jmcanPVHxdqD0O",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Flow.
+
+
+
+
+
+-
+
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Flow to retrieve.
+
+
+
+
+
+-
+
+**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.delete(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Delete the Flow with the given ID.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.delete(
+ id="fl_6o701g4jmcanPVHxdqD0O",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Flow.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.move(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Move the Flow to a different path or change the name.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.move(
+ id="fl_6o701g4jmcanPVHxdqD0O",
+ path="new directory/new name",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Flow.
+
+
+
+
+
+-
+
+**path:** `typing.Optional[str]` — Path of the Flow including the Flow name, which is used as a unique identifier.
+
+
+
+
+
+-
+
+**name:** `typing.Optional[str]` — Name of the Flow.
+
+
+
+
+
+-
+
+**directory_id:** `typing.Optional[str]` — Unique identifier for the Directory to move Flow to. Starts with `dir_`.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.list(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Get a list of Flows.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+response = client.flows.list(
+ size=1,
+)
+for item in response:
+    print(item)
+# alternatively, you can paginate page-by-page
+for page in response.iter_pages():
+    print(page)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**page:** `typing.Optional[int]` — Page number for pagination.
+
+
+
+
+
+-
+
+**size:** `typing.Optional[int]` — Page size for pagination. Number of Flows to fetch.
+
+
+
+
+
+-
+
+**name:** `typing.Optional[str]` — Case-insensitive filter for Flow name.
+
+
+
+
+
+-
+
+**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Flow. This filter matches against both email address and name of users.
+
+
+
+
+
+-
+
+**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Flows by
+
+
+
+
+
+-
+
+**order:** `typing.Optional[SortOrder]` — Direction to sort by.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.upsert(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Create or update a Flow.
+
+Flows can also be identified by the `ID` or their `path`.
+
+You can provide `version_name` and `version_description` to identify and describe your versions.
+Version names must be unique within a Flow - attempting to create a version with a name
+that already exists will result in a 409 Conflict error.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.upsert(
+ path="Personal Projects/MedQA Flow",
+ attributes={
+ "prompt": {
+ "template": "You are a helpful medical assistant helping with medical anamnesis. Answer {{question}}",
+ "model": "gpt-4o",
+ "temperature": 0.8,
+ },
+ "tool": {
+ "name": "retrieval_tool_v3",
+ "description": "Retrieval tool for MedQA.",
+ "source_code": "def retrieval_tool(question: str) -> str:\n pass\n",
+        },
+    },
+    version_name="medqa-flow-v1",
+    version_description="Initial version",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**attributes:** `typing.Dict[str, typing.Optional[typing.Any]]` — A key-value object identifying the Flow Version.
+
+
+
+
+
+-
+
+**path:** `typing.Optional[str]` — Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+
+
+
+
+
+-
+
+**id:** `typing.Optional[str]` — ID for an existing Flow.
+
+
+
+
+
+-
+
+**version_name:** `typing.Optional[str]` — Unique name for the Flow version. Version names must be unique for a given Flow.
+
+
+
+
+
+-
+
+**version_description:** `typing.Optional[str]` — Description of the version, e.g., the changes made in this version.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.list_versions(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Get a list of all the versions of a Flow.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.list_versions(
+ id="fl_6o701g4jmcanPVHxdqD0O",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Flow.
+
+
+
+
+
+-
+
+**evaluator_aggregates:** `typing.Optional[bool]` — Whether to include Evaluator aggregate results for the versions in the response
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.delete_flow_version(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Delete a version of the Flow.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.delete_flow_version(
+ id="id",
+ version_id="version_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Flow.
+
+
+
+
+
+-
+
+**version_id:** `str` — Unique identifier for the specific version of the Flow.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.update_flow_version(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Update the name or description of the Flow version.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.update_flow_version(
+ id="id",
+ version_id="version_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Flow.
+
+
+
+
+
+-
+
+**version_id:** `str` — Unique identifier for the specific version of the Flow.
+
+
+
+
+
+-
+
+**name:** `typing.Optional[str]` — Name of the version.
+
+
+
+
+
+-
+
+**description:** `typing.Optional[str]` — Description of the version.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.set_deployment(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Deploy Flow to an Environment.
+
+Set the deployed version for the specified Environment. This Flow
+will be used for calls made to the Flow in this Environment.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.set_deployment(
+ id="fl_6o701g4jmcanPVHxdqD0O",
+ environment_id="staging",
+ version_id="flv_6o701g4jmcanPVHxdqD0O",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Flow.
+
+
+
+
+
+-
+
+**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
+
+
+
+
+
+-
+
+**version_id:** `str` — Unique identifier for the specific version of the Flow.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.remove_deployment(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Remove deployed Flow from the Environment.
+
+Remove the deployed version for the specified Environment. This Flow
+will no longer be used for calls made to the Flow in this Environment.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.remove_deployment(
+ id="fl_6o701g4jmcanPVHxdqD0O",
+ environment_id="staging",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Flow.
+
+
+
+
+
+-
+
+**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.list_environments(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+List all Environments and their deployed versions for the Flow.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.list_environments(
+ id="fl_6o701g4jmcanPVHxdqD0O",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Flow.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.update_monitoring(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Activate and deactivate Evaluators for monitoring the Flow.
+
+An activated Evaluator will automatically be run on all new "completed" Logs
+within the Flow for monitoring purposes.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.update_monitoring(
+ id="fl_6o701g4jmcanPVHxdqD0O",
+ activate=[{"evaluator_version_id": "evv_1abc4308abd"}],
+)
+
+```
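+
+Activation and deactivation can be combined in a single call, e.g. to swap monitoring from one Evaluator version to another (both version IDs below are placeholders, and the deactivate items are assumed to take the same `evaluator_version_id` shape as the activate items):
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+    api_key="YOUR_API_KEY",
+)
+client.flows.update_monitoring(
+    id="fl_6o701g4jmcanPVHxdqD0O",
+    activate=[{"evaluator_version_id": "evv_new_version"}],  # placeholder ID
+    deactivate=[{"evaluator_version_id": "evv_old_version"}],  # placeholder ID
+)
+
+```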
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str`
+
+
+
+
+
+-
+
+**activate:** `typing.Optional[
+ typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]
+]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+
+
+
+
+
+-
+
+**deactivate:** `typing.Optional[
+ typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]
+]` — Evaluators to deactivate. These will not be run on new Logs.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## Agents
+client.agents.log(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Create an Agent Log.
+
+You can use the query parameters `version_id` or `environment` to target
+an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete`
+in order to trigger Evaluators.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.agents.log()
+
+```
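+
+A sketch of the `incomplete` → `complete` flow described above (the Agent path and ID are placeholders, and it is assumed that the create response exposes the new Log's ID as `id`):
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+    api_key="YOUR_API_KEY",
+)
+# Create the Log as incomplete so monitoring Evaluators wait for it.
+log = client.agents.log(
+    path="Personal Projects/Support Agent",  # placeholder path
+    messages=[{"role": "user", "content": "Hello!"}],
+    log_status="incomplete",
+)
+# ... later, mark the Log complete to trigger Evaluators.
+client.agents.update_log(
+    id="ag_1234567890",  # placeholder Agent ID
+    log_id=log.id,  # assumes the response exposes the Log ID as `id`
+    log_status="complete",
+)
+
+```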
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Agent to log to.
+
+
+
+
+
+-
+
+**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
+
+
+
+
+
+-
+
+**run_id:** `typing.Optional[str]` — Unique identifier for the Run to associate the Log to.
+
+
+
+
+
+-
+
+**path:** `typing.Optional[str]` — Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+
+
+
+
+-
+
+**id:** `typing.Optional[str]` — ID for an existing Agent.
+
+
+
+
+
+-
+
+**output_message:** `typing.Optional[ChatMessageParams]` — The message returned by the provider.
+
+
+
+
+
+-
+
+**prompt_tokens:** `typing.Optional[int]` — Number of tokens in the prompt used to generate the output.
+
+
+
+
+
+-
+
+**reasoning_tokens:** `typing.Optional[int]` — Number of reasoning tokens used to generate the output.
+
+
+
+
+
+-
+
+**output_tokens:** `typing.Optional[int]` — Number of tokens in the output generated by the model.
+
+
+
+
+
+-
+
+**prompt_cost:** `typing.Optional[float]` — Cost in dollars associated with the tokens in the prompt.
+
+
+
+
+
+-
+
+**output_cost:** `typing.Optional[float]` — Cost in dollars associated with the tokens in the output.
+
+
+
+
+
+-
+
+**finish_reason:** `typing.Optional[str]` — Reason the generation finished.
+
+
+
+
+
+-
+
+**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — The messages passed to the provider chat endpoint.
+
+
+
+
+
+-
+
+**tool_choice:** `typing.Optional[AgentLogRequestToolChoiceParams]`
+
+Controls how the model uses tools. The following options are supported:
+- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+- `'required'` means the model must call one or more of the provided tools.
+- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+
+
+
+
+-
+
+**agent:** `typing.Optional[AgentLogRequestAgentParams]`
+
+The agent configuration to use. Two formats are supported:
+- An `AgentKernelRequest` object containing the agent configuration
+- A string containing a serialized .agent file
+
+A new Agent version will be created if the provided details are new.
+
+
+
+
+
+-
+
+**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
+
+
+
+
+
+-
+
+**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
+
+
+
+
+
+-
+
+**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+
+
+
+
+-
+
+**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
+
+
+
+
+
+-
+
+**error:** `typing.Optional[str]` — Error message if the log is an error.
+
+
+
+
+
+-
+
+**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
+
+
+
+
+
+-
+
+**stdout:** `typing.Optional[str]` — Captured log and debug statements.
+
+
+
+
+
+-
+
+**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to the provider.
+
+
+
+
+
+-
+
+**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received from the provider.
+
+
+
+
+
+-
+
+**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
+
+
+
+
+
+-
+
+**source:** `typing.Optional[str]` — Identifies where the model was called from.
+
+
+
+
+
+-
+
+**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
+
+
+
+
+
+-
+
+**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+
+
+
+
+-
+
+**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs with Evaluations. If provided, Humanloop will automatically associate this Log with Evaluations that require a Log for this Datapoint-Version pair.
+
+
+
+
+
+-
+
+**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
+
+
+
+
+
+-
+
+**user:** `typing.Optional[str]` — End-user ID related to the Log.
+
+
+
+
+
+-
+
+**agent_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated with.
+
+
+
+
+
+-
+
+**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
+
+
+
+
+
+-
+
+**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.agents.update_log(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Update a Log.
+
+Update the details of a Log with the given ID.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.agents.update_log(
+ id="id",
+ log_id="log_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Agent.
+
+
+
+
+
+-
+
+**log_id:** `str` — Unique identifier for the Log.
+
+
+
+
+
+-
+
+**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — List of chat messages that were used as an input to the Flow.
+
+
+
+
+
+-
+
+**output_message:** `typing.Optional[ChatMessageParams]` — The output message returned by this Flow.
+
+
+
+
+
+-
+
+**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the Flow Log.
+
+
+
+
+
+-
+
+**output:** `typing.Optional[str]` — The output of the Flow Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`.
+
+
+
+
+
+-
+
+**error:** `typing.Optional[str]` — The error message of the Flow Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`.
+
+
+
+
+
+-
+
+**log_status:** `typing.Optional[LogStatus]` — Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.agents.call_stream(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Call an Agent.
+
+Calling an Agent calls the model provider before logging
+the request, responses and metadata to Humanloop.
+
+You can use the query parameters `version_id` or `environment` to target
+an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+Instead of targeting an existing version explicitly, you can pass in
+Agent details in the request body. In this case, we will check if the details correspond
+to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+when you are storing or deriving your Agent details in code.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+response = client.agents.call_stream()
+for chunk in response.data:
+    print(chunk)
+
+```
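+
+To supply Agent details inline instead of targeting an existing version, a sketch (the path, model, and template are placeholders; the `agent` dict is assumed to match the `AgentKernelRequest` shape):
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+    api_key="YOUR_API_KEY",
+)
+response = client.agents.call_stream(
+    path="Personal Projects/Support Agent",  # placeholder path
+    agent={
+        "model": "gpt-4o",
+        "template": [
+            {"role": "system", "content": "You are a helpful assistant."}
+        ],
+    },
+    messages=[{"role": "user", "content": "Hello!"}],
+)
+for chunk in response.data:
+    print(chunk)
+
+```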
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Agent to log to.
+
+
+
+
+
+-
+
+**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
+
+
+
+
+
+-
+
+**path:** `typing.Optional[str]` — Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+
+
+
+
+-
+
+**id:** `typing.Optional[str]` — ID for an existing Agent.
+
+
+
+
+
+-
+
+**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — The messages passed to the provider chat endpoint.
+
+
+
+
+
+-
+
+**tool_choice:** `typing.Optional[AgentsCallStreamRequestToolChoiceParams]`
+
+Controls how the model uses tools. The following options are supported:
+- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+- `'required'` means the model must call one or more of the provided tools.
+- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+
+
+
+
+-
+
+**agent:** `typing.Optional[AgentsCallStreamRequestAgentParams]`
+
+The agent configuration to use. Two formats are supported:
+- An `AgentKernelRequest` object containing the agent configuration
+- A string containing a serialized .agent file
+
+A new Agent version will be created if the provided details are new.
+
+
+
+
+
+-
+
+**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
+
+
+
+
+
+-
+
+**source:** `typing.Optional[str]` — Identifies where the model was called from.
+
+
+
+
+
+-
+
+**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
+
+
+
+
-
-**id:** `str` — Unique identifier for the Evaluator.
+**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
@@ -6198,7 +9473,7 @@ client.evaluators.list_versions(
-
-**evaluator_aggregates:** `typing.Optional[bool]` — Whether to include Evaluator aggregate results for the versions in the response
+**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
@@ -6206,70 +9481,71 @@ client.evaluators.list_versions(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
-
+
+-
+**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs with Evaluations. If provided, Humanloop will automatically associate this Log with Evaluations that require a Log for this Datapoint-Version pair.
+
-
-client.evaluators.delete_evaluator_version(...)
-
-#### 📝 Description
+**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
+
+
+
-
+**user:** `typing.Optional[str]` — End-user ID related to the Log.
+
+
+
+
-
-Delete a version of the Evaluator.
-
-
+**agents_call_stream_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated with.
+
-#### 🔌 Usage
-
-
+**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
+
+
+
+
-
-```python
-from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.evaluators.delete_evaluator_version(
- id="id",
- version_id="version_id",
-)
-
-```
-
-
+**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
-#### ⚙️ Parameters
-
-
+**provider_api_keys:** `typing.Optional[ProviderApiKeysParams]` — API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+
+
+
-
-**id:** `str` — Unique identifier for Evaluator.
+**return_inputs:** `typing.Optional[bool]` — Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
@@ -6277,7 +9553,7 @@ client.evaluators.delete_evaluator_version(
-
-**version_id:** `str` — Unique identifier for the specific version of the Evaluator.
+**include_trace_children:** `typing.Optional[bool]` — If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
@@ -6297,7 +9573,7 @@ client.evaluators.delete_evaluator_version(
-client.evaluators.update_evaluator_version(...)
+client.agents.call(...)
-
@@ -6309,7 +9585,18 @@ client.evaluators.delete_evaluator_version(
-
-Update the name or description of the Evaluator version.
+Call an Agent.
+
+Calling an Agent calls the model provider before logging
+the request, responses and metadata to Humanloop.
+
+You can use the query parameters `version_id` or `environment` to target
+an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+Instead of targeting an existing version explicitly, you can pass in
+Agent details in the request body. In this case, we will check if the details correspond
+to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+when you are storing or deriving your Agent details in code.
@@ -6329,10 +9616,7 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.evaluators.update_evaluator_version(
- id="id",
- version_id="version_id",
-)
+client.agents.call()
```
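+
+For example, to also fetch the nested Logs produced by the call (the path and message below are placeholders):
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+    api_key="YOUR_API_KEY",
+)
+response = client.agents.call(
+    path="Personal Projects/Support Agent",  # placeholder path
+    messages=[{"role": "user", "content": "Hello!"}],
+    include_trace_children=True,  # only applies when not streaming
+)
+
+```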
@@ -6348,7 +9632,7 @@ client.evaluators.update_evaluator_version(
-
-**id:** `str` — Unique identifier for Evaluator.
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Agent to log to.
@@ -6356,7 +9640,7 @@ client.evaluators.update_evaluator_version(
-
-**version_id:** `str` — Unique identifier for the specific version of the Evaluator.
+**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
@@ -6364,7 +9648,7 @@ client.evaluators.update_evaluator_version(
-
-**name:** `typing.Optional[str]` — Name of the version.
+**path:** `typing.Optional[str]` — Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
@@ -6372,7 +9656,7 @@ client.evaluators.update_evaluator_version(
-
-**description:** `typing.Optional[str]` — Description of the version.
+**id:** `typing.Optional[str]` — ID for an existing Agent.
@@ -6380,74 +9664,58 @@ client.evaluators.update_evaluator_version(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — The messages passed to the provider chat endpoint.
-
-
-
-
-
-
-
-client.evaluators.set_deployment(...)
-
-#### 📝 Description
+**tool_choice:** `typing.Optional[AgentsCallRequestToolChoiceParams]`
-
--
+Controls how the model uses tools. The following options are supported:
+- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+- `'required'` means the model must call one or more of the provided tools.
+- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+
+
-
-Deploy Evaluator to an Environment.
+**agent:** `typing.Optional[AgentsCallRequestAgentParams]`
-Set the deployed version for the specified Environment. This Evaluator
-will be used for calls made to the Evaluator in this Environment.
-
-
+The agent configuration to use. Two formats are supported:
+- An `AgentKernelRequest` object containing the agent configuration
+- A string containing a serialized .agent file
+
+A new Agent version will be created if the provided details are new.
+
-#### 🔌 Usage
-
-
--
-
-
-```python
-from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.evaluators.set_deployment(
- id="ev_890bcd",
- environment_id="staging",
- version_id="evv_012def",
-)
-
-```
-
-
+**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
+
-#### ⚙️ Parameters
-
-
+**source:** `typing.Optional[str]` — Identifies where the model was called from.
+
+
+
+
-
-**id:** `str` — Unique identifier for Evaluator.
+**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
@@ -6455,7 +9723,7 @@ client.evaluators.set_deployment(
-
-**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
+**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
@@ -6463,7 +9731,7 @@ client.evaluators.set_deployment(
-
-**version_id:** `str` — Unique identifier for the specific version of the Evaluator.
+**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
@@ -6471,73 +9739,71 @@ client.evaluators.set_deployment(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
-
+
+-
+**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs with Evaluations. If provided, Humanloop will automatically associate this Log with Evaluations that require a Log for this Datapoint-Version pair.
+
-
-client.evaluators.remove_deployment(...)
-
-#### 📝 Description
+**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
+
+
+
-
+**user:** `typing.Optional[str]` — End-user ID related to the Log.
+
+
+
+
-
-Remove deployed Evaluator from the Environment.
-
-Remove the deployed version for the specified Environment. This Evaluator
-will no longer be used for calls made to the Evaluator in this Environment.
-
-
+**agents_call_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated with.
+
-#### 🔌 Usage
-
-
+**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
+
+
+
+
-
-```python
-from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.evaluators.remove_deployment(
- id="ev_890bcd",
- environment_id="staging",
-)
-
-```
-
-
+**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
-#### ⚙️ Parameters
-
-
+**provider_api_keys:** `typing.Optional[ProviderApiKeysParams]` — API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+
+
+
-
-**id:** `str` — Unique identifier for Evaluator.
+**return_inputs:** `typing.Optional[bool]` — Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
@@ -6545,7 +9811,7 @@ client.evaluators.remove_deployment(
-
-**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
+**include_trace_children:** `typing.Optional[bool]` — If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
@@ -6565,7 +9831,7 @@ client.evaluators.remove_deployment(
-client.evaluators.list_environments(...)
+client.agents.continue_stream(...)
-
@@ -6577,7 +9843,15 @@ client.evaluators.remove_deployment(
-
-List all Environments and their deployed versions for the Evaluator.
+Continue an incomplete Agent call.
+
+This endpoint allows continuing an existing incomplete Agent call, using the context
+from the previous interaction. The Agent will resume processing from where it left off.
+
+The original log must be in an incomplete state to be continued.
+
+The messages in the request will be appended
+to the original messages in the log.
@@ -6597,9 +9871,12 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.evaluators.list_environments(
- id="ev_890bcd",
+response = client.agents.continue_stream(
+ log_id="log_id",
+ messages=[{"role": "user"}],
)
+for chunk in response.data:
+    print(chunk)
```
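+
+Often the continuation messages carry tool results for the previous Assistant message's tool calls; a sketch (the Log ID, `tool_call_id`, and payload are placeholders, assuming OpenAI-style Tool messages):
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+    api_key="YOUR_API_KEY",
+)
+response = client.agents.continue_stream(
+    log_id="log_abc123",  # placeholder: an incomplete Agent Log
+    messages=[
+        {
+            "role": "tool",
+            "tool_call_id": "call_1",  # matches the Assistant's tool call
+            "content": '{"temperature_c": 21}',
+        }
+    ],
+)
+for chunk in response.data:
+    print(chunk)
+
+```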
@@ -6615,7 +9892,31 @@ client.evaluators.list_environments(
-
-**id:** `str` — Unique identifier for Evaluator.
+**log_id:** `str` — This identifies the Agent Log to continue.
+
+
+
+
+
+-
+
+**messages:** `typing.Sequence[ChatMessageParams]` — The additional messages with which to continue the Agent Log. Often, these should start with Tool messages containing the results for the previous Assistant message's tool calls.
+
+
+
+
+
+-
+
+**provider_api_keys:** `typing.Optional[ProviderApiKeysParams]` — API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+
+
+
+
+-
+
+**include_trace_children:** `typing.Optional[bool]` — If true, populate `trace_children` for the returned Agent Log. Defaults to false.
@@ -6635,7 +9936,7 @@ client.evaluators.list_environments(
-client.evaluators.update_monitoring(...)
+client.agents.continue_(...)
-
@@ -6647,10 +9948,15 @@ client.evaluators.list_environments(
-
-Activate and deactivate Evaluators for monitoring the Evaluator.
+Continue an incomplete Agent call.
-An activated Evaluator will automatically be run on all new Logs
-within the Evaluator for monitoring purposes.
+This endpoint allows continuing an existing incomplete Agent call, using the context
+from the previous interaction. The Agent will resume processing from where it left off.
+
+The original log must be in an incomplete state to be continued.
+
+The messages in the request will be appended
+to the original messages in the log.
@@ -6670,8 +9976,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.evaluators.update_monitoring(
- id="id",
+client.agents.continue_(
+ log_id="log_id",
+ messages=[{"role": "user"}],
)
```
@@ -6688,7 +9995,7 @@ client.evaluators.update_monitoring(
-
-**id:** `str`
+**log_id:** `str` — This identifies the Agent Log to continue.
@@ -6696,9 +10003,7 @@ client.evaluators.update_monitoring(
-
-**activate:** `typing.Optional[
- typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]
-]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+**messages:** `typing.Sequence[ChatMessageParams]` — The additional messages with which to continue the Agent Log. Often, these should start with Tool messages containing the results for the previous Assistant message's tool calls.
@@ -6706,9 +10011,15 @@ client.evaluators.update_monitoring(
-
-**deactivate:** `typing.Optional[
- typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]
-]` — Evaluators to deactivate. These will not be run on new Logs.
+**provider_api_keys:** `typing.Optional[ProviderApiKeysParams]` — API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+
+
+
+
+-
+
+**include_trace_children:** `typing.Optional[bool]` — If true, populate `trace_children` for the returned Agent Log. Defaults to false.
@@ -6728,8 +10039,7 @@ client.evaluators.update_monitoring(
-## Flows
-client.flows.log(...)
+client.agents.list(...)
-
@@ -6741,13 +10051,7 @@ client.evaluators.update_monitoring(
-
-Log to a Flow.
-
-You can use query parameters `version_id`, or `environment`, to target
-an existing version of the Flow. Otherwise, the default deployed version will be chosen.
-
-If you create the Flow Log with a `log_status` of `incomplete`, you should later update it to `complete`
-in order to trigger Evaluators.
+Get a list of all Agents.
@@ -6762,41 +10066,12 @@ in order to trigger Evaluators.
-
```python
-import datetime
-
from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.log(
- id="fl_6o701g4jmcanPVHxdqD0O",
- flow={
- "attributes": {
- "prompt": {
- "template": "You are a helpful assistant helping with medical anamnesis",
- "model": "gpt-4o",
- "temperature": 0.8,
- },
- "tool": {
- "name": "retrieval_tool_v3",
- "description": "Retrieval tool for MedQA.",
- "source_code": "def retrieval_tool(question: str) -> str:\n pass\n",
- },
- }
- },
- inputs={
- "question": "Patient with a history of diabetes and hypertension presents with chest pain and shortness of breath."
- },
- output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.",
- log_status="incomplete",
- start_time=datetime.datetime.fromisoformat(
- "2024-07-08 19:40:35+00:00",
- ),
- end_time=datetime.datetime.fromisoformat(
- "2024-07-08 19:40:39+00:00",
- ),
-)
+client.agents.list()
```
@@ -6812,7 +10087,7 @@ client.flows.log(
-
-**version_id:** `typing.Optional[str]` — A specific Version ID of the Flow to log to.
+**page:** `typing.Optional[int]` — Page number for pagination.
@@ -6820,7 +10095,7 @@ client.flows.log(
-
-**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
+**size:** `typing.Optional[int]` — Page size for pagination. Number of Agents to fetch.
@@ -6828,7 +10103,7 @@ client.flows.log(
-
-**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — List of chat messages that were used as an input to the Flow.
+**name:** `typing.Optional[str]` — Case-insensitive filter for Agent name.
@@ -6836,7 +10111,7 @@ client.flows.log(
-
-**output_message:** `typing.Optional[ChatMessageParams]` — The output message returned by this Flow.
+**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users.
@@ -6844,7 +10119,7 @@ client.flows.log(
-
-**run_id:** `typing.Optional[str]` — Unique identifier for the Run to associate the Log to.
+**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Agents by.
@@ -6852,7 +10127,7 @@ client.flows.log(
-
-**path:** `typing.Optional[str]` — Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+**order:** `typing.Optional[SortOrder]` — Direction to sort by.
@@ -6860,71 +10135,76 @@ client.flows.log(
-
-**id:** `typing.Optional[str]` — ID for an existing Flow.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**start_time:** `typing.Optional[dt.datetime]` — The start time of the Trace. Will be updated if a child Log with an earlier start time is added.
-
+
+client.agents.upsert(...)
-
-**end_time:** `typing.Optional[dt.datetime]` — The end time of the Trace. Will be updated if a child Log with a later end time is added.
-
-
-
+#### 📝 Description
-
-**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
-
-
-
-
-
-**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
-
+Create an Agent or update it with a new version if it already exists.
+
+Agents are identified by the `ID` or their `path`. The parameters (e.g. the template, temperature, model, etc.) and
+tools determine the versions of the Agent.
+
+You can provide `version_name` and `version_description` to identify and describe your versions.
+Version names must be unique within an Agent - attempting to create a version with a name
+that already exists will result in a 409 Conflict error.
+
+
+#### 🔌 Usage
+
-
-**error:** `typing.Optional[str]` — Error message if the log is an error.
-
-
-
-
-
-**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
-
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.agents.upsert(
+ model="model",
+)
+
+```
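+
+A fuller sketch using the parameters documented below (the path, template, and version details are placeholders):
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+    api_key="YOUR_API_KEY",
+)
+client.agents.upsert(
+    path="Personal Projects/Support Agent",  # placeholder path
+    model="gpt-4o",
+    template=[
+        {
+            "role": "system",
+            "content": "You are a helpful assistant. Answer {{question}}.",
+        }
+    ],
+    temperature=0.7,
+    max_iterations=5,
+    version_name="support-agent-v1",
+    version_description="Initial version",
+)
+
+```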
+
+
+#### ⚙️ Parameters
+
-
-**stdout:** `typing.Optional[str]` — Captured log and debug statements.
-
-
-
-
-
-**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider.
+**model:** `str` — The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
@@ -6932,7 +10212,7 @@ client.flows.log(
-
-**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received the provider.
+**path:** `typing.Optional[str]` — Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
@@ -6940,7 +10220,7 @@ client.flows.log(
-
-**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
+**id:** `typing.Optional[str]` — ID for an existing Agent.
@@ -6948,7 +10228,7 @@ client.flows.log(
-
-**source:** `typing.Optional[str]` — Identifies where the model was called from.
+**endpoint:** `typing.Optional[ModelEndpoints]` — The provider model endpoint used.
@@ -6956,7 +10236,14 @@ client.flows.log(
-
-**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
+**template:** `typing.Optional[AgentRequestTemplateParams]`
+
+The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+For completion models, provide a prompt template as a string.
+
+Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
@@ -6964,7 +10251,7 @@ client.flows.log(
-
-**log_status:** `typing.Optional[LogStatus]` — Status of the Flow Log. When a Log is updated from `incomplete` to `complete`, it becomes available to Monitoring Evaluators. Flow Logs cannot have an unspecified status: they must start as `incomplete` to allow children to be added. Provide `complete` if you don't intend to add children to the trace.
+**template_language:** `typing.Optional[TemplateLanguage]` — The template language to use for rendering the template.
@@ -6972,7 +10259,7 @@ client.flows.log(
-
-**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+**provider:** `typing.Optional[ModelProviders]` — The company providing the underlying model service.
@@ -6980,7 +10267,7 @@ client.flows.log(
-
-**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
+**max_tokens:** `typing.Optional[int]` — The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt.
@@ -6988,7 +10275,7 @@ client.flows.log(
-
-**user:** `typing.Optional[str]` — End-user ID related to the Log.
+**temperature:** `typing.Optional[float]` — What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
@@ -6996,7 +10283,7 @@ client.flows.log(
-
-**flow_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to.
+**top_p:** `typing.Optional[float]` — An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
@@ -7004,7 +10291,7 @@ client.flows.log(
-
-**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
+**stop:** `typing.Optional[AgentRequestStopParams]` — The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
@@ -7012,7 +10299,7 @@ client.flows.log(
-
-**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+**presence_penalty:** `typing.Optional[float]` — Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
@@ -7020,7 +10307,7 @@ client.flows.log(
-
-**flow:** `typing.Optional[FlowKernelRequestParams]` — Flow used to generate the Trace.
+**frequency_penalty:** `typing.Optional[float]` — Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
@@ -7028,79 +10315,47 @@ client.flows.log(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**other:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Other parameter values to be passed to the provider call.
-
-
+
+-
+**seed:** `typing.Optional[int]` — If specified, the model will make a best effort to sample deterministically, but it is not guaranteed.
+
-
-client.flows.update_log(...)
-
-#### 📝 Description
-
-
--
+**response_format:** `typing.Optional[ResponseFormatParams]` — The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+
+
+
-
-Update the status, inputs, output of a Flow Log.
-
-Marking a Flow Log as complete will trigger any monitoring Evaluators to run.
-Inputs and output (or error) must be provided in order to mark it as complete.
-
-The end_time log attribute will be set to match the time the log is marked as complete.
-
-
+**reasoning_effort:** `typing.Optional[AgentRequestReasoningEffortParams]` — Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
+
-#### 🔌 Usage
-
-
--
-
-
-```python
-from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.flows.update_log(
- log_id="medqa_experiment_0001",
- inputs={
- "question": "Patient with a history of diabetes and normal tension presents with chest pain and shortness of breath."
- },
- output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.",
- log_status="complete",
-)
-
-```
-
-
+**tools:** `typing.Optional[typing.Sequence[AgentRequestToolsItemParams]]`
+
-#### ⚙️ Parameters
-
-
--
-
-
-**log_id:** `str` — Unique identifier of the Flow Log.
+**attributes:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used.
@@ -7108,7 +10363,7 @@ client.flows.update_log(
-
-**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — List of chat messages that were used as an input to the Flow.
+**max_iterations:** `typing.Optional[int]` — The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
@@ -7116,7 +10371,7 @@ client.flows.update_log(
-
-**output_message:** `typing.Optional[ChatMessageParams]` — The output message returned by this Flow.
+**version_name:** `typing.Optional[str]` — Unique name for the Agent version. Each Agent can only have one version with a given name.
@@ -7124,7 +10379,7 @@ client.flows.update_log(
-
-**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the Flow Log.
+**version_description:** `typing.Optional[str]` — Description of the Version.
@@ -7132,7 +10387,7 @@ client.flows.update_log(
-
-**output:** `typing.Optional[str]` — The output of the Flow Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`.
+**description:** `typing.Optional[str]` — Description of the Agent.
@@ -7140,7 +10395,7 @@ client.flows.update_log(
-
-**error:** `typing.Optional[str]` — The error message of the Flow Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`.
+**tags:** `typing.Optional[typing.Sequence[str]]` — List of tags associated with this Agent.
@@ -7148,7 +10403,7 @@ client.flows.update_log(
-
-**log_status:** `typing.Optional[LogStatus]` — Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs.
+**readme:** `typing.Optional[str]` — Long description of the Agent.
@@ -7168,7 +10423,7 @@ client.flows.update_log(
-client.flows.get(...)
+client.agents.delete_agent_version(...)
-
@@ -7180,10 +10435,7 @@ client.flows.update_log(
-
-Retrieve the Flow with the given ID.
-
-By default, the deployed version of the Flow is returned. Use the query parameters
-`version_id` or `environment` to target a specific version of the Flow.
+Delete a version of the Agent.
@@ -7203,8 +10455,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.get(
- id="fl_6o701g4jmcanPVHxdqD0O",
+client.agents.delete_agent_version(
+ id="id",
+ version_id="version_id",
)
```
@@ -7221,15 +10474,7 @@ client.flows.get(
-
-**id:** `str` — Unique identifier for Flow.
-
-
-
-
-
--
-
-**version_id:** `typing.Optional[str]` — A specific Version ID of the Flow to retrieve.
+**id:** `str` — Unique identifier for Agent.
@@ -7237,7 +10482,7 @@ client.flows.get(
-
-**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
+**version_id:** `str` — Unique identifier for the specific version of the Agent.
@@ -7257,7 +10502,7 @@ client.flows.get(
-client.flows.delete(...)
+client.agents.patch_agent_version(...)
-
@@ -7269,7 +10514,7 @@ client.flows.get(
-
-Delete the Flow with the given ID.
+Update the name or description of the Agent version.
@@ -7289,8 +10534,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.delete(
- id="fl_6o701g4jmcanPVHxdqD0O",
+client.agents.patch_agent_version(
+ id="id",
+ version_id="version_id",
)
```
@@ -7307,7 +10553,31 @@ client.flows.delete(
-
-**id:** `str` — Unique identifier for Flow.
+**id:** `str` — Unique identifier for Agent.
+
+
+
+
+
+-
+
+**version_id:** `str` — Unique identifier for the specific version of the Agent.
+
+
+
+
+
+-
+
+**name:** `typing.Optional[str]` — Name of the version.
+
+
+
+
+
+-
+
+**description:** `typing.Optional[str]` — Description of the version.
@@ -7327,7 +10597,7 @@ client.flows.delete(
-client.flows.move(...)
+client.agents.get(...)
-
@@ -7339,7 +10609,10 @@ client.flows.delete(
-
-Move the Flow to a different path or change the name.
+Retrieve the Agent with the given ID.
+
+By default, the deployed version of the Agent is returned. Use the query parameters
+`version_id` or `environment` to target a specific version of the Agent.
@@ -7359,9 +10632,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.move(
- id="fl_6o701g4jmcanPVHxdqD0O",
- path="new directory/new name",
+client.agents.get(
+ id="id",
)
```
@@ -7378,15 +10650,7 @@ client.flows.move(
-
-**id:** `str` — Unique identifier for Flow.
-
-
-
-
-
--
-
-**path:** `typing.Optional[str]` — Path of the Flow including the Flow name, which is used as a unique identifier.
+**id:** `str` — Unique identifier for Agent.
@@ -7394,7 +10658,7 @@ client.flows.move(
-
-**name:** `typing.Optional[str]` — Name of the Flow.
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Agent to retrieve.
@@ -7402,7 +10666,7 @@ client.flows.move(
-
-**directory_id:** `typing.Optional[str]` — Unique identifier for the Directory to move Flow to. Starts with `dir_`.
+**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
@@ -7422,7 +10686,7 @@ client.flows.move(
-client.flows.list(...)
+client.agents.delete(...)
-
@@ -7434,7 +10698,7 @@ client.flows.move(
-
-Get a list of Flows.
+Delete the Agent with the given ID.
@@ -7454,14 +10718,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-response = client.flows.list(
- size=1,
+client.agents.delete(
+ id="id",
)
-for item in response:
- yield item
-# alternatively, you can paginate page-by-page
-for page in response.iter_pages():
- yield page
```
@@ -7477,47 +10736,7 @@ for page in response.iter_pages():
-
-**page:** `typing.Optional[int]` — Page number for pagination.
-
-
-
-
-
--
-
-**size:** `typing.Optional[int]` — Page size for pagination. Number of Flows to fetch.
-
-
-
-
-
--
-
-**name:** `typing.Optional[str]` — Case-insensitive filter for Flow name.
-
-
-
-
-
--
-
-**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Flow. This filter matches against both email address and name of users.
-
-
-
-
-
--
-
-**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Flows by
-
-
-
-
-
--
-
-**order:** `typing.Optional[SortOrder]` — Direction to sort by.
+**id:** `str` — Unique identifier for Agent.
@@ -7537,7 +10756,7 @@ for page in response.iter_pages():
-client.flows.upsert(...)
+client.agents.move(...)
-
@@ -7549,13 +10768,7 @@ for page in response.iter_pages():
-
-Create or update a Flow.
-
-Flows can also be identified by the `ID` or their `path`.
-
-You can provide `version_name` and `version_description` to identify and describe your versions.
-Version names must be unique within a Flow - attempting to create a version with a name
-that already exists will result in a 409 Conflict error.
+Move the Agent to a different path or change the name.
@@ -7575,22 +10788,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.upsert(
- path="Personal Projects/MedQA Flow",
- attributes={
- "prompt": {
- "template": "You are a helpful medical assistant helping with medical anamnesis. Answer {{question}}",
- "model": "gpt-4o",
- "temperature": 0.8,
- },
- "tool": {
- "name": "retrieval_tool_v3",
- "description": "Retrieval tool for MedQA.",
- "source_code": "def retrieval_tool(question: str) -> str:\n pass\n",
- },
- "version_name": "medqa-flow-v1",
- "version_description": "Initial version",
- },
+client.agents.move(
+ id="id",
)
```
@@ -7607,15 +10806,7 @@ client.flows.upsert(
-
-**attributes:** `typing.Dict[str, typing.Optional[typing.Any]]` — A key-value object identifying the Flow Version.
-
-
-
-
-
--
-
-**path:** `typing.Optional[str]` — Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+**id:** `str` — Unique identifier for Agent.
@@ -7623,7 +10814,7 @@ client.flows.upsert(
-
-**id:** `typing.Optional[str]` — ID for an existing Flow.
+**path:** `typing.Optional[str]` — Path of the Agent including the Agent name, which is used as a unique identifier.
@@ -7631,7 +10822,7 @@ client.flows.upsert(
-
-**version_name:** `typing.Optional[str]` — Unique name for the Flow version. Version names must be unique for a given Flow.
+**name:** `typing.Optional[str]` — Name of the Agent.
@@ -7639,7 +10830,7 @@ client.flows.upsert(
-
-**version_description:** `typing.Optional[str]` — Description of the version, e.g., the changes made in this version.
+**directory_id:** `typing.Optional[str]` — Unique identifier for the Directory to move the Agent to. Starts with `dir_`.
@@ -7659,7 +10850,7 @@ client.flows.upsert(
-client.flows.list_versions(...)
+client.agents.list_versions(...)
-
@@ -7671,7 +10862,7 @@ client.flows.upsert(
-
-Get a list of all the versions of a Flow.
+Get a list of all the versions of an Agent.
@@ -7691,8 +10882,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.list_versions(
- id="fl_6o701g4jmcanPVHxdqD0O",
+client.agents.list_versions(
+ id="id",
)
```
@@ -7709,7 +10900,7 @@ client.flows.list_versions(
-
-**id:** `str` — Unique identifier for Flow.
+**id:** `str` — Unique identifier for Agent.
@@ -7737,7 +10928,7 @@ client.flows.list_versions(
-client.flows.delete_flow_version(...)
+client.agents.set_deployment(...)
-
@@ -7749,7 +10940,10 @@ client.flows.list_versions(
-
-Delete a version of the Flow.
+Deploy Agent to an Environment.
+
+Set the deployed version for the specified Environment. This Agent
+will be used for calls made to the Agent in this Environment.
@@ -7769,8 +10963,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.delete_flow_version(
+client.agents.set_deployment(
id="id",
+ environment_id="environment_id",
version_id="version_id",
)
@@ -7788,7 +10983,7 @@ client.flows.delete_flow_version(
-
-**id:** `str` — Unique identifier for Flow.
+**id:** `str` — Unique identifier for Agent.
@@ -7796,7 +10991,15 @@ client.flows.delete_flow_version(
-
-**version_id:** `str` — Unique identifier for the specific version of the Flow.
+**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
+
+
+
+
+
+-
+
+**version_id:** `str` — Unique identifier for the specific version of the Agent.
@@ -7816,7 +11019,7 @@ client.flows.delete_flow_version(
-client.flows.update_flow_version(...)
+client.agents.remove_deployment(...)
-
@@ -7828,7 +11031,10 @@ client.flows.delete_flow_version(
-
-Update the name or description of the Flow version.
+Remove deployed Agent from the Environment.
+
+Remove the deployed version for the specified Environment. This Agent
+will no longer be used for calls made to the Agent in this Environment.
@@ -7848,9 +11054,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.update_flow_version(
+client.agents.remove_deployment(
id="id",
- version_id="version_id",
+ environment_id="environment_id",
)
```
@@ -7867,23 +11073,7 @@ client.flows.update_flow_version(
-
-**id:** `str` — Unique identifier for Flow.
-
-
-
-
-
--
-
-**version_id:** `str` — Unique identifier for the specific version of the Flow.
-
-
-
-
-
--
-
-**name:** `typing.Optional[str]` — Name of the version.
+**id:** `str` — Unique identifier for Agent.
@@ -7891,7 +11081,7 @@ client.flows.update_flow_version(
-
-**description:** `typing.Optional[str]` — Description of the version.
+**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
@@ -7911,7 +11101,7 @@ client.flows.update_flow_version(
-client.flows.set_deployment(...)
+client.agents.list_environments(...)
-
@@ -7923,10 +11113,7 @@ client.flows.update_flow_version(
-
-Deploy Flow to an Environment.
-
-Set the deployed version for the specified Environment. This Flow
-will be used for calls made to the Flow in this Environment.
+List all Environments and their deployed versions for the Agent.
@@ -7946,10 +11133,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.set_deployment(
- id="fl_6o701g4jmcanPVHxdqD0O",
- environment_id="staging",
- version_id="flv_6o701g4jmcanPVHxdqD0O",
+client.agents.list_environments(
+ id="id",
)
```
@@ -7966,23 +11151,7 @@ client.flows.set_deployment(
-
-**id:** `str` — Unique identifier for Flow.
-
-
-
-
-
--
-
-**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
-
-
-
-
-
--
-
-**version_id:** `str` — Unique identifier for the specific version of the Flow.
+**id:** `str` — Unique identifier for Agent.
@@ -8002,7 +11171,7 @@ client.flows.set_deployment(
-client.flows.remove_deployment(...)
+client.agents.update_monitoring(...)
-
@@ -8014,10 +11183,10 @@ client.flows.set_deployment(
-
-Remove deployed Flow from the Environment.
+Activate and deactivate Evaluators for monitoring the Agent.
-Remove the deployed version for the specified Environment. This Flow
-will no longer be used for calls made to the Flow in this Environment.
+An activated Evaluator will automatically be run on all new Logs
+within the Agent for monitoring purposes.
@@ -8037,9 +11206,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.remove_deployment(
- id="fl_6o701g4jmcanPVHxdqD0O",
- environment_id="staging",
+client.agents.update_monitoring(
+ id="id",
)
```
@@ -8056,7 +11224,7 @@ client.flows.remove_deployment(
-
-**id:** `str` — Unique identifier for Flow.
+**id:** `str`
@@ -8064,7 +11232,19 @@ client.flows.remove_deployment(
-
-**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
+**activate:** `typing.Optional[
+ typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]
+]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+
+
+
+
+
+-
+
+**deactivate:** `typing.Optional[
+ typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]
+]` — Evaluators to deactivate. These will not be run on new Logs.
@@ -8084,7 +11264,7 @@ client.flows.remove_deployment(
-client.flows.list_environments(...)
+client.agents.serialize(...)
-
@@ -8096,7 +11276,13 @@ client.flows.remove_deployment(
-
-List all Environments and their deployed versions for the Flow.
+Serialize an Agent to the .agent file format.
+
+Useful for storing the Agent with your code in a version control system,
+or for editing with an AI tool.
+
+By default, the deployed version of the Agent is returned. Use the query parameters
+`version_id` or `environment` to target a specific version of the Agent.
@@ -8116,8 +11302,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.list_environments(
- id="fl_6o701g4jmcanPVHxdqD0O",
+client.agents.serialize(
+ id="id",
)
```
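+
+For example, to store the Agent alongside your code (a sketch assuming `serialize` returns the .agent file contents as a string; the Agent ID and filename are placeholders):
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+    api_key="YOUR_API_KEY",
+)
+serialized = client.agents.serialize(
+    id="ag_1234567890",  # placeholder Agent ID
+)
+with open("support.agent", "w") as f:
+    f.write(serialized)  # assumes a string payload
+
+```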
@@ -8134,7 +11320,23 @@ client.flows.list_environments(
-
-**id:** `str` — Unique identifier for Flow.
+**id:** `str` — Unique identifier for Agent.
+
+
+
+
+
+-
+
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Agent to retrieve.
+
+
+
+
+
+-
+
+**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
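+
+For example, a sketch targeting a specific version instead of the deployed one (the version ID is illustrative):
+
+```python
+client.agents.serialize(
+    id="id",
+    version_id="agv_1abc4308abd",
+)
+```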
@@ -8154,7 +11356,7 @@ client.flows.list_environments(
-client.flows.update_monitoring(...)
+client.agents.deserialize(...)
-
@@ -8166,10 +11368,10 @@ client.flows.list_environments(
-
-Activate and deactivate Evaluators for monitoring the Flow.
+Deserialize an Agent from the .agent file format.
-An activated Evaluator will automatically be run on all new "completed" Logs
-within the Flow for monitoring purposes.
+This returns a subset of the attributes required by an Agent:
+the subset that defines the Agent version (e.g. `model`, `temperature`, etc.).
@@ -8189,9 +11391,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.update_monitoring(
- id="fl_6o701g4jmcanPVHxdqD0O",
- activate=[{"evaluator_version_id": "evv_1abc4308abd"}],
+client.agents.deserialize(
+ agent="agent",
)
```
@@ -8208,27 +11409,7 @@ client.flows.update_monitoring(
-
-**id:** `str`
-
-
-
-
-
--
-
-**activate:** `typing.Optional[
- typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]
-]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
-
-
-
-
-
--
-
-**deactivate:** `typing.Optional[
- typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]
-]` — Evaluators to deactivate. These will not be run on new Logs.
+**agent:** `str`
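+
+For example, a round-trip sketch, assuming `serialize` returns the raw `.agent` text:
+
+```python
+serialized = client.agents.serialize(
+    id="id",
+)
+client.agents.deserialize(
+    agent=serialized,
+)
+```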
@@ -8742,6 +11923,14 @@ client.files.list_files()
-
+**include_content:** `typing.Optional[bool]` — Whether to include the serialized file content in the response. Currently only supported for agents and prompts.
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
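+
+For example, a sketch that includes the serialized content for returned Agents and Prompts:
+
+```python
+client.files.list_files(
+    include_content=True,
+)
+```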
@@ -10190,7 +13379,7 @@ for page in response.iter_pages():
-
-**include_trace_children:** `typing.Optional[bool]` — If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow Logs.
+**include_trace_children:** `typing.Optional[bool]` — If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow or Agent Logs.
diff --git a/src/humanloop/__init__.py b/src/humanloop/__init__.py
index 0c431892..2ad9d39e 100644
--- a/src/humanloop/__init__.py
+++ b/src/humanloop/__init__.py
@@ -1,16 +1,45 @@
# This file was auto-generated by Fern from our API Definition.
from .types import (
+ AgentCallResponse,
+ AgentCallResponseToolChoice,
+ AgentCallStreamResponse,
+ AgentCallStreamResponsePayload,
AgentConfigResponse,
+ AgentContinueResponse,
+ AgentContinueResponseToolChoice,
+ AgentContinueStreamResponse,
+ AgentContinueStreamResponsePayload,
+ AgentInlineTool,
+ AgentKernelRequest,
+ AgentKernelRequestReasoningEffort,
+ AgentKernelRequestStop,
+ AgentKernelRequestTemplate,
+ AgentKernelRequestToolsItem,
+ AgentLinkedFileRequest,
+ AgentLinkedFileResponse,
+ AgentLinkedFileResponseFile,
+ AgentLogResponse,
+ AgentLogResponseToolChoice,
+ AgentLogStreamResponse,
+ AgentResponse,
+ AgentResponseReasoningEffort,
+ AgentResponseStop,
+ AgentResponseTemplate,
+ AgentResponseToolsItem,
+ AnthropicRedactedThinkingContent,
+ AnthropicThinkingContent,
BaseModelsUserResponse,
BooleanEvaluatorStatsResponse,
ChatMessage,
ChatMessageContent,
ChatMessageContentItem,
+ ChatMessageThinkingItem,
ChatRole,
ChatToolType,
CodeEvaluatorRequest,
ConfigToolResponse,
+ CreateAgentLogResponse,
CreateDatapointRequest,
CreateDatapointRequestTargetValue,
CreateEvaluatorLogResponse,
@@ -55,10 +84,12 @@
EvaluatorReturnTypeEnum,
EvaluatorVersionId,
EvaluatorsRequest,
+ EventType,
ExternalEvaluatorRequest,
FeedbackType,
FileEnvironmentResponse,
FileEnvironmentResponseFile,
+ FileEnvironmentVariableRequest,
FileId,
FilePath,
FileRequest,
@@ -76,7 +107,9 @@
ImageUrl,
ImageUrlDetail,
InputResponse,
+ LinkedFileRequest,
LinkedToolResponse,
+ ListAgents,
ListDatasets,
ListEvaluators,
ListFlows,
@@ -85,6 +118,7 @@
LlmEvaluatorRequest,
LogResponse,
LogStatus,
+ LogStreamResponse,
ModelEndpoints,
ModelProviders,
MonitoringEvaluatorEnvironmentRequest,
@@ -93,15 +127,18 @@
MonitoringEvaluatorVersionRequest,
NumericEvaluatorStatsResponse,
ObservabilityStatus,
+ OnAgentCallEnum,
+ OpenAiReasoningEffort,
OverallStats,
+ PaginatedDataAgentResponse,
PaginatedDataEvaluationLogResponse,
PaginatedDataEvaluatorResponse,
PaginatedDataFlowResponse,
PaginatedDataLogResponse,
PaginatedDataPromptResponse,
PaginatedDataToolResponse,
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse,
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem,
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse,
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem,
PaginatedDatapointResponse,
PaginatedDatasetResponse,
PaginatedEvaluationResponse,
@@ -110,6 +147,7 @@
PlatformAccessEnum,
PopulateTemplateResponse,
PopulateTemplateResponsePopulatedTemplate,
+ PopulateTemplateResponseReasoningEffort,
PopulateTemplateResponseStop,
PopulateTemplateResponseTemplate,
ProjectSortBy,
@@ -118,15 +156,16 @@
PromptCallResponseToolChoice,
PromptCallStreamResponse,
PromptKernelRequest,
+ PromptKernelRequestReasoningEffort,
PromptKernelRequestStop,
PromptKernelRequestTemplate,
PromptLogResponse,
PromptLogResponseToolChoice,
PromptResponse,
+ PromptResponseReasoningEffort,
PromptResponseStop,
PromptResponseTemplate,
ProviderApiKeys,
- ReasoningEffort,
ResponseFormat,
ResponseFormatType,
RunStatsResponse,
@@ -139,6 +178,7 @@
TextEvaluatorStatsResponse,
TimeUnit,
ToolCall,
+ ToolCallResponse,
ToolChoice,
ToolFunction,
ToolKernelRequest,
@@ -162,7 +202,29 @@
VersionStatus,
)
from .errors import UnprocessableEntityError
-from . import datasets, directories, evaluations, evaluators, files, flows, logs, prompts, tools
+from . import agents, datasets, directories, evaluations, evaluators, files, flows, logs, prompts, tools
+from .agents import (
+ AgentLogRequestAgent,
+ AgentLogRequestAgentParams,
+ AgentLogRequestToolChoice,
+ AgentLogRequestToolChoiceParams,
+ AgentRequestReasoningEffort,
+ AgentRequestReasoningEffortParams,
+ AgentRequestStop,
+ AgentRequestStopParams,
+ AgentRequestTemplate,
+ AgentRequestTemplateParams,
+ AgentRequestToolsItem,
+ AgentRequestToolsItemParams,
+ AgentsCallRequestAgent,
+ AgentsCallRequestAgentParams,
+ AgentsCallRequestToolChoice,
+ AgentsCallRequestToolChoiceParams,
+ AgentsCallStreamRequestAgent,
+ AgentsCallStreamRequestAgentParams,
+ AgentsCallStreamRequestToolChoice,
+ AgentsCallStreamRequestToolChoiceParams,
+)
from .client import AsyncHumanloop, Humanloop
from .datasets import ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints
from .environment import HumanloopEnvironment
@@ -186,26 +248,63 @@
)
from .files import RetrieveByPathFilesRetrieveByPathPostResponse, RetrieveByPathFilesRetrieveByPathPostResponseParams
from .prompts import (
+ PromptLogRequestPrompt,
+ PromptLogRequestPromptParams,
PromptLogRequestToolChoice,
PromptLogRequestToolChoiceParams,
PromptLogUpdateRequestToolChoice,
PromptLogUpdateRequestToolChoiceParams,
+ PromptRequestReasoningEffort,
+ PromptRequestReasoningEffortParams,
PromptRequestStop,
PromptRequestStopParams,
PromptRequestTemplate,
PromptRequestTemplateParams,
+ PromptsCallRequestPrompt,
+ PromptsCallRequestPromptParams,
PromptsCallRequestToolChoice,
PromptsCallRequestToolChoiceParams,
+ PromptsCallStreamRequestPrompt,
+ PromptsCallStreamRequestPromptParams,
PromptsCallStreamRequestToolChoice,
PromptsCallStreamRequestToolChoiceParams,
)
from .requests import (
+ AgentCallResponseParams,
+ AgentCallResponseToolChoiceParams,
+ AgentCallStreamResponseParams,
+ AgentCallStreamResponsePayloadParams,
AgentConfigResponseParams,
+ AgentContinueResponseParams,
+ AgentContinueResponseToolChoiceParams,
+ AgentContinueStreamResponseParams,
+ AgentContinueStreamResponsePayloadParams,
+ AgentInlineToolParams,
+ AgentKernelRequestParams,
+ AgentKernelRequestReasoningEffortParams,
+ AgentKernelRequestStopParams,
+ AgentKernelRequestTemplateParams,
+ AgentKernelRequestToolsItemParams,
+ AgentLinkedFileRequestParams,
+ AgentLinkedFileResponseFileParams,
+ AgentLinkedFileResponseParams,
+ AgentLogResponseParams,
+ AgentLogResponseToolChoiceParams,
+ AgentLogStreamResponseParams,
+ AgentResponseParams,
+ AgentResponseReasoningEffortParams,
+ AgentResponseStopParams,
+ AgentResponseTemplateParams,
+ AgentResponseToolsItemParams,
+ AnthropicRedactedThinkingContentParams,
+ AnthropicThinkingContentParams,
BooleanEvaluatorStatsResponseParams,
ChatMessageContentItemParams,
ChatMessageContentParams,
ChatMessageParams,
+ ChatMessageThinkingItemParams,
CodeEvaluatorRequestParams,
+ CreateAgentLogResponseParams,
CreateDatapointRequestParams,
CreateDatapointRequestTargetValueParams,
CreateEvaluatorLogResponseParams,
@@ -245,6 +344,7 @@
ExternalEvaluatorRequestParams,
FileEnvironmentResponseFileParams,
FileEnvironmentResponseParams,
+ FileEnvironmentVariableRequestParams,
FileIdParams,
FilePathParams,
FileRequestParams,
@@ -258,7 +358,9 @@
ImageChatContentParams,
ImageUrlParams,
InputResponseParams,
+ LinkedFileRequestParams,
LinkedToolResponseParams,
+ ListAgentsParams,
ListDatasetsParams,
ListEvaluatorsParams,
ListFlowsParams,
@@ -266,24 +368,27 @@
ListToolsParams,
LlmEvaluatorRequestParams,
LogResponseParams,
+ LogStreamResponseParams,
MonitoringEvaluatorEnvironmentRequestParams,
MonitoringEvaluatorResponseParams,
MonitoringEvaluatorVersionRequestParams,
NumericEvaluatorStatsResponseParams,
OverallStatsParams,
+ PaginatedDataAgentResponseParams,
PaginatedDataEvaluationLogResponseParams,
PaginatedDataEvaluatorResponseParams,
PaginatedDataFlowResponseParams,
PaginatedDataLogResponseParams,
PaginatedDataPromptResponseParams,
PaginatedDataToolResponseParams,
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseParams,
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams,
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseParams,
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams,
PaginatedDatapointResponseParams,
PaginatedDatasetResponseParams,
PaginatedEvaluationResponseParams,
PopulateTemplateResponseParams,
PopulateTemplateResponsePopulatedTemplateParams,
+ PopulateTemplateResponseReasoningEffortParams,
PopulateTemplateResponseStopParams,
PopulateTemplateResponseTemplateParams,
PromptCallLogResponseParams,
@@ -291,11 +396,13 @@
PromptCallResponseToolChoiceParams,
PromptCallStreamResponseParams,
PromptKernelRequestParams,
+ PromptKernelRequestReasoningEffortParams,
PromptKernelRequestStopParams,
PromptKernelRequestTemplateParams,
PromptLogResponseParams,
PromptLogResponseToolChoiceParams,
PromptResponseParams,
+ PromptResponseReasoningEffortParams,
PromptResponseStopParams,
PromptResponseTemplateParams,
ProviderApiKeysParams,
@@ -307,6 +414,7 @@
TextChatContentParams,
TextEvaluatorStatsResponseParams,
ToolCallParams,
+ ToolCallResponseParams,
ToolChoiceParams,
ToolFunctionParams,
ToolKernelRequestParams,
@@ -329,8 +437,82 @@
__all__ = [
"AddEvaluatorsRequestEvaluatorsItem",
"AddEvaluatorsRequestEvaluatorsItemParams",
+ "AgentCallResponse",
+ "AgentCallResponseParams",
+ "AgentCallResponseToolChoice",
+ "AgentCallResponseToolChoiceParams",
+ "AgentCallStreamResponse",
+ "AgentCallStreamResponseParams",
+ "AgentCallStreamResponsePayload",
+ "AgentCallStreamResponsePayloadParams",
"AgentConfigResponse",
"AgentConfigResponseParams",
+ "AgentContinueResponse",
+ "AgentContinueResponseParams",
+ "AgentContinueResponseToolChoice",
+ "AgentContinueResponseToolChoiceParams",
+ "AgentContinueStreamResponse",
+ "AgentContinueStreamResponseParams",
+ "AgentContinueStreamResponsePayload",
+ "AgentContinueStreamResponsePayloadParams",
+ "AgentInlineTool",
+ "AgentInlineToolParams",
+ "AgentKernelRequest",
+ "AgentKernelRequestParams",
+ "AgentKernelRequestReasoningEffort",
+ "AgentKernelRequestReasoningEffortParams",
+ "AgentKernelRequestStop",
+ "AgentKernelRequestStopParams",
+ "AgentKernelRequestTemplate",
+ "AgentKernelRequestTemplateParams",
+ "AgentKernelRequestToolsItem",
+ "AgentKernelRequestToolsItemParams",
+ "AgentLinkedFileRequest",
+ "AgentLinkedFileRequestParams",
+ "AgentLinkedFileResponse",
+ "AgentLinkedFileResponseFile",
+ "AgentLinkedFileResponseFileParams",
+ "AgentLinkedFileResponseParams",
+ "AgentLogRequestAgent",
+ "AgentLogRequestAgentParams",
+ "AgentLogRequestToolChoice",
+ "AgentLogRequestToolChoiceParams",
+ "AgentLogResponse",
+ "AgentLogResponseParams",
+ "AgentLogResponseToolChoice",
+ "AgentLogResponseToolChoiceParams",
+ "AgentLogStreamResponse",
+ "AgentLogStreamResponseParams",
+ "AgentRequestReasoningEffort",
+ "AgentRequestReasoningEffortParams",
+ "AgentRequestStop",
+ "AgentRequestStopParams",
+ "AgentRequestTemplate",
+ "AgentRequestTemplateParams",
+ "AgentRequestToolsItem",
+ "AgentRequestToolsItemParams",
+ "AgentResponse",
+ "AgentResponseParams",
+ "AgentResponseReasoningEffort",
+ "AgentResponseReasoningEffortParams",
+ "AgentResponseStop",
+ "AgentResponseStopParams",
+ "AgentResponseTemplate",
+ "AgentResponseTemplateParams",
+ "AgentResponseToolsItem",
+ "AgentResponseToolsItemParams",
+ "AgentsCallRequestAgent",
+ "AgentsCallRequestAgentParams",
+ "AgentsCallRequestToolChoice",
+ "AgentsCallRequestToolChoiceParams",
+ "AgentsCallStreamRequestAgent",
+ "AgentsCallStreamRequestAgentParams",
+ "AgentsCallStreamRequestToolChoice",
+ "AgentsCallStreamRequestToolChoiceParams",
+ "AnthropicRedactedThinkingContent",
+ "AnthropicRedactedThinkingContentParams",
+ "AnthropicThinkingContent",
+ "AnthropicThinkingContentParams",
"AsyncHumanloop",
"BaseModelsUserResponse",
"BooleanEvaluatorStatsResponse",
@@ -341,11 +523,15 @@
"ChatMessageContentItemParams",
"ChatMessageContentParams",
"ChatMessageParams",
+ "ChatMessageThinkingItem",
+ "ChatMessageThinkingItemParams",
"ChatRole",
"ChatToolType",
"CodeEvaluatorRequest",
"CodeEvaluatorRequestParams",
"ConfigToolResponse",
+ "CreateAgentLogResponse",
+ "CreateAgentLogResponseParams",
"CreateDatapointRequest",
"CreateDatapointRequestParams",
"CreateDatapointRequestTargetValue",
@@ -438,6 +624,7 @@
"EvaluatorVersionId",
"EvaluatorVersionIdParams",
"EvaluatorsRequest",
+ "EventType",
"ExternalEvaluatorRequest",
"ExternalEvaluatorRequestParams",
"FeedbackType",
@@ -445,6 +632,8 @@
"FileEnvironmentResponseFile",
"FileEnvironmentResponseFileParams",
"FileEnvironmentResponseParams",
+ "FileEnvironmentVariableRequest",
+ "FileEnvironmentVariableRequestParams",
"FileId",
"FileIdParams",
"FilePath",
@@ -477,8 +666,12 @@
"ImageUrlParams",
"InputResponse",
"InputResponseParams",
+ "LinkedFileRequest",
+ "LinkedFileRequestParams",
"LinkedToolResponse",
"LinkedToolResponseParams",
+ "ListAgents",
+ "ListAgentsParams",
"ListDatasets",
"ListDatasetsParams",
"ListEvaluators",
@@ -495,6 +688,8 @@
"LogResponse",
"LogResponseParams",
"LogStatus",
+ "LogStreamResponse",
+ "LogStreamResponseParams",
"ModelEndpoints",
"ModelProviders",
"MonitoringEvaluatorEnvironmentRequest",
@@ -507,8 +702,12 @@
"NumericEvaluatorStatsResponse",
"NumericEvaluatorStatsResponseParams",
"ObservabilityStatus",
+ "OnAgentCallEnum",
+ "OpenAiReasoningEffort",
"OverallStats",
"OverallStatsParams",
+ "PaginatedDataAgentResponse",
+ "PaginatedDataAgentResponseParams",
"PaginatedDataEvaluationLogResponse",
"PaginatedDataEvaluationLogResponseParams",
"PaginatedDataEvaluatorResponse",
@@ -521,10 +720,10 @@
"PaginatedDataPromptResponseParams",
"PaginatedDataToolResponse",
"PaginatedDataToolResponseParams",
- "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse",
- "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseParams",
- "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem",
- "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams",
+ "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse",
+ "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseParams",
+ "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem",
+ "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams",
"PaginatedDatapointResponse",
"PaginatedDatapointResponseParams",
"PaginatedDatasetResponse",
@@ -538,6 +737,8 @@
"PopulateTemplateResponseParams",
"PopulateTemplateResponsePopulatedTemplate",
"PopulateTemplateResponsePopulatedTemplateParams",
+ "PopulateTemplateResponseReasoningEffort",
+ "PopulateTemplateResponseReasoningEffortParams",
"PopulateTemplateResponseStop",
"PopulateTemplateResponseStopParams",
"PopulateTemplateResponseTemplate",
@@ -553,10 +754,14 @@
"PromptCallStreamResponseParams",
"PromptKernelRequest",
"PromptKernelRequestParams",
+ "PromptKernelRequestReasoningEffort",
+ "PromptKernelRequestReasoningEffortParams",
"PromptKernelRequestStop",
"PromptKernelRequestStopParams",
"PromptKernelRequestTemplate",
"PromptKernelRequestTemplateParams",
+ "PromptLogRequestPrompt",
+ "PromptLogRequestPromptParams",
"PromptLogRequestToolChoice",
"PromptLogRequestToolChoiceParams",
"PromptLogResponse",
@@ -565,23 +770,30 @@
"PromptLogResponseToolChoiceParams",
"PromptLogUpdateRequestToolChoice",
"PromptLogUpdateRequestToolChoiceParams",
+ "PromptRequestReasoningEffort",
+ "PromptRequestReasoningEffortParams",
"PromptRequestStop",
"PromptRequestStopParams",
"PromptRequestTemplate",
"PromptRequestTemplateParams",
"PromptResponse",
"PromptResponseParams",
+ "PromptResponseReasoningEffort",
+ "PromptResponseReasoningEffortParams",
"PromptResponseStop",
"PromptResponseStopParams",
"PromptResponseTemplate",
"PromptResponseTemplateParams",
+ "PromptsCallRequestPrompt",
+ "PromptsCallRequestPromptParams",
"PromptsCallRequestToolChoice",
"PromptsCallRequestToolChoiceParams",
+ "PromptsCallStreamRequestPrompt",
+ "PromptsCallStreamRequestPromptParams",
"PromptsCallStreamRequestToolChoice",
"PromptsCallStreamRequestToolChoiceParams",
"ProviderApiKeys",
"ProviderApiKeysParams",
- "ReasoningEffort",
"ResponseFormat",
"ResponseFormatParams",
"ResponseFormatType",
@@ -604,6 +816,8 @@
"TimeUnit",
"ToolCall",
"ToolCallParams",
+ "ToolCallResponse",
+ "ToolCallResponseParams",
"ToolChoice",
"ToolChoiceParams",
"ToolFunction",
@@ -643,6 +857,7 @@
"VersionStatsResponseParams",
"VersionStatus",
"__version__",
+ "agents",
"datasets",
"directories",
"evaluations",
diff --git a/src/humanloop/agents/__init__.py b/src/humanloop/agents/__init__.py
new file mode 100644
index 00000000..ab2a2f9e
--- /dev/null
+++ b/src/humanloop/agents/__init__.py
@@ -0,0 +1,49 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from .types import (
+ AgentLogRequestAgent,
+ AgentLogRequestToolChoice,
+ AgentRequestReasoningEffort,
+ AgentRequestStop,
+ AgentRequestTemplate,
+ AgentRequestToolsItem,
+ AgentsCallRequestAgent,
+ AgentsCallRequestToolChoice,
+ AgentsCallStreamRequestAgent,
+ AgentsCallStreamRequestToolChoice,
+)
+from .requests import (
+ AgentLogRequestAgentParams,
+ AgentLogRequestToolChoiceParams,
+ AgentRequestReasoningEffortParams,
+ AgentRequestStopParams,
+ AgentRequestTemplateParams,
+ AgentRequestToolsItemParams,
+ AgentsCallRequestAgentParams,
+ AgentsCallRequestToolChoiceParams,
+ AgentsCallStreamRequestAgentParams,
+ AgentsCallStreamRequestToolChoiceParams,
+)
+
+__all__ = [
+ "AgentLogRequestAgent",
+ "AgentLogRequestAgentParams",
+ "AgentLogRequestToolChoice",
+ "AgentLogRequestToolChoiceParams",
+ "AgentRequestReasoningEffort",
+ "AgentRequestReasoningEffortParams",
+ "AgentRequestStop",
+ "AgentRequestStopParams",
+ "AgentRequestTemplate",
+ "AgentRequestTemplateParams",
+ "AgentRequestToolsItem",
+ "AgentRequestToolsItemParams",
+ "AgentsCallRequestAgent",
+ "AgentsCallRequestAgentParams",
+ "AgentsCallRequestToolChoice",
+ "AgentsCallRequestToolChoiceParams",
+ "AgentsCallStreamRequestAgent",
+ "AgentsCallStreamRequestAgentParams",
+ "AgentsCallStreamRequestToolChoice",
+ "AgentsCallStreamRequestToolChoiceParams",
+]
diff --git a/src/humanloop/agents/client.py b/src/humanloop/agents/client.py
new file mode 100644
index 00000000..5cc38277
--- /dev/null
+++ b/src/humanloop/agents/client.py
@@ -0,0 +1,3210 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ..core.client_wrapper import SyncClientWrapper
+from .raw_client import RawAgentsClient
+from ..requests.chat_message import ChatMessageParams
+from .requests.agent_log_request_tool_choice import AgentLogRequestToolChoiceParams
+from .requests.agent_log_request_agent import AgentLogRequestAgentParams
+import datetime as dt
+from ..types.log_status import LogStatus
+from ..core.request_options import RequestOptions
+from ..types.create_agent_log_response import CreateAgentLogResponse
+from ..types.log_response import LogResponse
+from .requests.agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoiceParams
+from .requests.agents_call_stream_request_agent import AgentsCallStreamRequestAgentParams
+from ..requests.provider_api_keys import ProviderApiKeysParams
+from ..types.agent_call_stream_response import AgentCallStreamResponse
+from .requests.agents_call_request_tool_choice import AgentsCallRequestToolChoiceParams
+from .requests.agents_call_request_agent import AgentsCallRequestAgentParams
+from ..types.agent_call_response import AgentCallResponse
+from ..types.agent_continue_stream_response import AgentContinueStreamResponse
+from ..types.agent_continue_response import AgentContinueResponse
+from ..types.project_sort_by import ProjectSortBy
+from ..types.sort_order import SortOrder
+from ..types.paginated_data_agent_response import PaginatedDataAgentResponse
+from ..types.model_endpoints import ModelEndpoints
+from .requests.agent_request_template import AgentRequestTemplateParams
+from ..types.template_language import TemplateLanguage
+from ..types.model_providers import ModelProviders
+from .requests.agent_request_stop import AgentRequestStopParams
+from ..requests.response_format import ResponseFormatParams
+from .requests.agent_request_reasoning_effort import AgentRequestReasoningEffortParams
+from .requests.agent_request_tools_item import AgentRequestToolsItemParams
+from ..types.agent_response import AgentResponse
+from ..types.list_agents import ListAgents
+from ..types.file_environment_response import FileEnvironmentResponse
+from ..requests.evaluator_activation_deactivation_request_activate_item import (
+ EvaluatorActivationDeactivationRequestActivateItemParams,
+)
+from ..requests.evaluator_activation_deactivation_request_deactivate_item import (
+ EvaluatorActivationDeactivationRequestDeactivateItemParams,
+)
+from ..types.agent_kernel_request import AgentKernelRequest
+from ..core.client_wrapper import AsyncClientWrapper
+from .raw_client import AsyncRawAgentsClient
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class AgentsClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._raw_client = RawAgentsClient(client_wrapper=client_wrapper)
+
+ @property
+ def with_raw_response(self) -> RawAgentsClient:
+ """
+ Retrieves a raw implementation of this client that returns raw responses.
+
+ Returns
+ -------
+ RawAgentsClient
+ """
+ return self._raw_client
+
+ def log(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ run_id: typing.Optional[str] = OMIT,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ output_message: typing.Optional[ChatMessageParams] = OMIT,
+ prompt_tokens: typing.Optional[int] = OMIT,
+ reasoning_tokens: typing.Optional[int] = OMIT,
+ output_tokens: typing.Optional[int] = OMIT,
+ prompt_cost: typing.Optional[float] = OMIT,
+ output_cost: typing.Optional[float] = OMIT,
+ finish_reason: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentLogRequestAgentParams] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ output: typing.Optional[str] = OMIT,
+ created_at: typing.Optional[dt.datetime] = OMIT,
+ error: typing.Optional[str] = OMIT,
+ provider_latency: typing.Optional[float] = OMIT,
+ stdout: typing.Optional[str] = OMIT,
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agent_log_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> CreateAgentLogResponse:
+ """
+ Create an Agent Log.
+
+ You can use query parameters `version_id`, or `environment`, to target
+ an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+ If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete`
+ in order to trigger Evaluators.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ run_id : typing.Optional[str]
+ Unique identifier for the Run to associate the Log to.
+
+ path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ output_message : typing.Optional[ChatMessageParams]
+ The message returned by the provider.
+
+ prompt_tokens : typing.Optional[int]
+ Number of tokens in the prompt used to generate the output.
+
+ reasoning_tokens : typing.Optional[int]
+ Number of reasoning tokens used to generate the output.
+
+ output_tokens : typing.Optional[int]
+ Number of tokens in the output generated by the model.
+
+ prompt_cost : typing.Optional[float]
+ Cost in dollars associated to the tokens in the prompt.
+
+ output_cost : typing.Optional[float]
+ Cost in dollars associated to the tokens in the output.
+
+ finish_reason : typing.Optional[str]
+ Reason the generation finished.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentLogRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentLogRequestAgentParams]
+ The agent configuration to use. Two formats are supported:
+ - An `AgentKernelRequest` object containing the agent configuration
+ - A string containing a serialized .agent file
+ A new Agent version will be created if the provided details are new.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ output : typing.Optional[str]
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+ created_at : typing.Optional[dt.datetime]
+ User defined timestamp for when the log was created.
+
+ error : typing.Optional[str]
+ Error message if the log is an error.
+
+ provider_latency : typing.Optional[float]
+ Duration of the logged event in seconds.
+
+ stdout : typing.Optional[str]
+ Captured log and debug statements.
+
+ provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw request sent to provider.
+
+ provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw response received from the provider.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agent_log_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ CreateAgentLogResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.log()
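+
+        # A fuller sketch (values illustrative, not from the API reference):
+        # create the Log as `incomplete` so monitoring Evaluators wait until
+        # it is later marked `complete` via `update_log`.
+        client.agents.log(
+            path="example/agent",
+            log_status="incomplete",
+        )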
+ """
+ response = self._raw_client.log(
+ version_id=version_id,
+ environment=environment,
+ run_id=run_id,
+ path=path,
+ id=id,
+ output_message=output_message,
+ prompt_tokens=prompt_tokens,
+ reasoning_tokens=reasoning_tokens,
+ output_tokens=output_tokens,
+ prompt_cost=prompt_cost,
+ output_cost=output_cost,
+ finish_reason=finish_reason,
+ messages=messages,
+ tool_choice=tool_choice,
+ agent=agent,
+ start_time=start_time,
+ end_time=end_time,
+ output=output,
+ created_at=created_at,
+ error=error,
+ provider_latency=provider_latency,
+ stdout=stdout,
+ provider_request=provider_request,
+ provider_response=provider_response,
+ inputs=inputs,
+ source=source,
+ metadata=metadata,
+ log_status=log_status,
+ source_datapoint_id=source_datapoint_id,
+ trace_parent_id=trace_parent_id,
+ user=user,
+ agent_log_request_environment=agent_log_request_environment,
+ save=save,
+ log_id=log_id,
+ request_options=request_options,
+ )
+ return response.data
+
+ def update_log(
+ self,
+ id: str,
+ log_id: str,
+ *,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ output_message: typing.Optional[ChatMessageParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ output: typing.Optional[str] = OMIT,
+ error: typing.Optional[str] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> LogResponse:
+ """
+ Update a Log.
+
+ Update the details of a Log with the given ID.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ log_id : str
+ Unique identifier for the Log.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            List of chat messages that were used as an input to the Agent.
+
+        output_message : typing.Optional[ChatMessageParams]
+            The output message returned by this Agent.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the Agent Log.
+
+        output : typing.Optional[str]
+            The output of the Agent Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`.
+
+        error : typing.Optional[str]
+            The error message of the Agent Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`.
+
+        log_status : typing.Optional[LogStatus]
+            Status of the Agent Log. When an Agent Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ LogResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.update_log(
+ id="id",
+ log_id="log_id",
+ )
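+
+        # e.g. mark a previously-incomplete Log as complete so monitoring
+        # Evaluators run on it (IDs illustrative):
+        client.agents.update_log(
+            id="id",
+            log_id="log_id",
+            log_status="complete",
+        )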
+ """
+ response = self._raw_client.update_log(
+ id,
+ log_id,
+ messages=messages,
+ output_message=output_message,
+ inputs=inputs,
+ output=output,
+ error=error,
+ log_status=log_status,
+ request_options=request_options,
+ )
+ return response.data
+
+ def call_stream(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentsCallStreamRequestAgentParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agents_call_stream_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ return_inputs: typing.Optional[bool] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.Iterator[AgentCallStreamResponse]:
+ """
+ Call an Agent.
+
+ Calling an Agent calls the model provider before logging
+ the request, responses and metadata to Humanloop.
+
+ You can use query parameters `version_id`, or `environment`, to target
+        an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can instead pass in
+ Agent details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Agent details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentsCallStreamRequestAgentParams]
+ The agent configuration to use. Two formats are supported:
+ - An `AgentKernelRequest` object containing the agent configuration
+ - A string containing a serialized .agent file
+ A new Agent version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agents_call_stream_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Yields
+ ------
+ typing.Iterator[AgentCallStreamResponse]
+
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ response = client.agents.call_stream()
+ for chunk in response:
+            print(chunk)
+ """
+ with self._raw_client.call_stream(
+ version_id=version_id,
+ environment=environment,
+ path=path,
+ id=id,
+ messages=messages,
+ tool_choice=tool_choice,
+ agent=agent,
+ inputs=inputs,
+ source=source,
+ metadata=metadata,
+ start_time=start_time,
+ end_time=end_time,
+ log_status=log_status,
+ source_datapoint_id=source_datapoint_id,
+ trace_parent_id=trace_parent_id,
+ user=user,
+ agents_call_stream_request_environment=agents_call_stream_request_environment,
+ save=save,
+ log_id=log_id,
+ provider_api_keys=provider_api_keys,
+ return_inputs=return_inputs,
+ include_trace_children=include_trace_children,
+ request_options=request_options,
+ ) as r:
+ yield from r.data
+
+ def call(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentsCallRequestAgentParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agents_call_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ return_inputs: typing.Optional[bool] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentCallResponse:
+ """
+ Call an Agent.
+
+ Calling an Agent calls the model provider before logging
+ the request, responses and metadata to Humanloop.
+
+ You can use query parameters `version_id`, or `environment`, to target
+        an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can instead pass in
+ Agent details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Agent details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentsCallRequestAgentParams]
+ The agent configuration to use. Two formats are supported:
+ - An `AgentKernelRequest` object containing the agent configuration
+ - A string containing a serialized .agent file
+ A new Agent version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agents_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentCallResponse
+
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.call()
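+
+        # A fuller sketch (path and message illustrative): call a deployed
+        # Agent by path with a user message.
+        client.agents.call(
+            path="example/agent",
+            messages=[{"role": "user", "content": "Hello"}],
+        )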
+ """
+ response = self._raw_client.call(
+ version_id=version_id,
+ environment=environment,
+ path=path,
+ id=id,
+ messages=messages,
+ tool_choice=tool_choice,
+ agent=agent,
+ inputs=inputs,
+ source=source,
+ metadata=metadata,
+ start_time=start_time,
+ end_time=end_time,
+ log_status=log_status,
+ source_datapoint_id=source_datapoint_id,
+ trace_parent_id=trace_parent_id,
+ user=user,
+ agents_call_request_environment=agents_call_request_environment,
+ save=save,
+ log_id=log_id,
+ provider_api_keys=provider_api_keys,
+ return_inputs=return_inputs,
+ include_trace_children=include_trace_children,
+ request_options=request_options,
+ )
+ return response.data
+
+ def continue_stream(
+ self,
+ *,
+ log_id: str,
+ messages: typing.Sequence[ChatMessageParams],
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.Iterator[AgentContinueStreamResponse]:
+ """
+ Continue an incomplete Agent call.
+
+ This endpoint allows continuing an existing incomplete Agent call, using the context
+ from the previous interaction. The Agent will resume processing from where it left off.
+
+ The original log must be in an incomplete state to be continued.
+
+ The messages in the request will be appended
+ to the original messages in the log.
+
+ Parameters
+ ----------
+ log_id : str
+ This identifies the Agent Log to continue.
+
+ messages : typing.Sequence[ChatMessageParams]
+            The additional messages with which to continue the Agent Log. Often, these should start with Tool messages containing results for the previous Assistant message's tool calls.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Yields
+ ------
+ typing.Iterator[AgentContinueStreamResponse]
+
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ response = client.agents.continue_stream(
+ log_id="log_id",
+ messages=[{"role": "user"}],
+ )
+ for chunk in response:
+            print(chunk)
+ """
+ with self._raw_client.continue_stream(
+ log_id=log_id,
+ messages=messages,
+ provider_api_keys=provider_api_keys,
+ include_trace_children=include_trace_children,
+ request_options=request_options,
+ ) as r:
+ yield from r.data
+
+ def continue_(
+ self,
+ *,
+ log_id: str,
+ messages: typing.Sequence[ChatMessageParams],
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentContinueResponse:
+ """
+ Continue an incomplete Agent call.
+
+ This endpoint allows continuing an existing incomplete Agent call, using the context
+ from the previous interaction. The Agent will resume processing from where it left off.
+
+ The original log must be in an incomplete state to be continued.
+
+ The messages in the request will be appended
+ to the original messages in the log.
+
+ Parameters
+ ----------
+ log_id : str
+ This identifies the Agent Log to continue.
+
+ messages : typing.Sequence[ChatMessageParams]
+            The additional messages with which to continue the Agent Log. Often, these should start with Tool messages containing results for the previous Assistant message's tool calls.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentContinueResponse
+
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.continue_(
+ log_id="log_id",
+ messages=[{"role": "user"}],
+ )
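+
+        # A common-case sketch (assumes OpenAI-style tool message fields;
+        # values illustrative): continue with Tool results for the previous
+        # Assistant message's tool calls.
+        client.agents.continue_(
+            log_id="log_id",
+            messages=[
+                {"role": "tool", "tool_call_id": "call_123", "content": "42"},
+            ],
+        )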
+ """
+ response = self._raw_client.continue_(
+ log_id=log_id,
+ messages=messages,
+ provider_api_keys=provider_api_keys,
+ include_trace_children=include_trace_children,
+ request_options=request_options,
+ )
+ return response.data
+
+ def list(
+ self,
+ *,
+ page: typing.Optional[int] = None,
+ size: typing.Optional[int] = None,
+ name: typing.Optional[str] = None,
+ user_filter: typing.Optional[str] = None,
+ sort_by: typing.Optional[ProjectSortBy] = None,
+ order: typing.Optional[SortOrder] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> PaginatedDataAgentResponse:
+ """
+ Get a list of all Agents.
+
+ Parameters
+ ----------
+ page : typing.Optional[int]
+ Page number for pagination.
+
+ size : typing.Optional[int]
+ Page size for pagination. Number of Agents to fetch.
+
+ name : typing.Optional[str]
+ Case-insensitive filter for Agent name.
+
+ user_filter : typing.Optional[str]
+ Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users.
+
+ sort_by : typing.Optional[ProjectSortBy]
+            Field to sort Agents by.
+
+ order : typing.Optional[SortOrder]
+ Direction to sort by.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ PaginatedDataAgentResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.list()
+ """
+ response = self._raw_client.list(
+ page=page,
+ size=size,
+ name=name,
+ user_filter=user_filter,
+ sort_by=sort_by,
+ order=order,
+ request_options=request_options,
+ )
+ return response.data
+
+ def upsert(
+ self,
+ *,
+ model: str,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ endpoint: typing.Optional[ModelEndpoints] = OMIT,
+ template: typing.Optional[AgentRequestTemplateParams] = OMIT,
+ template_language: typing.Optional[TemplateLanguage] = OMIT,
+ provider: typing.Optional[ModelProviders] = OMIT,
+ max_tokens: typing.Optional[int] = OMIT,
+ temperature: typing.Optional[float] = OMIT,
+ top_p: typing.Optional[float] = OMIT,
+ stop: typing.Optional[AgentRequestStopParams] = OMIT,
+ presence_penalty: typing.Optional[float] = OMIT,
+ frequency_penalty: typing.Optional[float] = OMIT,
+ other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ seed: typing.Optional[int] = OMIT,
+ response_format: typing.Optional[ResponseFormatParams] = OMIT,
+ reasoning_effort: typing.Optional[AgentRequestReasoningEffortParams] = OMIT,
+ tools: typing.Optional[typing.Sequence[AgentRequestToolsItemParams]] = OMIT,
+ attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ max_iterations: typing.Optional[int] = OMIT,
+ version_name: typing.Optional[str] = OMIT,
+ version_description: typing.Optional[str] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ tags: typing.Optional[typing.Sequence[str]] = OMIT,
+ readme: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Create an Agent or update it with a new version if it already exists.
+
+ Agents are identified by the `ID` or their `path`. The parameters (i.e. the template, temperature, model etc.) and
+ tools determine the versions of the Agent.
+
+ You can provide `version_name` and `version_description` to identify and describe your versions.
+ Version names must be unique within an Agent - attempting to create a version with a name
+ that already exists will result in a 409 Conflict error.
+
+ Parameters
+ ----------
+ model : str
+ The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+
+ path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ endpoint : typing.Optional[ModelEndpoints]
+ The provider model endpoint used.
+
+ template : typing.Optional[AgentRequestTemplateParams]
+ The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+ For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+ For completion models, provide a prompt template as a string.
+
+ Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+
+ template_language : typing.Optional[TemplateLanguage]
+ The template language to use for rendering the template.
+
+ provider : typing.Optional[ModelProviders]
+ The company providing the underlying model service.
+
+ max_tokens : typing.Optional[int]
+ The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt
+
+ temperature : typing.Optional[float]
+            What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+
+ top_p : typing.Optional[float]
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+
+ stop : typing.Optional[AgentRequestStopParams]
+ The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+
+ presence_penalty : typing.Optional[float]
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+
+ frequency_penalty : typing.Optional[float]
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+
+ other : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Other parameter values to be passed to the provider call.
+
+ seed : typing.Optional[int]
+ If specified, the model will make a best effort to sample deterministically, but determinism is not guaranteed.
+
+ response_format : typing.Optional[ResponseFormatParams]
+ The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+
+ reasoning_effort : typing.Optional[AgentRequestReasoningEffortParams]
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which specifies the maximum token budget.
+
+ tools : typing.Optional[typing.Sequence[AgentRequestToolsItemParams]]
+
+ attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used.
+
+ max_iterations : typing.Optional[int]
+ The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
+
+ version_name : typing.Optional[str]
+ Unique name for the Agent version. Each Agent can only have one version with a given name.
+
+ version_description : typing.Optional[str]
+ Description of the Version.
+
+ description : typing.Optional[str]
+ Description of the Agent.
+
+ tags : typing.Optional[typing.Sequence[str]]
+ List of tags associated with this Agent.
+
+ readme : typing.Optional[str]
+ Long description of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.upsert(
+ model="model",
+ )
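+
+ A fuller call might look like the following sketch; every value here is
+ an illustrative placeholder rather than a default:
+
+ client.agents.upsert(
+ path="agents/support-bot",
+ model="gpt-4o",
+ template=[{"role": "system", "content": "You are a helpful assistant."}],
+ temperature=0.7,
+ max_iterations=5,
+ version_name="v1",
+ )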
+ """
+ response = self._raw_client.upsert(
+ model=model,
+ path=path,
+ id=id,
+ endpoint=endpoint,
+ template=template,
+ template_language=template_language,
+ provider=provider,
+ max_tokens=max_tokens,
+ temperature=temperature,
+ top_p=top_p,
+ stop=stop,
+ presence_penalty=presence_penalty,
+ frequency_penalty=frequency_penalty,
+ other=other,
+ seed=seed,
+ response_format=response_format,
+ reasoning_effort=reasoning_effort,
+ tools=tools,
+ attributes=attributes,
+ max_iterations=max_iterations,
+ version_name=version_name,
+ version_description=version_description,
+ description=description,
+ tags=tags,
+ readme=readme,
+ request_options=request_options,
+ )
+ return response.data
+
+ def delete_agent_version(
+ self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> None:
+ """
+ Delete a version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.delete_agent_version(
+ id="id",
+ version_id="version_id",
+ )
+ """
+ response = self._raw_client.delete_agent_version(id, version_id, request_options=request_options)
+ return response.data
+
+ def patch_agent_version(
+ self,
+ id: str,
+ version_id: str,
+ *,
+ name: typing.Optional[str] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Update the name or description of the Agent version.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ name : typing.Optional[str]
+ Name of the version.
+
+ description : typing.Optional[str]
+ Description of the version.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.patch_agent_version(
+ id="id",
+ version_id="version_id",
+ )
+ """
+ response = self._raw_client.patch_agent_version(
+ id, version_id, name=name, description=description, request_options=request_options
+ )
+ return response.data
+
+ def get(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Retrieve the Agent with the given ID.
+
+ By default, the deployed version of the Agent is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.get(
+ id="id",
+ )
+ """
+ response = self._raw_client.get(
+ id, version_id=version_id, environment=environment, request_options=request_options
+ )
+ return response.data
+
+ def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
+ """
+ Delete the Agent with the given ID.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.delete(
+ id="id",
+ )
+ """
+ response = self._raw_client.delete(id, request_options=request_options)
+ return response.data
+
+ def move(
+ self,
+ id: str,
+ *,
+ path: typing.Optional[str] = OMIT,
+ name: typing.Optional[str] = OMIT,
+ directory_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Move the Agent to a different path or change the name.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ path : typing.Optional[str]
+ Path of the Agent including the Agent name, which is used as a unique identifier.
+
+ name : typing.Optional[str]
+ Name of the Agent.
+
+ directory_id : typing.Optional[str]
+ Unique identifier for the Directory to move the Agent to. Starts with `dir_`.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.move(
+ id="id",
+ )
+ """
+ response = self._raw_client.move(
+ id, path=path, name=name, directory_id=directory_id, request_options=request_options
+ )
+ return response.data
+
+ def list_versions(
+ self,
+ id: str,
+ *,
+ evaluator_aggregates: typing.Optional[bool] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> ListAgents:
+ """
+ Get a list of all the versions of an Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ evaluator_aggregates : typing.Optional[bool]
+ Whether to include Evaluator aggregate results for the versions in the response.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ ListAgents
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.list_versions(
+ id="id",
+ )
+ """
+ response = self._raw_client.list_versions(
+ id, evaluator_aggregates=evaluator_aggregates, request_options=request_options
+ )
+ return response.data
+
+ def set_deployment(
+ self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> AgentResponse:
+ """
+ Deploy Agent to an Environment.
+
+ Set the deployed version for the specified Environment. This version
+ will be used for calls made to the Agent in this Environment.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ environment_id : str
+ Unique identifier for the Environment to deploy the Version to.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.set_deployment(
+ id="id",
+ environment_id="environment_id",
+ version_id="version_id",
+ )
+ """
+ response = self._raw_client.set_deployment(
+ id, environment_id, version_id=version_id, request_options=request_options
+ )
+ return response.data
+
+ def remove_deployment(
+ self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> None:
+ """
+ Remove deployed Agent from the Environment.
+
+ Remove the deployed version for the specified Environment. This version
+ will no longer be used for calls made to the Agent in this Environment.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ environment_id : str
+ Unique identifier for the Environment to remove the deployment from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.remove_deployment(
+ id="id",
+ environment_id="environment_id",
+ )
+ """
+ response = self._raw_client.remove_deployment(id, environment_id, request_options=request_options)
+ return response.data
+
+ def list_environments(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> typing.List[FileEnvironmentResponse]:
+ """
+ List all Environments and their deployed versions for the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.List[FileEnvironmentResponse]
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.list_environments(
+ id="id",
+ )
+ """
+ response = self._raw_client.list_environments(id, request_options=request_options)
+ return response.data
+
+ def update_monitoring(
+ self,
+ id: str,
+ *,
+ activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT,
+ deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Activate and deactivate Evaluators for monitoring the Agent.
+
+ An activated Evaluator will automatically be run on all new Logs
+ within the Agent for monitoring purposes.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]]
+ Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+
+ deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]]
+ Evaluators to deactivate. These will not be run on new Logs.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.update_monitoring(
+ id="id",
+ )
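+
+ To activate an Evaluator for monitoring, pass an activate item (a
+ sketch; `evv_...` stands in for a real Evaluator Version ID, and the
+ item shape is assumed from the activation request type):
+
+ client.agents.update_monitoring(
+ id="id",
+ activate=[{"evaluator_version_id": "evv_..."}],
+ )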
+ """
+ response = self._raw_client.update_monitoring(
+ id, activate=activate, deactivate=deactivate, request_options=request_options
+ )
+ return response.data
+
+ def serialize(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> str:
+ """
+ Serialize an Agent to the .agent file format.
+
+ Useful for storing the Agent with your code in a version control system,
+ or for editing with an AI tool.
+
+ By default, the deployed version of the Agent is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ str
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.serialize(
+ id="id",
+ )
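+
+ To keep the Agent definition in version control, you might write the
+ returned string to a file (a sketch; the filename is a placeholder):
+
+ serialized = client.agents.serialize(
+ id="id",
+ )
+ with open("support-bot.agent", "w") as f:
+ f.write(serialized)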
+ """
+ response = self._raw_client.serialize(
+ id, version_id=version_id, environment=environment, request_options=request_options
+ )
+ return response.data
+
+ def deserialize(self, *, agent: str, request_options: typing.Optional[RequestOptions] = None) -> AgentKernelRequest:
+ """
+ Deserialize an Agent from the .agent file format.
+
+ This returns a subset of the attributes required by an Agent: the
+ subset that defines the Agent version (e.g. `model`, `temperature`, etc.).
+
+ Parameters
+ ----------
+ agent : str
+ The serialized Agent in the .agent file format.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentKernelRequest
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.deserialize(
+ agent="agent",
+ )
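+
+ Combined with `serialize`, this enables a round trip (a sketch;
+ `serialized` is assumed to hold the contents of a .agent file):
+
+ kernel = client.agents.deserialize(agent=serialized)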
+ """
+ response = self._raw_client.deserialize(agent=agent, request_options=request_options)
+ return response.data
+
+
+class AsyncAgentsClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._raw_client = AsyncRawAgentsClient(client_wrapper=client_wrapper)
+
+ @property
+ def with_raw_response(self) -> AsyncRawAgentsClient:
+ """
+ Retrieves a raw implementation of this client that returns raw responses.
+
+ Returns
+ -------
+ AsyncRawAgentsClient
+ """
+ return self._raw_client
+
+ async def log(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ run_id: typing.Optional[str] = OMIT,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ output_message: typing.Optional[ChatMessageParams] = OMIT,
+ prompt_tokens: typing.Optional[int] = OMIT,
+ reasoning_tokens: typing.Optional[int] = OMIT,
+ output_tokens: typing.Optional[int] = OMIT,
+ prompt_cost: typing.Optional[float] = OMIT,
+ output_cost: typing.Optional[float] = OMIT,
+ finish_reason: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentLogRequestAgentParams] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ output: typing.Optional[str] = OMIT,
+ created_at: typing.Optional[dt.datetime] = OMIT,
+ error: typing.Optional[str] = OMIT,
+ provider_latency: typing.Optional[float] = OMIT,
+ stdout: typing.Optional[str] = OMIT,
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agent_log_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> CreateAgentLogResponse:
+ """
+ Create an Agent Log.
+
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+ If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete`
+ in order to trigger Evaluators.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ run_id : typing.Optional[str]
+ Unique identifier for the Run to associate the Log to.
+
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ output_message : typing.Optional[ChatMessageParams]
+ The message returned by the provider.
+
+ prompt_tokens : typing.Optional[int]
+ Number of tokens in the prompt used to generate the output.
+
+ reasoning_tokens : typing.Optional[int]
+ Number of reasoning tokens used to generate the output.
+
+ output_tokens : typing.Optional[int]
+ Number of tokens in the output generated by the model.
+
+ prompt_cost : typing.Optional[float]
+ Cost in dollars associated to the tokens in the prompt.
+
+ output_cost : typing.Optional[float]
+ Cost in dollars associated to the tokens in the output.
+
+ finish_reason : typing.Optional[str]
+ Reason the generation finished.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentLogRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Agent.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Agent.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentLogRequestAgentParams]
+ The agent configuration to use. Two formats are supported:
+ - An `AgentKernelRequest` object containing the agent configuration
+ - A string containing a serialized .agent file
+ A new Agent version will be created if the provided details are new.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ output : typing.Optional[str]
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+ created_at : typing.Optional[dt.datetime]
+ User defined timestamp for when the log was created.
+
+ error : typing.Optional[str]
+ Error message if the log is an error.
+
+ provider_latency : typing.Optional[float]
+ Duration of the logged event in seconds.
+
+ stdout : typing.Optional[str]
+ Captured log and debug statements.
+
+ provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw request sent to provider.
+
+ provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw response received from the provider.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agent_log_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ CreateAgentLogResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.log()
+
+
+ asyncio.run(main())
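+
+ A more complete Log might look like this sketch, run inside an async
+ function (all values are illustrative placeholders):
+
+ await client.agents.log(
+ path="agents/support-bot",
+ messages=[{"role": "user", "content": "Hello"}],
+ output_message={"role": "assistant", "content": "Hi there!"},
+ log_status="complete",
+ )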
+ """
+ response = await self._raw_client.log(
+ version_id=version_id,
+ environment=environment,
+ run_id=run_id,
+ path=path,
+ id=id,
+ output_message=output_message,
+ prompt_tokens=prompt_tokens,
+ reasoning_tokens=reasoning_tokens,
+ output_tokens=output_tokens,
+ prompt_cost=prompt_cost,
+ output_cost=output_cost,
+ finish_reason=finish_reason,
+ messages=messages,
+ tool_choice=tool_choice,
+ agent=agent,
+ start_time=start_time,
+ end_time=end_time,
+ output=output,
+ created_at=created_at,
+ error=error,
+ provider_latency=provider_latency,
+ stdout=stdout,
+ provider_request=provider_request,
+ provider_response=provider_response,
+ inputs=inputs,
+ source=source,
+ metadata=metadata,
+ log_status=log_status,
+ source_datapoint_id=source_datapoint_id,
+ trace_parent_id=trace_parent_id,
+ user=user,
+ agent_log_request_environment=agent_log_request_environment,
+ save=save,
+ log_id=log_id,
+ request_options=request_options,
+ )
+ return response.data
+
+ async def update_log(
+ self,
+ id: str,
+ log_id: str,
+ *,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ output_message: typing.Optional[ChatMessageParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ output: typing.Optional[str] = OMIT,
+ error: typing.Optional[str] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> LogResponse:
+ """
+ Update a Log.
+
+ Update the details of a Log with the given ID.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ log_id : str
+ Unique identifier for the Log.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ List of chat messages that were used as an input to the Agent.
+
+ output_message : typing.Optional[ChatMessageParams]
+ The output message returned by this Agent.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the Agent Log.
+
+ output : typing.Optional[str]
+ The output of the Agent Log. Provide None to unset the existing `output` value. Provide either this, `output_message` or `error`.
+
+ error : typing.Optional[str]
+ The error message of the Agent Log. Provide None to unset the existing `error` value. Provide either this, `output_message` or `output`.
+
+ log_status : typing.Optional[LogStatus]
+ Status of the Agent Log. When an Agent Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ LogResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.update_log(
+ id="id",
+ log_id="log_id",
+ )
+
+
+ asyncio.run(main())
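+
+ To mark an incomplete Log as finished, a call inside an async function
+ might look like this sketch (values are placeholders):
+
+ await client.agents.update_log(
+ id="id",
+ log_id="log_id",
+ output="Final answer",
+ log_status="complete",
+ )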
+ """
+ response = await self._raw_client.update_log(
+ id,
+ log_id,
+ messages=messages,
+ output_message=output_message,
+ inputs=inputs,
+ output=output,
+ error=error,
+ log_status=log_status,
+ request_options=request_options,
+ )
+ return response.data
+
+ async def call_stream(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentsCallStreamRequestAgentParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agents_call_stream_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ return_inputs: typing.Optional[bool] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.AsyncIterator[AgentCallStreamResponse]:
+ """
+ Call an Agent.
+
+ Calling an Agent calls the model provider before logging
+ the request, responses and metadata to Humanloop.
+
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can pass in
+ Agent details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+ when you are storing or deriving your Agent details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Agent.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Agent.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentsCallStreamRequestAgentParams]
+ The agent configuration to use. Two formats are supported:
+ - An `AgentKernelRequest` object containing the agent configuration
+ - A string containing a serialized .agent file
+ A new Agent version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agents_call_stream_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Yields
+ ------
+ typing.AsyncIterator[AgentCallStreamResponse]
+
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ response = client.agents.call_stream()
+ async for chunk in response:
+ print(chunk)
+
+
+ asyncio.run(main())
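+
+ Inside an async function, targeting a specific Agent with template
+ inputs might look like this sketch (placeholder values):
+
+ response = client.agents.call_stream(
+ path="agents/support-bot",
+ messages=[{"role": "user", "content": "Hello"}],
+ inputs={"name": "Alice"},
+ )
+ async for chunk in response:
+ print(chunk)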
+ """
+ async with self._raw_client.call_stream(
+ version_id=version_id,
+ environment=environment,
+ path=path,
+ id=id,
+ messages=messages,
+ tool_choice=tool_choice,
+ agent=agent,
+ inputs=inputs,
+ source=source,
+ metadata=metadata,
+ start_time=start_time,
+ end_time=end_time,
+ log_status=log_status,
+ source_datapoint_id=source_datapoint_id,
+ trace_parent_id=trace_parent_id,
+ user=user,
+ agents_call_stream_request_environment=agents_call_stream_request_environment,
+ save=save,
+ log_id=log_id,
+ provider_api_keys=provider_api_keys,
+ return_inputs=return_inputs,
+ include_trace_children=include_trace_children,
+ request_options=request_options,
+ ) as r:
+ async for data in r.data:
+ yield data
+
+ async def call(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentsCallRequestAgentParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agents_call_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ return_inputs: typing.Optional[bool] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentCallResponse:
+ """
+ Call an Agent.
+
+ Calling an Agent calls the model provider before logging
+ the request, responses and metadata to Humanloop.
+
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can pass in
+ Agent details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+ when you are storing or deriving your Agent details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Agent.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Agent.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentsCallRequestAgentParams]
+ The agent configuration to use. Two formats are supported:
+ - An `AgentKernelRequest` object containing the agent configuration
+ - A string containing a serialized .agent file
+ A new Agent version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agents_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentCallResponse
+
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.call()
+
+
+ asyncio.run(main())
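+
+ A fuller call that forces a specific tool, run inside an async function,
+ might look like this sketch (the path and function name are
+ illustrative placeholders):
+
+ await client.agents.call(
+ path="agents/support-bot",
+ messages=[{"role": "user", "content": "What is 6 x 7?"}],
+ tool_choice={"type": "function", "function": {"name": "my_function"}},
+ )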
+ """
+ response = await self._raw_client.call(
+ version_id=version_id,
+ environment=environment,
+ path=path,
+ id=id,
+ messages=messages,
+ tool_choice=tool_choice,
+ agent=agent,
+ inputs=inputs,
+ source=source,
+ metadata=metadata,
+ start_time=start_time,
+ end_time=end_time,
+ log_status=log_status,
+ source_datapoint_id=source_datapoint_id,
+ trace_parent_id=trace_parent_id,
+ user=user,
+ agents_call_request_environment=agents_call_request_environment,
+ save=save,
+ log_id=log_id,
+ provider_api_keys=provider_api_keys,
+ return_inputs=return_inputs,
+ include_trace_children=include_trace_children,
+ request_options=request_options,
+ )
+ return response.data
+
+ async def continue_stream(
+ self,
+ *,
+ log_id: str,
+ messages: typing.Sequence[ChatMessageParams],
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.AsyncIterator[AgentContinueStreamResponse]:
+ """
+ Continue an incomplete Agent call.
+
+ This endpoint allows continuing an existing incomplete Agent call, using the context
+ from the previous interaction. The Agent will resume processing from where it left off.
+
+ The original log must be in an incomplete state to be continued.
+
+ The messages in the request will be appended
+ to the original messages in the log.
+
+ Parameters
+ ----------
+ log_id : str
+ This identifies the Agent Log to continue.
+
+ messages : typing.Sequence[ChatMessageParams]
+ The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Yields
+ ------
+ typing.AsyncIterator[AgentContinueStreamResponse]
+
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ response = client.agents.continue_stream(
+ log_id="log_id",
+ messages=[{"role": "user"}],
+ )
+ async for chunk in response:
+ print(chunk)
+
+
+ asyncio.run(main())
+ """
+ async with self._raw_client.continue_stream(
+ log_id=log_id,
+ messages=messages,
+ provider_api_keys=provider_api_keys,
+ include_trace_children=include_trace_children,
+ request_options=request_options,
+ ) as r:
+ async for data in r.data:
+ yield data
+
+ async def continue_(
+ self,
+ *,
+ log_id: str,
+ messages: typing.Sequence[ChatMessageParams],
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentContinueResponse:
+ """
+ Continue an incomplete Agent call.
+
+ This endpoint allows continuing an existing incomplete Agent call, using the context
+ from the previous interaction. The Agent will resume processing from where it left off.
+
+ The original log must be in an incomplete state to be continued.
+
+ The messages in the request will be appended
+ to the original messages in the log.
+
+ Parameters
+ ----------
+ log_id : str
+ This identifies the Agent Log to continue.
+
+ messages : typing.Sequence[ChatMessageParams]
+ The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentContinueResponse
+
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.continue_(
+ log_id="log_id",
+ messages=[{"role": "user"}],
+ )
+
+
+ asyncio.run(main())
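+
+ When continuing after tool calls, the messages typically carry tool
+ results (a sketch; the tool_call_id and content are placeholders and
+ the message shape is assumed to follow the OpenAI-style chat format):
+
+ await client.agents.continue_(
+ log_id="log_id",
+ messages=[
+ {"role": "tool", "tool_call_id": "call_123", "content": "42"},
+ ],
+ )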
+ """
+ response = await self._raw_client.continue_(
+ log_id=log_id,
+ messages=messages,
+ provider_api_keys=provider_api_keys,
+ include_trace_children=include_trace_children,
+ request_options=request_options,
+ )
+ return response.data
+
+ async def list(
+ self,
+ *,
+ page: typing.Optional[int] = None,
+ size: typing.Optional[int] = None,
+ name: typing.Optional[str] = None,
+ user_filter: typing.Optional[str] = None,
+ sort_by: typing.Optional[ProjectSortBy] = None,
+ order: typing.Optional[SortOrder] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> PaginatedDataAgentResponse:
+ """
+ Get a list of all Agents.
+
+ Parameters
+ ----------
+ page : typing.Optional[int]
+ Page number for pagination.
+
+ size : typing.Optional[int]
+ Page size for pagination. Number of Agents to fetch.
+
+ name : typing.Optional[str]
+ Case-insensitive filter for Agent name.
+
+ user_filter : typing.Optional[str]
+ Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users.
+
+ sort_by : typing.Optional[ProjectSortBy]
+ Field to sort Agents by.
+
+ order : typing.Optional[SortOrder]
+ Direction to sort by.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ PaginatedDataAgentResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.list()
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.list(
+ page=page,
+ size=size,
+ name=name,
+ user_filter=user_filter,
+ sort_by=sort_by,
+ order=order,
+ request_options=request_options,
+ )
+ return response.data
+
+ async def upsert(
+ self,
+ *,
+ model: str,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ endpoint: typing.Optional[ModelEndpoints] = OMIT,
+ template: typing.Optional[AgentRequestTemplateParams] = OMIT,
+ template_language: typing.Optional[TemplateLanguage] = OMIT,
+ provider: typing.Optional[ModelProviders] = OMIT,
+ max_tokens: typing.Optional[int] = OMIT,
+ temperature: typing.Optional[float] = OMIT,
+ top_p: typing.Optional[float] = OMIT,
+ stop: typing.Optional[AgentRequestStopParams] = OMIT,
+ presence_penalty: typing.Optional[float] = OMIT,
+ frequency_penalty: typing.Optional[float] = OMIT,
+ other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ seed: typing.Optional[int] = OMIT,
+ response_format: typing.Optional[ResponseFormatParams] = OMIT,
+ reasoning_effort: typing.Optional[AgentRequestReasoningEffortParams] = OMIT,
+ tools: typing.Optional[typing.Sequence[AgentRequestToolsItemParams]] = OMIT,
+ attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ max_iterations: typing.Optional[int] = OMIT,
+ version_name: typing.Optional[str] = OMIT,
+ version_description: typing.Optional[str] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ tags: typing.Optional[typing.Sequence[str]] = OMIT,
+ readme: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Create an Agent or update it with a new version if it already exists.
+
+ Agents are identified by the `ID` or their `path`. The parameters (e.g. the template, temperature, model) and
+ tools determine the versions of the Agent.
+
+ You can provide `version_name` and `version_description` to identify and describe your versions.
+ Version names must be unique within an Agent - attempting to create a version with a name
+ that already exists will result in a 409 Conflict error.
+
+ Parameters
+ ----------
+ model : str
+ The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ endpoint : typing.Optional[ModelEndpoints]
+ The provider model endpoint used.
+
+ template : typing.Optional[AgentRequestTemplateParams]
+ The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+ For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+ For completion models, provide a prompt template as a string.
+
+ Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+
+ template_language : typing.Optional[TemplateLanguage]
+ The template language to use for rendering the template.
+
+ provider : typing.Optional[ModelProviders]
+ The company providing the underlying model service.
+
+ max_tokens : typing.Optional[int]
+ The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt.
+
+ temperature : typing.Optional[float]
+ What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+
+ top_p : typing.Optional[float]
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+
+ stop : typing.Optional[AgentRequestStopParams]
+ The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+
+ presence_penalty : typing.Optional[float]
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+
+ frequency_penalty : typing.Optional[float]
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+
+ other : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Other parameter values to be passed to the provider call.
+
+ seed : typing.Optional[int]
+ If specified, the model will make a best effort to sample deterministically, but determinism is not guaranteed.
+
+ response_format : typing.Optional[ResponseFormatParams]
+ The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+
+ reasoning_effort : typing.Optional[AgentRequestReasoningEffortParams]
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which specifies the maximum token budget.
+
+ tools : typing.Optional[typing.Sequence[AgentRequestToolsItemParams]]
+
+ attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used.
+
+ max_iterations : typing.Optional[int]
+ The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
+
+ version_name : typing.Optional[str]
+ Unique name for the Agent version. Each Agent can only have one version with a given name.
+
+ version_description : typing.Optional[str]
+ Description of the Version.
+
+ description : typing.Optional[str]
+ Description of the Agent.
+
+ tags : typing.Optional[typing.Sequence[str]]
+ List of tags associated with this Agent.
+
+ readme : typing.Optional[str]
+ Long description of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.upsert(
+ model="model",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.upsert(
+ model=model,
+ path=path,
+ id=id,
+ endpoint=endpoint,
+ template=template,
+ template_language=template_language,
+ provider=provider,
+ max_tokens=max_tokens,
+ temperature=temperature,
+ top_p=top_p,
+ stop=stop,
+ presence_penalty=presence_penalty,
+ frequency_penalty=frequency_penalty,
+ other=other,
+ seed=seed,
+ response_format=response_format,
+ reasoning_effort=reasoning_effort,
+ tools=tools,
+ attributes=attributes,
+ max_iterations=max_iterations,
+ version_name=version_name,
+ version_description=version_description,
+ description=description,
+ tags=tags,
+ readme=readme,
+ request_options=request_options,
+ )
+ return response.data
+
+ async def delete_agent_version(
+ self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> None:
+ """
+ Delete a version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.delete_agent_version(
+ id="id",
+ version_id="version_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.delete_agent_version(id, version_id, request_options=request_options)
+ return response.data
+
+ async def patch_agent_version(
+ self,
+ id: str,
+ version_id: str,
+ *,
+ name: typing.Optional[str] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Update the name or description of the Agent version.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ name : typing.Optional[str]
+ Name of the version.
+
+ description : typing.Optional[str]
+ Description of the version.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.patch_agent_version(
+ id="id",
+ version_id="version_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.patch_agent_version(
+ id, version_id, name=name, description=description, request_options=request_options
+ )
+ return response.data
+
+ async def get(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Retrieve the Agent with the given ID.
+
+ By default, the deployed version of the Agent is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.get(
+ id="id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.get(
+ id, version_id=version_id, environment=environment, request_options=request_options
+ )
+ return response.data
+
+ async def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
+ """
+ Delete the Agent with the given ID.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.delete(
+ id="id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.delete(id, request_options=request_options)
+ return response.data
+
+ async def move(
+ self,
+ id: str,
+ *,
+ path: typing.Optional[str] = OMIT,
+ name: typing.Optional[str] = OMIT,
+ directory_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Move the Agent to a different path or change the name.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ path : typing.Optional[str]
+            Path of the Agent including the Agent name, which is used as a unique identifier.
+
+        name : typing.Optional[str]
+            Name of the Agent.
+
+        directory_id : typing.Optional[str]
+            Unique identifier for the Directory to move the Agent to. Starts with `dir_`.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.move(
+ id="id",
+ )
+
+
+ asyncio.run(main())
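+
+        To move the Agent to a new path (the path is illustrative), replace
+        the call inside `main()` with:
+
+        await client.agents.move(
+            id="id",
+            path="new-folder/new-name",
+        )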
+ """
+ response = await self._raw_client.move(
+ id, path=path, name=name, directory_id=directory_id, request_options=request_options
+ )
+ return response.data
+
+ async def list_versions(
+ self,
+ id: str,
+ *,
+ evaluator_aggregates: typing.Optional[bool] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> ListAgents:
+ """
+        Get a list of all the versions of an Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ evaluator_aggregates : typing.Optional[bool]
+            Whether to include Evaluator aggregate results for the versions in the response.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ ListAgents
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.list_versions(
+ id="id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.list_versions(
+ id, evaluator_aggregates=evaluator_aggregates, request_options=request_options
+ )
+ return response.data
+
+ async def set_deployment(
+ self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> AgentResponse:
+ """
+ Deploy Agent to an Environment.
+
+        Set the deployed version for the specified Environment. This version
+        will be used for calls made to the Agent in this Environment.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ environment_id : str
+ Unique identifier for the Environment to deploy the Version to.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.set_deployment(
+ id="id",
+ environment_id="environment_id",
+ version_id="version_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.set_deployment(
+ id, environment_id, version_id=version_id, request_options=request_options
+ )
+ return response.data
+
+ async def remove_deployment(
+ self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> None:
+ """
+ Remove deployed Agent from the Environment.
+
+        Remove the deployed version for the specified Environment. The Agent
+        will no longer be used for calls made in this Environment.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ environment_id : str
+ Unique identifier for the Environment to remove the deployment from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.remove_deployment(
+ id="id",
+ environment_id="environment_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.remove_deployment(id, environment_id, request_options=request_options)
+ return response.data
+
+ async def list_environments(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> typing.List[FileEnvironmentResponse]:
+ """
+ List all Environments and their deployed versions for the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.List[FileEnvironmentResponse]
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.list_environments(
+ id="id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.list_environments(id, request_options=request_options)
+ return response.data
+
+ async def update_monitoring(
+ self,
+ id: str,
+ *,
+ activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT,
+ deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Activate and deactivate Evaluators for monitoring the Agent.
+
+ An activated Evaluator will automatically be run on all new Logs
+ within the Agent for monitoring purposes.
+
+ Parameters
+ ----------
+ id : str
+
+ activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]]
+ Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+
+ deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]]
+ Evaluators to deactivate. These will not be run on new Logs.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.update_monitoring(
+ id="id",
+ )
+
+
+ asyncio.run(main())
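+
+        To activate a monitoring Evaluator (the Evaluator Version ID is
+        illustrative, and the activate item is assumed to take an
+        `evaluator_version_id` field), replace the call inside `main()` with:
+
+        await client.agents.update_monitoring(
+            id="id",
+            activate=[{"evaluator_version_id": "evv_12345"}],
+        )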
+ """
+ response = await self._raw_client.update_monitoring(
+ id, activate=activate, deactivate=deactivate, request_options=request_options
+ )
+ return response.data
+
+ async def serialize(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> str:
+ """
+ Serialize an Agent to the .agent file format.
+
+ Useful for storing the Agent with your code in a version control system,
+ or for editing with an AI tool.
+
+ By default, the deployed version of the Agent is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ str
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.serialize(
+ id="id",
+ )
+
+
+ asyncio.run(main())
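+
+        The returned string contains the raw .agent file contents; inside
+        `main()` you might write it to disk for version control (the
+        filename is illustrative):
+
+        serialized = await client.agents.serialize(
+            id="id",
+        )
+        with open("my-agent.agent", "w") as f:
+            f.write(serialized)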
+ """
+ response = await self._raw_client.serialize(
+ id, version_id=version_id, environment=environment, request_options=request_options
+ )
+ return response.data
+
+ async def deserialize(
+ self, *, agent: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> AgentKernelRequest:
+ """
+ Deserialize an Agent from the .agent file format.
+
+ This returns a subset of the attributes required by an Agent.
+        This subset comprises the attributes that define the Agent version (e.g. `model`, `temperature`, etc.).
+
+ Parameters
+ ----------
+ agent : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentKernelRequest
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.deserialize(
+ agent="agent",
+ )
+
+
+ asyncio.run(main())
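+
+        `agent` should contain the contents of a serialized .agent file; a
+        round-trip inside `main()` (the ID is illustrative) looks like:
+
+        serialized = await client.agents.serialize(id="id")
+        kernel = await client.agents.deserialize(agent=serialized)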
+ """
+ response = await self._raw_client.deserialize(agent=agent, request_options=request_options)
+ return response.data
diff --git a/src/humanloop/agents/raw_client.py b/src/humanloop/agents/raw_client.py
new file mode 100644
index 00000000..226f3c35
--- /dev/null
+++ b/src/humanloop/agents/raw_client.py
@@ -0,0 +1,3891 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ..core.client_wrapper import SyncClientWrapper
+from ..requests.chat_message import ChatMessageParams
+from .requests.agent_log_request_tool_choice import AgentLogRequestToolChoiceParams
+from .requests.agent_log_request_agent import AgentLogRequestAgentParams
+import datetime as dt
+from ..types.log_status import LogStatus
+from ..core.request_options import RequestOptions
+from ..core.http_response import HttpResponse
+from ..types.create_agent_log_response import CreateAgentLogResponse
+from ..core.serialization import convert_and_respect_annotation_metadata
+from ..core.unchecked_base_model import construct_type
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.http_validation_error import HttpValidationError
+import json
+from json.decoder import JSONDecodeError
+from ..core.api_error import ApiError
+from ..types.log_response import LogResponse
+from ..core.jsonable_encoder import jsonable_encoder
+from .requests.agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoiceParams
+from .requests.agents_call_stream_request_agent import AgentsCallStreamRequestAgentParams
+from ..requests.provider_api_keys import ProviderApiKeysParams
+from ..types.agent_call_stream_response import AgentCallStreamResponse
+import httpx_sse
+import contextlib
+from .requests.agents_call_request_tool_choice import AgentsCallRequestToolChoiceParams
+from .requests.agents_call_request_agent import AgentsCallRequestAgentParams
+from ..types.agent_call_response import AgentCallResponse
+from ..types.agent_continue_stream_response import AgentContinueStreamResponse
+from ..types.agent_continue_response import AgentContinueResponse
+from ..types.project_sort_by import ProjectSortBy
+from ..types.sort_order import SortOrder
+from ..types.paginated_data_agent_response import PaginatedDataAgentResponse
+from ..types.model_endpoints import ModelEndpoints
+from .requests.agent_request_template import AgentRequestTemplateParams
+from ..types.template_language import TemplateLanguage
+from ..types.model_providers import ModelProviders
+from .requests.agent_request_stop import AgentRequestStopParams
+from ..requests.response_format import ResponseFormatParams
+from .requests.agent_request_reasoning_effort import AgentRequestReasoningEffortParams
+from .requests.agent_request_tools_item import AgentRequestToolsItemParams
+from ..types.agent_response import AgentResponse
+from ..types.list_agents import ListAgents
+from ..types.file_environment_response import FileEnvironmentResponse
+from ..requests.evaluator_activation_deactivation_request_activate_item import (
+ EvaluatorActivationDeactivationRequestActivateItemParams,
+)
+from ..requests.evaluator_activation_deactivation_request_deactivate_item import (
+ EvaluatorActivationDeactivationRequestDeactivateItemParams,
+)
+from ..types.agent_kernel_request import AgentKernelRequest
+from ..core.client_wrapper import AsyncClientWrapper
+from ..core.http_response import AsyncHttpResponse
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class RawAgentsClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ def log(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ run_id: typing.Optional[str] = OMIT,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ output_message: typing.Optional[ChatMessageParams] = OMIT,
+ prompt_tokens: typing.Optional[int] = OMIT,
+ reasoning_tokens: typing.Optional[int] = OMIT,
+ output_tokens: typing.Optional[int] = OMIT,
+ prompt_cost: typing.Optional[float] = OMIT,
+ output_cost: typing.Optional[float] = OMIT,
+ finish_reason: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentLogRequestAgentParams] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ output: typing.Optional[str] = OMIT,
+ created_at: typing.Optional[dt.datetime] = OMIT,
+ error: typing.Optional[str] = OMIT,
+ provider_latency: typing.Optional[float] = OMIT,
+ stdout: typing.Optional[str] = OMIT,
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agent_log_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[CreateAgentLogResponse]:
+ """
+ Create an Agent Log.
+
+        You can use the query parameters `version_id` or `environment` to target
+        an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+ If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete`
+ in order to trigger Evaluators.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ run_id : typing.Optional[str]
+ Unique identifier for the Run to associate the Log to.
+
+ path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ output_message : typing.Optional[ChatMessageParams]
+ The message returned by the provider.
+
+ prompt_tokens : typing.Optional[int]
+ Number of tokens in the prompt used to generate the output.
+
+ reasoning_tokens : typing.Optional[int]
+ Number of reasoning tokens used to generate the output.
+
+ output_tokens : typing.Optional[int]
+ Number of tokens in the output generated by the model.
+
+ prompt_cost : typing.Optional[float]
+ Cost in dollars associated to the tokens in the prompt.
+
+ output_cost : typing.Optional[float]
+ Cost in dollars associated to the tokens in the output.
+
+ finish_reason : typing.Optional[str]
+ Reason the generation finished.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentLogRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentLogRequestAgentParams]
+ The agent configuration to use. Two formats are supported:
+ - An `AgentKernelRequest` object containing the agent configuration
+ - A string containing a serialized .agent file
+ A new Agent version will be created if the provided details are new.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ output : typing.Optional[str]
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+ created_at : typing.Optional[dt.datetime]
+ User defined timestamp for when the log was created.
+
+ error : typing.Optional[str]
+ Error message if the log is an error.
+
+ provider_latency : typing.Optional[float]
+ Duration of the logged event in seconds.
+
+ stdout : typing.Optional[str]
+ Captured log and debug statements.
+
+ provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw request sent to provider.
+
+ provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw response received from the provider.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agent_log_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[CreateAgentLogResponse]
+ Successful Response
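+
+        Examples
+        --------
+        A minimal sketch; it assumes the raw client is exposed as
+        `client.agents.with_raw_response`, and the path and values are
+        illustrative.
+
+        from humanloop import Humanloop
+
+        client = Humanloop(
+            api_key="YOUR_API_KEY",
+        )
+        response = client.agents.with_raw_response.log(
+            path="folder/my-agent",
+            output="Final answer from the agent.",
+            log_status="complete",
+        )
+        print(response.data)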
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "agents/log",
+ method="POST",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ json={
+ "run_id": run_id,
+ "path": path,
+ "id": id,
+ "output_message": convert_and_respect_annotation_metadata(
+ object_=output_message, annotation=ChatMessageParams, direction="write"
+ ),
+ "prompt_tokens": prompt_tokens,
+ "reasoning_tokens": reasoning_tokens,
+ "output_tokens": output_tokens,
+ "prompt_cost": prompt_cost,
+ "output_cost": output_cost,
+ "finish_reason": finish_reason,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "tool_choice": convert_and_respect_annotation_metadata(
+ object_=tool_choice, annotation=AgentLogRequestToolChoiceParams, direction="write"
+ ),
+ "agent": convert_and_respect_annotation_metadata(
+ object_=agent, annotation=AgentLogRequestAgentParams, direction="write"
+ ),
+ "start_time": start_time,
+ "end_time": end_time,
+ "output": output,
+ "created_at": created_at,
+ "error": error,
+ "provider_latency": provider_latency,
+ "stdout": stdout,
+ "provider_request": provider_request,
+ "provider_response": provider_response,
+ "inputs": inputs,
+ "source": source,
+ "metadata": metadata,
+ "log_status": log_status,
+ "source_datapoint_id": source_datapoint_id,
+ "trace_parent_id": trace_parent_id,
+ "user": user,
+ "environment": agent_log_request_environment,
+ "save": save,
+ "log_id": log_id,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ CreateAgentLogResponse,
+ construct_type(
+ type_=CreateAgentLogResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def update_log(
+ self,
+ id: str,
+ log_id: str,
+ *,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ output_message: typing.Optional[ChatMessageParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ output: typing.Optional[str] = OMIT,
+ error: typing.Optional[str] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[LogResponse]:
+ """
+ Update a Log.
+
+ Update the details of a Log with the given ID.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ log_id : str
+ Unique identifier for the Log.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ List of chat messages that were used as an input to the Flow.
+
+ output_message : typing.Optional[ChatMessageParams]
+ The output message returned by this Flow.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the Flow Log.
+
+ output : typing.Optional[str]
+ The output of the Flow Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`.
+
+ error : typing.Optional[str]
+ The error message of the Flow Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`.
+
+ log_status : typing.Optional[LogStatus]
+ Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[LogResponse]
+ Successful Response
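+
+        Examples
+        --------
+        A minimal sketch that marks an incomplete Log as complete so
+        monitoring Evaluators can run; it assumes the raw client is exposed
+        as `client.agents.with_raw_response`, and the identifiers are
+        illustrative.
+
+        from humanloop import Humanloop
+
+        client = Humanloop(
+            api_key="YOUR_API_KEY",
+        )
+        response = client.agents.with_raw_response.update_log(
+            id="ag_12345",
+            log_id="log_12345",
+            log_status="complete",
+        )
+        print(response.data)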
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/log/{jsonable_encoder(log_id)}",
+ method="PATCH",
+ json={
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "output_message": convert_and_respect_annotation_metadata(
+ object_=output_message, annotation=ChatMessageParams, direction="write"
+ ),
+ "inputs": inputs,
+ "output": output,
+ "error": error,
+ "log_status": log_status,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ LogResponse,
+ construct_type(
+ type_=LogResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ @contextlib.contextmanager
+ def call_stream(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentsCallStreamRequestAgentParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agents_call_stream_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ return_inputs: typing.Optional[bool] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.Iterator[HttpResponse[typing.Iterator[AgentCallStreamResponse]]]:
+ """
+ Call an Agent.
+
+ Calling an Agent calls the model provider before logging
+ the request, responses and metadata to Humanloop.
+
+        You can use the query parameters `version_id` or `environment` to target
+        an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+        Instead of targeting an existing version explicitly, you can pass
+        Agent details in the request body. In this case, we will check if the details correspond
+        to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+        when you are storing or deriving your Agent details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentsCallStreamRequestAgentParams]
+ The agent configuration to use. Two formats are supported:
+ - An `AgentKernelRequest` object containing the agent configuration
+ - A string containing a serialized .agent file
+ A new Agent version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agents_call_stream_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Yields
+ ------
+ typing.Iterator[HttpResponse[typing.Iterator[AgentCallStreamResponse]]]
+
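+        Examples
+        --------
+        A minimal sketch; it assumes the raw client is exposed as
+        `client.agents.with_raw_response`, and the path and message are
+        illustrative.
+
+        from humanloop import Humanloop
+
+        client = Humanloop(
+            api_key="YOUR_API_KEY",
+        )
+        with client.agents.with_raw_response.call_stream(
+            path="folder/my-agent",
+            messages=[{"role": "user", "content": "Hello"}],
+        ) as response:
+            for chunk in response.data:
+                print(chunk)
+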
+ """
+ with self._client_wrapper.httpx_client.stream(
+ "agents/call",
+ method="POST",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ json={
+ "path": path,
+ "id": id,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "tool_choice": convert_and_respect_annotation_metadata(
+ object_=tool_choice, annotation=AgentsCallStreamRequestToolChoiceParams, direction="write"
+ ),
+ "agent": convert_and_respect_annotation_metadata(
+ object_=agent, annotation=AgentsCallStreamRequestAgentParams, direction="write"
+ ),
+ "inputs": inputs,
+ "source": source,
+ "metadata": metadata,
+ "start_time": start_time,
+ "end_time": end_time,
+ "log_status": log_status,
+ "source_datapoint_id": source_datapoint_id,
+ "trace_parent_id": trace_parent_id,
+ "user": user,
+ "environment": agents_call_stream_request_environment,
+ "save": save,
+ "log_id": log_id,
+ "provider_api_keys": convert_and_respect_annotation_metadata(
+ object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+ ),
+ "return_inputs": return_inputs,
+ "include_trace_children": include_trace_children,
+ "stream": True,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ ) as _response:
+
+ def stream() -> HttpResponse[typing.Iterator[AgentCallStreamResponse]]:
+ try:
+ if 200 <= _response.status_code < 300:
+
+                        def _iter():
+                            _event_source = httpx_sse.EventSource(_response)
+                            for _sse in _event_source.iter_sse():
+                                # _sse.data is the raw SSE payload string; skip
+                                # empty events and parse the rest into the typed
+                                # stream response, ignoring malformed payloads.
+                                if _sse.data is None:
+                                    return
+                                try:
+                                    yield typing.cast(
+                                        AgentCallStreamResponse,
+                                        construct_type(
+                                            type_=AgentCallStreamResponse,  # type: ignore
+                                            object_=json.loads(_sse.data),
+                                        ),
+                                    )
+                                except Exception:
+                                    pass
+                            return
+
+ return HttpResponse(response=_response, data=_iter())
+ _response.read()
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ yield stream()
+
+ def call(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentsCallRequestAgentParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agents_call_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ return_inputs: typing.Optional[bool] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[AgentCallResponse]:
+ """
+ Call an Agent.
+
+ Calling an Agent calls the model provider before logging
+ the request, responses and metadata to Humanloop.
+
+        You can use the query parameters `version_id` or `environment` to target
+        an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+        Instead of targeting an existing version explicitly, you can pass
+        Agent details in the request body. In this case, we will check if the details correspond
+        to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+        when you are storing or deriving your Agent details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentsCallRequestAgentParams]
+ The agent configuration to use. Two formats are supported:
+ - An `AgentKernelRequest` object containing the agent configuration
+ - A string containing a serialized .agent file
+ A new Agent version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agents_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[AgentCallResponse]
+
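+        Examples
+        --------
+        A minimal sketch; it assumes the raw client is exposed as
+        `client.agents.with_raw_response`, and the path and message are
+        illustrative.
+
+        from humanloop import Humanloop
+
+        client = Humanloop(
+            api_key="YOUR_API_KEY",
+        )
+        response = client.agents.with_raw_response.call(
+            path="folder/my-agent",
+            messages=[{"role": "user", "content": "Hello"}],
+        )
+        print(response.data)
+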
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "agents/call",
+ method="POST",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ json={
+ "path": path,
+ "id": id,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "tool_choice": convert_and_respect_annotation_metadata(
+ object_=tool_choice, annotation=AgentsCallRequestToolChoiceParams, direction="write"
+ ),
+ "agent": convert_and_respect_annotation_metadata(
+ object_=agent, annotation=AgentsCallRequestAgentParams, direction="write"
+ ),
+ "inputs": inputs,
+ "source": source,
+ "metadata": metadata,
+ "start_time": start_time,
+ "end_time": end_time,
+ "log_status": log_status,
+ "source_datapoint_id": source_datapoint_id,
+ "trace_parent_id": trace_parent_id,
+ "user": user,
+ "environment": agents_call_request_environment,
+ "save": save,
+ "log_id": log_id,
+ "provider_api_keys": convert_and_respect_annotation_metadata(
+ object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+ ),
+ "return_inputs": return_inputs,
+ "include_trace_children": include_trace_children,
+ "stream": False,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentCallResponse,
+ construct_type(
+ type_=AgentCallResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ @contextlib.contextmanager
+ def continue_stream(
+ self,
+ *,
+ log_id: str,
+ messages: typing.Sequence[ChatMessageParams],
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.Iterator[HttpResponse[typing.Iterator[AgentContinueStreamResponse]]]:
+ """
+ Continue an incomplete Agent call.
+
+ This endpoint allows continuing an existing incomplete Agent call, using the context
+ from the previous interaction. The Agent will resume processing from where it left off.
+
+ The original log must be in an incomplete state to be continued.
+
+ The messages in the request will be appended
+ to the original messages in the log.
+
+ Parameters
+ ----------
+ log_id : str
+ This identifies the Agent Log to continue.
+
+ messages : typing.Sequence[ChatMessageParams]
+            The additional messages with which to continue the Agent Log. Often, these should start with Tool messages containing the results of the previous Assistant message's tool calls.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Yields
+ ------
+ typing.Iterator[HttpResponse[typing.Iterator[AgentContinueStreamResponse]]]
+
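+        Examples
+        --------
+        A minimal sketch; it assumes the raw client is exposed as
+        `client.agents.with_raw_response`. The Log ID and tool-result
+        message are illustrative.
+
+        from humanloop import Humanloop
+
+        client = Humanloop(
+            api_key="YOUR_API_KEY",
+        )
+        with client.agents.with_raw_response.continue_stream(
+            log_id="log_12345",
+            messages=[{"role": "tool", "content": "42", "tool_call_id": "call_12345"}],
+        ) as response:
+            for chunk in response.data:
+                print(chunk)
+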
+ """
+ with self._client_wrapper.httpx_client.stream(
+ "agents/continue",
+ method="POST",
+ json={
+ "log_id": log_id,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "provider_api_keys": convert_and_respect_annotation_metadata(
+ object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+ ),
+ "include_trace_children": include_trace_children,
+ "stream": True,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ ) as _response:
+
+ def stream() -> HttpResponse[typing.Iterator[AgentContinueStreamResponse]]:
+ try:
+ if 200 <= _response.status_code < 300:
+
+                        def _iter():
+                            _event_source = httpx_sse.EventSource(_response)
+                            for _sse in _event_source.iter_sse():
+                                # _sse.data is the raw SSE payload string; skip
+                                # empty events and parse the rest into the typed
+                                # stream response, ignoring malformed payloads.
+                                if _sse.data is None:
+                                    return
+                                try:
+                                    yield typing.cast(
+                                        AgentContinueStreamResponse,
+                                        construct_type(
+                                            type_=AgentContinueStreamResponse,  # type: ignore
+                                            object_=json.loads(_sse.data),
+                                        ),
+                                    )
+                                except Exception:
+                                    pass
+                            return
+
+ return HttpResponse(response=_response, data=_iter())
+ _response.read()
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ yield stream()
+
+ def continue_(
+ self,
+ *,
+ log_id: str,
+ messages: typing.Sequence[ChatMessageParams],
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[AgentContinueResponse]:
+ """
+ Continue an incomplete Agent call.
+
+ This endpoint allows continuing an existing incomplete Agent call, using the context
+ from the previous interaction. The Agent will resume processing from where it left off.
+
+ The original log must be in an incomplete state to be continued.
+
+ The messages in the request will be appended
+ to the original messages in the log.
+
+ Parameters
+ ----------
+ log_id : str
+ This identifies the Agent Log to continue.
+
+ messages : typing.Sequence[ChatMessageParams]
+            The additional messages with which to continue the Agent Log. Often, these should start with Tool messages containing the results of the previous Assistant message's tool calls.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[AgentContinueResponse]
+
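+        Examples
+        --------
+        A minimal sketch; it assumes the raw client is exposed as
+        `client.agents.with_raw_response`. The Log ID and tool-result
+        message are illustrative.
+
+        from humanloop import Humanloop
+
+        client = Humanloop(
+            api_key="YOUR_API_KEY",
+        )
+        response = client.agents.with_raw_response.continue_(
+            log_id="log_12345",
+            messages=[{"role": "tool", "content": "42", "tool_call_id": "call_12345"}],
+        )
+        print(response.data)
+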
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "agents/continue",
+ method="POST",
+ json={
+ "log_id": log_id,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "provider_api_keys": convert_and_respect_annotation_metadata(
+ object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+ ),
+ "include_trace_children": include_trace_children,
+ "stream": False,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentContinueResponse,
+ construct_type(
+ type_=AgentContinueResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def list(
+ self,
+ *,
+ page: typing.Optional[int] = None,
+ size: typing.Optional[int] = None,
+ name: typing.Optional[str] = None,
+ user_filter: typing.Optional[str] = None,
+ sort_by: typing.Optional[ProjectSortBy] = None,
+ order: typing.Optional[SortOrder] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[PaginatedDataAgentResponse]:
+ """
+ Get a list of all Agents.
+
+ Parameters
+ ----------
+ page : typing.Optional[int]
+ Page number for pagination.
+
+ size : typing.Optional[int]
+ Page size for pagination. Number of Agents to fetch.
+
+ name : typing.Optional[str]
+ Case-insensitive filter for Agent name.
+
+ user_filter : typing.Optional[str]
+ Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users.
+
+ sort_by : typing.Optional[ProjectSortBy]
+            Field to sort Agents by.
+
+ order : typing.Optional[SortOrder]
+ Direction to sort by.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[PaginatedDataAgentResponse]
+ Successful Response
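+
+        Examples
+        --------
+        A minimal sketch; it assumes the raw client is exposed as
+        `client.agents.with_raw_response` and that the paginated payload
+        exposes a `records` list.
+
+        from humanloop import Humanloop
+
+        client = Humanloop(
+            api_key="YOUR_API_KEY",
+        )
+        response = client.agents.with_raw_response.list(
+            page=1,
+            size=10,
+        )
+        for agent in response.data.records:
+            print(agent.path)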
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "agents",
+ method="GET",
+ params={
+ "page": page,
+ "size": size,
+ "name": name,
+ "user_filter": user_filter,
+ "sort_by": sort_by,
+ "order": order,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ PaginatedDataAgentResponse,
+ construct_type(
+ type_=PaginatedDataAgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def upsert(
+ self,
+ *,
+ model: str,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ endpoint: typing.Optional[ModelEndpoints] = OMIT,
+ template: typing.Optional[AgentRequestTemplateParams] = OMIT,
+ template_language: typing.Optional[TemplateLanguage] = OMIT,
+ provider: typing.Optional[ModelProviders] = OMIT,
+ max_tokens: typing.Optional[int] = OMIT,
+ temperature: typing.Optional[float] = OMIT,
+ top_p: typing.Optional[float] = OMIT,
+ stop: typing.Optional[AgentRequestStopParams] = OMIT,
+ presence_penalty: typing.Optional[float] = OMIT,
+ frequency_penalty: typing.Optional[float] = OMIT,
+ other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ seed: typing.Optional[int] = OMIT,
+ response_format: typing.Optional[ResponseFormatParams] = OMIT,
+ reasoning_effort: typing.Optional[AgentRequestReasoningEffortParams] = OMIT,
+ tools: typing.Optional[typing.Sequence[AgentRequestToolsItemParams]] = OMIT,
+ attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ max_iterations: typing.Optional[int] = OMIT,
+ version_name: typing.Optional[str] = OMIT,
+ version_description: typing.Optional[str] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ tags: typing.Optional[typing.Sequence[str]] = OMIT,
+ readme: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[AgentResponse]:
+ """
+ Create an Agent or update it with a new version if it already exists.
+
+        Agents are identified by the `ID` or their `path`. The parameters (i.e. the template, temperature, model, etc.) and
+        tools determine the versions of the Agent.
+
+        You can provide `version_name` and `version_description` to identify and describe your versions.
+        Version names must be unique within an Agent; attempting to create a version with a name
+        that already exists will result in a 409 Conflict error.
+
+ Parameters
+ ----------
+ model : str
+ The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+
+ path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ endpoint : typing.Optional[ModelEndpoints]
+ The provider model endpoint used.
+
+ template : typing.Optional[AgentRequestTemplateParams]
+ The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+ For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+ For completion models, provide a prompt template as a string.
+
+ Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+
+ template_language : typing.Optional[TemplateLanguage]
+ The template language to use for rendering the template.
+
+ provider : typing.Optional[ModelProviders]
+ The company providing the underlying model service.
+
+ max_tokens : typing.Optional[int]
+            The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt.
+
+ temperature : typing.Optional[float]
+ What sampling temperature to use when making a generation. Higher values means the model will be more creative.
+
+ top_p : typing.Optional[float]
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+
+ stop : typing.Optional[AgentRequestStopParams]
+ The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+
+ presence_penalty : typing.Optional[float]
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+
+ frequency_penalty : typing.Optional[float]
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+
+ other : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Other parameter values to be passed to the provider call.
+
+ seed : typing.Optional[int]
+ If specified, model will make a best effort to sample deterministically, but it is not guaranteed.
+
+ response_format : typing.Optional[ResponseFormatParams]
+ The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+
+ reasoning_effort : typing.Optional[AgentRequestReasoningEffortParams]
+            Guidance on how many reasoning tokens it should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
+
+ tools : typing.Optional[typing.Sequence[AgentRequestToolsItemParams]]
+
+ attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used.
+
+ max_iterations : typing.Optional[int]
+ The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
+
+ version_name : typing.Optional[str]
+            Unique name for the Agent version. Each Agent can only have one version with a given name.
+
+ version_description : typing.Optional[str]
+ Description of the Version.
+
+ description : typing.Optional[str]
+            Description of the Agent.
+
+ tags : typing.Optional[typing.Sequence[str]]
+            List of tags associated with this Agent.
+
+ readme : typing.Optional[str]
+            Long description of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[AgentResponse]
+ Successful Response
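+
+        Examples
+        --------
+        A minimal sketch; it assumes the raw client is exposed as
+        `client.agents.with_raw_response`. The path, model, and template
+        are illustrative.
+
+        from humanloop import Humanloop
+
+        client = Humanloop(
+            api_key="YOUR_API_KEY",
+        )
+        response = client.agents.with_raw_response.upsert(
+            path="folder/my-agent",
+            model="gpt-4o",
+            template=[{"role": "system", "content": "You are a helpful agent."}],
+            max_iterations=5,
+        )
+        print(response.data)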
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "agents",
+ method="POST",
+ json={
+ "path": path,
+ "id": id,
+ "model": model,
+ "endpoint": endpoint,
+ "template": convert_and_respect_annotation_metadata(
+ object_=template, annotation=AgentRequestTemplateParams, direction="write"
+ ),
+ "template_language": template_language,
+ "provider": provider,
+ "max_tokens": max_tokens,
+ "temperature": temperature,
+ "top_p": top_p,
+ "stop": convert_and_respect_annotation_metadata(
+ object_=stop, annotation=AgentRequestStopParams, direction="write"
+ ),
+ "presence_penalty": presence_penalty,
+ "frequency_penalty": frequency_penalty,
+ "other": other,
+ "seed": seed,
+ "response_format": convert_and_respect_annotation_metadata(
+ object_=response_format, annotation=ResponseFormatParams, direction="write"
+ ),
+ "reasoning_effort": convert_and_respect_annotation_metadata(
+ object_=reasoning_effort, annotation=AgentRequestReasoningEffortParams, direction="write"
+ ),
+ "tools": convert_and_respect_annotation_metadata(
+ object_=tools, annotation=typing.Sequence[AgentRequestToolsItemParams], direction="write"
+ ),
+ "attributes": attributes,
+ "max_iterations": max_iterations,
+ "version_name": version_name,
+ "version_description": version_description,
+ "description": description,
+ "tags": tags,
+ "readme": readme,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def delete_agent_version(
+ self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> HttpResponse[None]:
+ """
+ Delete a version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[None]
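+
+ Examples
+ --------
+ Hand-written sketch (not generated output); the IDs are hypothetical
+ placeholders and `with_raw_response` is assumed to expose this raw client.
+
+ from humanloop import Humanloop
+
+ client = Humanloop(api_key="YOUR_API_KEY")
+ client.agents.with_raw_response.delete_agent_version(
+ id="ag_1234",  # placeholder Agent ID
+ version_id="agv_5678",  # placeholder version ID
+ )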
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return HttpResponse(response=_response, data=None)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def patch_agent_version(
+ self,
+ id: str,
+ version_id: str,
+ *,
+ name: typing.Optional[str] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[AgentResponse]:
+ """
+ Update the name or description of the Agent version.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ name : typing.Optional[str]
+ Name of the version.
+
+ description : typing.Optional[str]
+ Description of the version.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[AgentResponse]
+ Successful Response
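+
+ Examples
+ --------
+ Hand-written sketch (not generated output); IDs and names are hypothetical
+ placeholders.
+
+ from humanloop import Humanloop
+
+ client = Humanloop(api_key="YOUR_API_KEY")
+ response = client.agents.with_raw_response.patch_agent_version(
+ id="ag_1234",
+ version_id="agv_5678",
+ name="v2",
+ description="Reworded system message.",
+ )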
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}",
+ method="PATCH",
+ json={
+ "name": name,
+ "description": description,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def get(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[AgentResponse]:
+ """
+ Retrieve the Agent with the given ID.
+
+ By default, the deployed version of the Agent is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[AgentResponse]
+ Successful Response
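+
+ Examples
+ --------
+ Hand-written sketch (not generated output); the ID is a hypothetical
+ placeholder and `with_raw_response` is assumed to expose this raw client.
+
+ from humanloop import Humanloop
+
+ client = Humanloop(api_key="YOUR_API_KEY")
+ response = client.agents.with_raw_response.get(id="ag_1234")
+ print(response.data)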
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}",
+ method="GET",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> HttpResponse[None]:
+ """
+ Delete the Agent with the given ID.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[None]
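+
+ Examples
+ --------
+ Hand-written sketch (not generated output); the ID is a hypothetical
+ placeholder.
+
+ from humanloop import Humanloop
+
+ client = Humanloop(api_key="YOUR_API_KEY")
+ client.agents.with_raw_response.delete(id="ag_1234")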
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return HttpResponse(response=_response, data=None)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def move(
+ self,
+ id: str,
+ *,
+ path: typing.Optional[str] = OMIT,
+ name: typing.Optional[str] = OMIT,
+ directory_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[AgentResponse]:
+ """
+ Move the Agent to a different path or change the name.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ path : typing.Optional[str]
+ Path of the Agent including the Agent name, which is used as a unique identifier.
+
+ name : typing.Optional[str]
+ Name of the Agent.
+
+ directory_id : typing.Optional[str]
+ Unique identifier for the Directory to move the Agent to. Starts with `dir_`.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[AgentResponse]
+ Successful Response
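+
+ Examples
+ --------
+ Hand-written sketch (not generated output); the ID and path are hypothetical
+ placeholders.
+
+ from humanloop import Humanloop
+
+ client = Humanloop(api_key="YOUR_API_KEY")
+ response = client.agents.with_raw_response.move(
+ id="ag_1234",
+ path="new-folder/my-agent",
+ )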
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}",
+ method="PATCH",
+ json={
+ "path": path,
+ "name": name,
+ "directory_id": directory_id,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def list_versions(
+ self,
+ id: str,
+ *,
+ evaluator_aggregates: typing.Optional[bool] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[ListAgents]:
+ """
+ Get a list of all the versions of an Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ evaluator_aggregates : typing.Optional[bool]
+ Whether to include Evaluator aggregate results for the versions in the response.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[ListAgents]
+ Successful Response
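+
+ Examples
+ --------
+ Hand-written sketch (not generated output); the ID is a hypothetical
+ placeholder, and `ListAgents` is assumed to expose a `records` field.
+
+ from humanloop import Humanloop
+
+ client = Humanloop(api_key="YOUR_API_KEY")
+ response = client.agents.with_raw_response.list_versions(
+ id="ag_1234",
+ evaluator_aggregates=True,
+ )
+ for version in response.data.records:
+ print(version)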
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/versions",
+ method="GET",
+ params={
+ "evaluator_aggregates": evaluator_aggregates,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ ListAgents,
+ construct_type(
+ type_=ListAgents, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def set_deployment(
+ self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> HttpResponse[AgentResponse]:
+ """
+ Deploy Agent to an Environment.
+
+ Set the deployed version for the specified Environment. This version
+ will be used for calls made to the Agent in this Environment.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ environment_id : str
+ Unique identifier for the Environment to deploy the Version to.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[AgentResponse]
+ Successful Response
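+
+ Examples
+ --------
+ Hand-written sketch (not generated output); all IDs are hypothetical
+ placeholders.
+
+ from humanloop import Humanloop
+
+ client = Humanloop(api_key="YOUR_API_KEY")
+ response = client.agents.with_raw_response.set_deployment(
+ id="ag_1234",
+ environment_id="env_5678",
+ version_id="agv_9012",
+ )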
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}",
+ method="POST",
+ params={
+ "version_id": version_id,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def remove_deployment(
+ self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> HttpResponse[None]:
+ """
+ Remove deployed Agent from the Environment.
+
+ Remove the deployed version for the specified Environment. This version
+ will no longer be used for calls made to the Agent in this Environment.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ environment_id : str
+ Unique identifier for the Environment to remove the deployment from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[None]
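+
+ Examples
+ --------
+ Hand-written sketch (not generated output); the IDs are hypothetical
+ placeholders.
+
+ from humanloop import Humanloop
+
+ client = Humanloop(api_key="YOUR_API_KEY")
+ client.agents.with_raw_response.remove_deployment(
+ id="ag_1234",
+ environment_id="env_5678",
+ )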
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return HttpResponse(response=_response, data=None)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def list_environments(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> HttpResponse[typing.List[FileEnvironmentResponse]]:
+ """
+ List all Environments and their deployed versions for the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[typing.List[FileEnvironmentResponse]]
+ Successful Response
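+
+ Examples
+ --------
+ Hand-written sketch (not generated output); the ID is a hypothetical
+ placeholder.
+
+ from humanloop import Humanloop
+
+ client = Humanloop(api_key="YOUR_API_KEY")
+ response = client.agents.with_raw_response.list_environments(id="ag_1234")
+ for environment in response.data:
+ print(environment)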
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/environments",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ typing.List[FileEnvironmentResponse],
+ construct_type(
+ type_=typing.List[FileEnvironmentResponse], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def update_monitoring(
+ self,
+ id: str,
+ *,
+ activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT,
+ deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[AgentResponse]:
+ """
+ Activate and deactivate Evaluators for monitoring the Agent.
+
+ An activated Evaluator will automatically be run on all new Logs
+ within the Agent for monitoring purposes.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]]
+ Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+
+ deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]]
+ Evaluators to deactivate. These will not be run on new Logs.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[AgentResponse]
+ Successful Response
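+
+ Examples
+ --------
+ Hand-written sketch (not generated output); assumes activate items accept an
+ `evaluator_version_id` key, and the IDs are hypothetical placeholders.
+
+ from humanloop import Humanloop
+
+ client = Humanloop(api_key="YOUR_API_KEY")
+ response = client.agents.with_raw_response.update_monitoring(
+ id="ag_1234",
+ activate=[{"evaluator_version_id": "evv_5678"}],
+ )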
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/evaluators",
+ method="POST",
+ json={
+ "activate": convert_and_respect_annotation_metadata(
+ object_=activate,
+ annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams],
+ direction="write",
+ ),
+ "deactivate": convert_and_respect_annotation_metadata(
+ object_=deactivate,
+ annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams],
+ direction="write",
+ ),
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def serialize(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[str]:
+ """
+ Serialize an Agent to the .agent file format.
+
+ Useful for storing the Agent with your code in a version control system,
+ or for editing with an AI tool.
+
+ By default, the deployed version of the Agent is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[str]
+ Successful Response
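+
+ Examples
+ --------
+ Hand-written sketch (not generated output); the ID is a hypothetical
+ placeholder.
+
+ from humanloop import Humanloop
+
+ client = Humanloop(api_key="YOUR_API_KEY")
+ response = client.agents.with_raw_response.serialize(id="ag_1234")
+ agent_file = response.data  # raw .agent file contents as a string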
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/serialize",
+ method="GET",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return HttpResponse(response=_response, data=_response.text)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def deserialize(
+ self, *, agent: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> HttpResponse[AgentKernelRequest]:
+ """
+ Deserialize an Agent from the .agent file format.
+
+ This returns a subset of the attributes required by an Agent.
+ This subset defines the Agent version (e.g. `model`, `temperature`, etc.).
+
+ Parameters
+ ----------
+ agent : str
+ The contents of the serialized Agent (.agent file format).
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[AgentKernelRequest]
+ Successful Response
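+
+ Examples
+ --------
+ Hand-written sketch (not generated output); round-trips a serialized Agent
+ through `serialize` and back. The ID is a hypothetical placeholder.
+
+ from humanloop import Humanloop
+
+ client = Humanloop(api_key="YOUR_API_KEY")
+ serialized = client.agents.with_raw_response.serialize(id="ag_1234").data
+ response = client.agents.with_raw_response.deserialize(agent=serialized)
+ kernel = response.data  # AgentKernelRequest with model, temperature, etc.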
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "agents/deserialize",
+ method="POST",
+ json={
+ "agent": agent,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentKernelRequest,
+ construct_type(
+ type_=AgentKernelRequest, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncRawAgentsClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def log(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ run_id: typing.Optional[str] = OMIT,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ output_message: typing.Optional[ChatMessageParams] = OMIT,
+ prompt_tokens: typing.Optional[int] = OMIT,
+ reasoning_tokens: typing.Optional[int] = OMIT,
+ output_tokens: typing.Optional[int] = OMIT,
+ prompt_cost: typing.Optional[float] = OMIT,
+ output_cost: typing.Optional[float] = OMIT,
+ finish_reason: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentLogRequestAgentParams] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ output: typing.Optional[str] = OMIT,
+ created_at: typing.Optional[dt.datetime] = OMIT,
+ error: typing.Optional[str] = OMIT,
+ provider_latency: typing.Optional[float] = OMIT,
+ stdout: typing.Optional[str] = OMIT,
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agent_log_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[CreateAgentLogResponse]:
+ """
+ Create an Agent Log.
+
+ You can use query parameters `version_id` or `environment` to target
+ an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+ If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete`
+ in order to trigger Evaluators.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ run_id : typing.Optional[str]
+ Unique identifier for the Run to associate the Log to.
+
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ output_message : typing.Optional[ChatMessageParams]
+ The message returned by the provider.
+
+ prompt_tokens : typing.Optional[int]
+ Number of tokens in the prompt used to generate the output.
+
+ reasoning_tokens : typing.Optional[int]
+ Number of reasoning tokens used to generate the output.
+
+ output_tokens : typing.Optional[int]
+ Number of tokens in the output generated by the model.
+
+ prompt_cost : typing.Optional[float]
+ Cost in dollars associated to the tokens in the prompt.
+
+ output_cost : typing.Optional[float]
+ Cost in dollars associated to the tokens in the output.
+
+ finish_reason : typing.Optional[str]
+ Reason the generation finished.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentLogRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Agent.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Agent.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentLogRequestAgentParams]
+ The agent configuration to use. Two formats are supported:
+ - An `AgentKernelRequest` object containing the agent configuration
+ - A string containing a serialized .agent file
+ A new Agent version will be created if the provided details are new.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ output : typing.Optional[str]
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+ created_at : typing.Optional[dt.datetime]
+ User defined timestamp for when the log was created.
+
+ error : typing.Optional[str]
+ Error message if the log is an error.
+
+ provider_latency : typing.Optional[float]
+ Duration of the logged event in seconds.
+
+ stdout : typing.Optional[str]
+ Captured log and debug statements.
+
+ provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw request sent to provider.
+
+ provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw response received from the provider.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agent_log_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[CreateAgentLogResponse]
+ Successful Response
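+
+ Examples
+ --------
+ Hand-written async sketch (not generated output); assumes the async client is
+ exported as `AsyncHumanloop` and exposes this raw client via
+ `with_raw_response`. The path is a hypothetical placeholder.
+
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+ async def main() -> None:
+ response = await client.agents.with_raw_response.log(
+ path="example/my-agent",
+ messages=[{"role": "user", "content": "Hello"}],
+ output="Hi there!",
+ )
+ print(response.data)
+
+ asyncio.run(main())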
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "agents/log",
+ method="POST",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ json={
+ "run_id": run_id,
+ "path": path,
+ "id": id,
+ "output_message": convert_and_respect_annotation_metadata(
+ object_=output_message, annotation=ChatMessageParams, direction="write"
+ ),
+ "prompt_tokens": prompt_tokens,
+ "reasoning_tokens": reasoning_tokens,
+ "output_tokens": output_tokens,
+ "prompt_cost": prompt_cost,
+ "output_cost": output_cost,
+ "finish_reason": finish_reason,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "tool_choice": convert_and_respect_annotation_metadata(
+ object_=tool_choice, annotation=AgentLogRequestToolChoiceParams, direction="write"
+ ),
+ "agent": convert_and_respect_annotation_metadata(
+ object_=agent, annotation=AgentLogRequestAgentParams, direction="write"
+ ),
+ "start_time": start_time,
+ "end_time": end_time,
+ "output": output,
+ "created_at": created_at,
+ "error": error,
+ "provider_latency": provider_latency,
+ "stdout": stdout,
+ "provider_request": provider_request,
+ "provider_response": provider_response,
+ "inputs": inputs,
+ "source": source,
+ "metadata": metadata,
+ "log_status": log_status,
+ "source_datapoint_id": source_datapoint_id,
+ "trace_parent_id": trace_parent_id,
+ "user": user,
+ "environment": agent_log_request_environment,
+ "save": save,
+ "log_id": log_id,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ CreateAgentLogResponse,
+ construct_type(
+ type_=CreateAgentLogResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def update_log(
+ self,
+ id: str,
+ log_id: str,
+ *,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ output_message: typing.Optional[ChatMessageParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ output: typing.Optional[str] = OMIT,
+ error: typing.Optional[str] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[LogResponse]:
+ """
+ Update a Log.
+
+ Update the details of a Log with the given ID.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ log_id : str
+ Unique identifier for the Log.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ List of chat messages that were used as an input to the Agent.
+
+ output_message : typing.Optional[ChatMessageParams]
+ The output message returned by this Agent.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the Agent Log.
+
+ output : typing.Optional[str]
+ The output of the Agent Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`.
+
+ error : typing.Optional[str]
+ The error message of the Agent Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`.
+
+ log_status : typing.Optional[LogStatus]
+ Status of the Agent Log. When an Agent Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[LogResponse]
+ Successful Response
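+
+ Examples
+ --------
+ Hand-written async sketch (not generated output); IDs are hypothetical
+ placeholders and `AsyncHumanloop` is assumed to be the async client export.
+
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+ async def main() -> None:
+ response = await client.agents.with_raw_response.update_log(
+ id="ag_1234",
+ log_id="log_5678",
+ log_status="complete",
+ )
+
+ asyncio.run(main())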
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/log/{jsonable_encoder(log_id)}",
+ method="PATCH",
+ json={
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "output_message": convert_and_respect_annotation_metadata(
+ object_=output_message, annotation=ChatMessageParams, direction="write"
+ ),
+ "inputs": inputs,
+ "output": output,
+ "error": error,
+ "log_status": log_status,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ LogResponse,
+ construct_type(
+ type_=LogResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ @contextlib.asynccontextmanager
+ async def call_stream(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentsCallStreamRequestAgentParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agents_call_stream_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ return_inputs: typing.Optional[bool] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AgentCallStreamResponse]]]:
+ """
+ Call an Agent.
+
+ Calling an Agent calls the model provider before logging
+ the request, responses and metadata to Humanloop.
+
+ You can use query parameters `version_id` or `environment` to target
+ an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can pass in
+ Agent details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+ when you are storing or deriving your Agent details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Agent.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Agent.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentsCallStreamRequestAgentParams]
+ The agent configuration to use. Two formats are supported:
+ - An `AgentKernelRequest` object containing the agent configuration
+ - A string containing a serialized .agent file
+ A new Agent version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agents_call_stream_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Yields
+ ------
+ typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AgentCallStreamResponse]]]
+
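+ Examples
+ --------
+ Hand-written async sketch (not generated output); assumes `AsyncHumanloop`
+ is the async client export and the path is a hypothetical placeholder.
+
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+ async def main() -> None:
+ async with client.agents.with_raw_response.call_stream(
+ path="example/my-agent",
+ messages=[{"role": "user", "content": "Hello"}],
+ ) as response:
+ async for chunk in response.data:
+ print(chunk)
+
+ asyncio.run(main())
+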
+ """
+ async with self._client_wrapper.httpx_client.stream(
+ "agents/call",
+ method="POST",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ json={
+ "path": path,
+ "id": id,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "tool_choice": convert_and_respect_annotation_metadata(
+ object_=tool_choice, annotation=AgentsCallStreamRequestToolChoiceParams, direction="write"
+ ),
+ "agent": convert_and_respect_annotation_metadata(
+ object_=agent, annotation=AgentsCallStreamRequestAgentParams, direction="write"
+ ),
+ "inputs": inputs,
+ "source": source,
+ "metadata": metadata,
+ "start_time": start_time,
+ "end_time": end_time,
+ "log_status": log_status,
+ "source_datapoint_id": source_datapoint_id,
+ "trace_parent_id": trace_parent_id,
+ "user": user,
+ "environment": agents_call_stream_request_environment,
+ "save": save,
+ "log_id": log_id,
+ "provider_api_keys": convert_and_respect_annotation_metadata(
+ object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+ ),
+ "return_inputs": return_inputs,
+ "include_trace_children": include_trace_children,
+ "stream": True,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ ) as _response:
+
+ async def stream() -> AsyncHttpResponse[typing.AsyncIterator[AgentCallStreamResponse]]:
+ try:
+ if 200 <= _response.status_code < 300:
+
+ async def _iter():
+ _event_source = httpx_sse.EventSource(_response)
+ async for _sse in _event_source.aiter_sse():
+ # `ServerSentEvent.data` is a str attribute, not a callable;
+ # parse each SSE payload into the typed stream response
+ # (assumes `json` is imported at the top of this module).
+ if _sse.data is None:
+ return
+ try:
+ yield typing.cast(
+ AgentCallStreamResponse,
+ construct_type(
+ type_=AgentCallStreamResponse,  # type: ignore
+ object_=json.loads(_sse.data),
+ ),
+ )
+ except Exception:
+ pass
+ return
+
+ return AsyncHttpResponse(response=_response, data=_iter())
+ await _response.aread()
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ yield await stream()
+
+ async def call(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentsCallRequestAgentParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agents_call_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ return_inputs: typing.Optional[bool] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[AgentCallResponse]:
+ """
+ Call an Agent.
+
+ Calling an Agent calls the model provider before logging
+ the request, responses and metadata to Humanloop.
+
+ You can use query parameters `version_id` or `environment` to target
+ an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can pass in
+ Agent details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+ when you are storing or deriving your Agent details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Agent.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Agent.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentsCallRequestAgentParams]
+ The agent configuration to use. Two formats are supported:
+ - An `AgentKernelRequest` object containing the agent configuration
+ - A string containing a serialized .agent file
+ A new Agent version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agents_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[AgentCallResponse]
+
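+ Examples
+ --------
+ Hand-written async sketch (not generated output); assumes `AsyncHumanloop`
+ is the async client export and the path is a hypothetical placeholder.
+
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+ async def main() -> None:
+ response = await client.agents.with_raw_response.call(
+ path="example/my-agent",
+ messages=[{"role": "user", "content": "Hello"}],
+ )
+ print(response.data)
+
+ asyncio.run(main())
+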
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "agents/call",
+ method="POST",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ json={
+ "path": path,
+ "id": id,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "tool_choice": convert_and_respect_annotation_metadata(
+ object_=tool_choice, annotation=AgentsCallRequestToolChoiceParams, direction="write"
+ ),
+ "agent": convert_and_respect_annotation_metadata(
+ object_=agent, annotation=AgentsCallRequestAgentParams, direction="write"
+ ),
+ "inputs": inputs,
+ "source": source,
+ "metadata": metadata,
+ "start_time": start_time,
+ "end_time": end_time,
+ "log_status": log_status,
+ "source_datapoint_id": source_datapoint_id,
+ "trace_parent_id": trace_parent_id,
+ "user": user,
+ "environment": agents_call_request_environment,
+ "save": save,
+ "log_id": log_id,
+ "provider_api_keys": convert_and_respect_annotation_metadata(
+ object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+ ),
+ "return_inputs": return_inputs,
+ "include_trace_children": include_trace_children,
+ "stream": False,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentCallResponse,
+ construct_type(
+ type_=AgentCallResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ @contextlib.asynccontextmanager
+ async def continue_stream(
+ self,
+ *,
+ log_id: str,
+ messages: typing.Sequence[ChatMessageParams],
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AgentContinueStreamResponse]]]:
+ """
+ Continue an incomplete Agent call.
+
+ This endpoint allows continuing an existing incomplete Agent call, using the context
+ from the previous interaction. The Agent will resume processing from where it left off.
+
+ The original log must be in an incomplete state to be continued.
+
+ The messages in the request will be appended
+ to the original messages in the log.
+
+ Parameters
+ ----------
+ log_id : str
+ This identifies the Agent Log to continue.
+
+ messages : typing.Sequence[ChatMessageParams]
+ The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Yields
+ ------
+ typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AgentContinueStreamResponse]]]
+
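+ Examples
+ --------
+ Hand-written async sketch (not generated output); the Log ID is a
+ hypothetical placeholder for an incomplete Agent Log.
+
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+ async def main() -> None:
+ async with client.agents.with_raw_response.continue_stream(
+ log_id="log_5678",
+ messages=[{"role": "tool", "content": "42"}],
+ ) as response:
+ async for chunk in response.data:
+ print(chunk)
+
+ asyncio.run(main())
+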
+ """
+ async with self._client_wrapper.httpx_client.stream(
+ "agents/continue",
+ method="POST",
+ json={
+ "log_id": log_id,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "provider_api_keys": convert_and_respect_annotation_metadata(
+ object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+ ),
+ "include_trace_children": include_trace_children,
+ "stream": True,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ ) as _response:
+
+ async def stream() -> AsyncHttpResponse[typing.AsyncIterator[AgentContinueStreamResponse]]:
+ try:
+ if 200 <= _response.status_code < 300:
+
+ async def _iter():
+ _event_source = httpx_sse.EventSource(_response)
+ async for _sse in _event_source.aiter_sse():
+ # `ServerSentEvent.data` is a str attribute, not a callable;
+ # parse each SSE payload into the typed stream response
+ # (assumes `json` is imported at the top of this module).
+ if _sse.data is None:
+ return
+ try:
+ yield typing.cast(
+ AgentContinueStreamResponse,
+ construct_type(
+ type_=AgentContinueStreamResponse,  # type: ignore
+ object_=json.loads(_sse.data),
+ ),
+ )
+ except Exception:
+ pass
+ return
+
+ return AsyncHttpResponse(response=_response, data=_iter())
+ await _response.aread()
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ yield await stream()
+
+ async def continue_(
+ self,
+ *,
+ log_id: str,
+ messages: typing.Sequence[ChatMessageParams],
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[AgentContinueResponse]:
+ """
+ Continue an incomplete Agent call.
+
+ This endpoint allows continuing an existing incomplete Agent call, using the context
+ from the previous interaction. The Agent will resume processing from where it left off.
+
+ The original log must be in an incomplete state to be continued.
+
+ The messages in the request will be appended
+ to the original messages in the log.
+
+ Parameters
+ ----------
+ log_id : str
+ This identifies the Agent Log to continue.
+
+ messages : typing.Sequence[ChatMessageParams]
+ The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[AgentContinueResponse]
+
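+ Examples
+ --------
+ Hand-written async sketch (not generated output); the Log ID is a
+ hypothetical placeholder for an incomplete Agent Log.
+
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+ async def main() -> None:
+ response = await client.agents.with_raw_response.continue_(
+ log_id="log_5678",
+ messages=[{"role": "tool", "content": "42"}],
+ )
+ print(response.data)
+
+ asyncio.run(main())
+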
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "agents/continue",
+ method="POST",
+ json={
+ "log_id": log_id,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "provider_api_keys": convert_and_respect_annotation_metadata(
+ object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+ ),
+ "include_trace_children": include_trace_children,
+ "stream": False,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentContinueResponse,
+ construct_type(
+ type_=AgentContinueResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def list(
+ self,
+ *,
+ page: typing.Optional[int] = None,
+ size: typing.Optional[int] = None,
+ name: typing.Optional[str] = None,
+ user_filter: typing.Optional[str] = None,
+ sort_by: typing.Optional[ProjectSortBy] = None,
+ order: typing.Optional[SortOrder] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[PaginatedDataAgentResponse]:
+ """
+ Get a list of all Agents.
+
+ Parameters
+ ----------
+ page : typing.Optional[int]
+ Page number for pagination.
+
+ size : typing.Optional[int]
+ Page size for pagination. Number of Agents to fetch.
+
+ name : typing.Optional[str]
+ Case-insensitive filter for Agent name.
+
+ user_filter : typing.Optional[str]
+ Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users.
+
+ sort_by : typing.Optional[ProjectSortBy]
+ Field to sort Agents by.
+
+ order : typing.Optional[SortOrder]
+ Direction to sort by.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[PaginatedDataAgentResponse]
+ Successful Response
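+
+ Examples
+ --------
+ Hand-written async sketch (not generated output); assumes `AsyncHumanloop`
+ is the async client export.
+
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+ async def main() -> None:
+ response = await client.agents.with_raw_response.list(page=1, size=10)
+ print(response.data)
+
+ asyncio.run(main())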
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "agents",
+ method="GET",
+ params={
+ "page": page,
+ "size": size,
+ "name": name,
+ "user_filter": user_filter,
+ "sort_by": sort_by,
+ "order": order,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ PaginatedDataAgentResponse,
+ construct_type(
+ type_=PaginatedDataAgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def upsert(
+ self,
+ *,
+ model: str,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ endpoint: typing.Optional[ModelEndpoints] = OMIT,
+ template: typing.Optional[AgentRequestTemplateParams] = OMIT,
+ template_language: typing.Optional[TemplateLanguage] = OMIT,
+ provider: typing.Optional[ModelProviders] = OMIT,
+ max_tokens: typing.Optional[int] = OMIT,
+ temperature: typing.Optional[float] = OMIT,
+ top_p: typing.Optional[float] = OMIT,
+ stop: typing.Optional[AgentRequestStopParams] = OMIT,
+ presence_penalty: typing.Optional[float] = OMIT,
+ frequency_penalty: typing.Optional[float] = OMIT,
+ other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ seed: typing.Optional[int] = OMIT,
+ response_format: typing.Optional[ResponseFormatParams] = OMIT,
+ reasoning_effort: typing.Optional[AgentRequestReasoningEffortParams] = OMIT,
+ tools: typing.Optional[typing.Sequence[AgentRequestToolsItemParams]] = OMIT,
+ attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ max_iterations: typing.Optional[int] = OMIT,
+ version_name: typing.Optional[str] = OMIT,
+ version_description: typing.Optional[str] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ tags: typing.Optional[typing.Sequence[str]] = OMIT,
+ readme: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[AgentResponse]:
+ """
+ Create an Agent or update it with a new version if it already exists.
+
+ Agents are identified by their `id` or `path`. The parameters (i.e. the template, temperature, model, etc.) and
+ tools determine the versions of the Agent.
+
+ You can provide `version_name` and `version_description` to identify and describe your versions.
+ Version names must be unique within an Agent - attempting to create a version with a name
+ that already exists will result in a 409 Conflict error.
+
+ Parameters
+ ----------
+ model : str
+ The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ endpoint : typing.Optional[ModelEndpoints]
+ The provider model endpoint used.
+
+ template : typing.Optional[AgentRequestTemplateParams]
+ The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+ For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+ For completion models, provide a prompt template as a string.
+
+ Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+
+ template_language : typing.Optional[TemplateLanguage]
+ The template language to use for rendering the template.
+
+ provider : typing.Optional[ModelProviders]
+ The company providing the underlying model service.
+
+ max_tokens : typing.Optional[int]
+ The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt.
+
+ temperature : typing.Optional[float]
+ What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+
+ top_p : typing.Optional[float]
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+
+ stop : typing.Optional[AgentRequestStopParams]
+ The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+
+ presence_penalty : typing.Optional[float]
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+
+ frequency_penalty : typing.Optional[float]
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+
+ other : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Other parameter values to be passed to the provider call.
+
+ seed : typing.Optional[int]
+ If specified, the model will make a best effort to sample deterministically, but it is not guaranteed.
+
+ response_format : typing.Optional[ResponseFormatParams]
+ The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+
+ reasoning_effort : typing.Optional[AgentRequestReasoningEffortParams]
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
+
+ tools : typing.Optional[typing.Sequence[AgentRequestToolsItemParams]]
+ The tools available to the Agent, provided as linked Files or inline tool definitions.
+
+ attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used.
+
+ max_iterations : typing.Optional[int]
+ The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
+
+ version_name : typing.Optional[str]
+ Unique name for the Agent version. Each Agent can only have one version with a given name.
+
+ version_description : typing.Optional[str]
+ Description of the Version.
+
+ description : typing.Optional[str]
+ Description of the Agent.
+
+ tags : typing.Optional[typing.Sequence[str]]
+ List of tags associated with this Agent.
+
+ readme : typing.Optional[str]
+ Long description of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[AgentResponse]
+ Successful Response
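+
+ Examples
+ --------
+ # Illustrative sketch (placeholder values; assumes `raw_agents` is an
+ # instance of this async raw client, awaited inside an async function).
+ response = await raw_agents.upsert(
+     model="gpt-4o",
+     path="assistants/support-agent",
+     template=[{"role": "system", "content": "You are a helpful assistant."}],
+     max_iterations=5,
+     version_name="v1",
+ )
+ print(response.data.id)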
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "agents",
+ method="POST",
+ json={
+ "path": path,
+ "id": id,
+ "model": model,
+ "endpoint": endpoint,
+ "template": convert_and_respect_annotation_metadata(
+ object_=template, annotation=AgentRequestTemplateParams, direction="write"
+ ),
+ "template_language": template_language,
+ "provider": provider,
+ "max_tokens": max_tokens,
+ "temperature": temperature,
+ "top_p": top_p,
+ "stop": convert_and_respect_annotation_metadata(
+ object_=stop, annotation=AgentRequestStopParams, direction="write"
+ ),
+ "presence_penalty": presence_penalty,
+ "frequency_penalty": frequency_penalty,
+ "other": other,
+ "seed": seed,
+ "response_format": convert_and_respect_annotation_metadata(
+ object_=response_format, annotation=ResponseFormatParams, direction="write"
+ ),
+ "reasoning_effort": convert_and_respect_annotation_metadata(
+ object_=reasoning_effort, annotation=AgentRequestReasoningEffortParams, direction="write"
+ ),
+ "tools": convert_and_respect_annotation_metadata(
+ object_=tools, annotation=typing.Sequence[AgentRequestToolsItemParams], direction="write"
+ ),
+ "attributes": attributes,
+ "max_iterations": max_iterations,
+ "version_name": version_name,
+ "version_description": version_description,
+ "description": description,
+ "tags": tags,
+ "readme": readme,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def delete_agent_version(
+ self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncHttpResponse[None]:
+ """
+ Delete a version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[None]
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return AsyncHttpResponse(response=_response, data=None)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def patch_agent_version(
+ self,
+ id: str,
+ version_id: str,
+ *,
+ name: typing.Optional[str] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[AgentResponse]:
+ """
+ Update the name or description of the Agent version.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ name : typing.Optional[str]
+ Name of the version.
+
+ description : typing.Optional[str]
+ Description of the version.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[AgentResponse]
+ Successful Response
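+
+ Examples
+ --------
+ # Illustrative sketch; the Agent and version IDs are placeholders.
+ response = await raw_agents.patch_agent_version(
+     id="ag_1234567890",
+     version_id="agv_0987654321",
+     name="v1-renamed",
+     description="Tuned system message.",
+ )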
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}",
+ method="PATCH",
+ json={
+ "name": name,
+ "description": description,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def get(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[AgentResponse]:
+ """
+ Retrieve the Agent with the given ID.
+
+ By default, the deployed version of the Agent is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[AgentResponse]
+ Successful Response
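+
+ Examples
+ --------
+ # Illustrative sketch: target the version deployed to a named Environment
+ # instead of the default deployment (placeholder ID, async context).
+ response = await raw_agents.get(id="ag_1234567890", environment="staging")
+ print(response.data)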
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}",
+ method="GET",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def delete(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncHttpResponse[None]:
+ """
+ Delete the Agent with the given ID.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[None]
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return AsyncHttpResponse(response=_response, data=None)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def move(
+ self,
+ id: str,
+ *,
+ path: typing.Optional[str] = OMIT,
+ name: typing.Optional[str] = OMIT,
+ directory_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[AgentResponse]:
+ """
+ Move the Agent to a different path or change the name.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ path : typing.Optional[str]
+ Path of the Agent including the Agent name, which is used as a unique identifier.
+
+ name : typing.Optional[str]
+ Name of the Agent.
+
+ directory_id : typing.Optional[str]
+ Unique identifier for the Directory to move the Agent to. Starts with `dir_`.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[AgentResponse]
+ Successful Response
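+
+ Examples
+ --------
+ # Illustrative sketch: rename the Agent by moving it to a new path
+ # (placeholder values).
+ response = await raw_agents.move(id="ag_1234567890", path="archive/support-agent")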
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}",
+ method="PATCH",
+ json={
+ "path": path,
+ "name": name,
+ "directory_id": directory_id,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def list_versions(
+ self,
+ id: str,
+ *,
+ evaluator_aggregates: typing.Optional[bool] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[ListAgents]:
+ """
+ Get a list of all the versions of an Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ evaluator_aggregates : typing.Optional[bool]
+ Whether to include Evaluator aggregate results for the versions in the response.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[ListAgents]
+ Successful Response
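+
+ Examples
+ --------
+ # Illustrative sketch: include Evaluator aggregates alongside each version
+ # (placeholder ID).
+ response = await raw_agents.list_versions(
+     id="ag_1234567890",
+     evaluator_aggregates=True,
+ )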
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/versions",
+ method="GET",
+ params={
+ "evaluator_aggregates": evaluator_aggregates,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ ListAgents,
+ construct_type(
+ type_=ListAgents, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def set_deployment(
+ self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncHttpResponse[AgentResponse]:
+ """
+ Deploy Agent to an Environment.
+
+ Set the deployed version for the specified Environment. This version of the
+ Agent will then be used for calls made to the Agent in this Environment.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ environment_id : str
+ Unique identifier for the Environment to deploy the Version to.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[AgentResponse]
+ Successful Response
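+
+ Examples
+ --------
+ # Illustrative sketch: deploy a specific version to an Environment
+ # (placeholder IDs).
+ response = await raw_agents.set_deployment(
+     id="ag_1234567890",
+     environment_id="env_1234567890",
+     version_id="agv_0987654321",
+ )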
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}",
+ method="POST",
+ params={
+ "version_id": version_id,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def remove_deployment(
+ self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncHttpResponse[None]:
+ """
+ Remove deployed Agent from the Environment.
+
+ Remove the deployed version for the specified Environment. The Agent will
+ no longer be used for calls made in this Environment.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ environment_id : str
+ Unique identifier for the Environment to remove the deployment from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[None]
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return AsyncHttpResponse(response=_response, data=None)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def list_environments(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncHttpResponse[typing.List[FileEnvironmentResponse]]:
+ """
+ List all Environments and their deployed versions for the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[typing.List[FileEnvironmentResponse]]
+ Successful Response
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/environments",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ typing.List[FileEnvironmentResponse],
+ construct_type(
+ type_=typing.List[FileEnvironmentResponse], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def update_monitoring(
+ self,
+ id: str,
+ *,
+ activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT,
+ deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[AgentResponse]:
+ """
+ Activate and deactivate Evaluators for monitoring the Agent.
+
+ An activated Evaluator will automatically be run on all new Logs
+ within the Agent for monitoring purposes.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]]
+ Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+
+ deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]]
+ Evaluators to deactivate. These will not be run on new Logs.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[AgentResponse]
+ Successful Response
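+
+ Examples
+ --------
+ # Illustrative sketch: swap which Evaluator monitors new Logs. The
+ # `evaluator_version_id` keys and IDs shown are assumed placeholders.
+ response = await raw_agents.update_monitoring(
+     id="ag_1234567890",
+     activate=[{"evaluator_version_id": "evv_1111111111"}],
+     deactivate=[{"evaluator_version_id": "evv_2222222222"}],
+ )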
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/evaluators",
+ method="POST",
+ json={
+ "activate": convert_and_respect_annotation_metadata(
+ object_=activate,
+ annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams],
+ direction="write",
+ ),
+ "deactivate": convert_and_respect_annotation_metadata(
+ object_=deactivate,
+ annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams],
+ direction="write",
+ ),
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def serialize(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[str]:
+ """
+ Serialize an Agent to the .agent file format.
+
+ Useful for storing the Agent with your code in a version control system,
+ or for editing with an AI tool.
+
+ By default, the deployed version of the Agent is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[str]
+ Successful Response
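+
+ Examples
+ --------
+ # Illustrative sketch: fetch the deployed version in .agent format and
+ # store it alongside your code (placeholder ID).
+ response = await raw_agents.serialize(id="ag_1234567890")
+ with open("support.agent", "w") as f:
+     f.write(response.data)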
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/serialize",
+ method="GET",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return AsyncHttpResponse(response=_response, data=_response.text)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def deserialize(
+ self, *, agent: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncHttpResponse[AgentKernelRequest]:
+ """
+ Deserialize an Agent from the .agent file format.
+
+ This returns the subset of an Agent's attributes that defines the Agent
+ version (e.g. `model`, `temperature`, etc.).
+
+ Parameters
+ ----------
+ agent : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[AgentKernelRequest]
+ Successful Response
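+
+ Examples
+ --------
+ # Illustrative sketch: round-trip a serialized .agent file back into the
+ # version-defining attributes.
+ with open("support.agent") as f:
+     response = await raw_agents.deserialize(agent=f.read())
+ print(response.data.model)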
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "agents/deserialize",
+ method="POST",
+ json={
+ "agent": agent,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentKernelRequest,
+ construct_type(
+ type_=AgentKernelRequest, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/humanloop/agents/requests/__init__.py b/src/humanloop/agents/requests/__init__.py
new file mode 100644
index 00000000..06ce37ed
--- /dev/null
+++ b/src/humanloop/agents/requests/__init__.py
@@ -0,0 +1,25 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from .agent_log_request_agent import AgentLogRequestAgentParams
+from .agent_log_request_tool_choice import AgentLogRequestToolChoiceParams
+from .agent_request_reasoning_effort import AgentRequestReasoningEffortParams
+from .agent_request_stop import AgentRequestStopParams
+from .agent_request_template import AgentRequestTemplateParams
+from .agent_request_tools_item import AgentRequestToolsItemParams
+from .agents_call_request_agent import AgentsCallRequestAgentParams
+from .agents_call_request_tool_choice import AgentsCallRequestToolChoiceParams
+from .agents_call_stream_request_agent import AgentsCallStreamRequestAgentParams
+from .agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoiceParams
+
+__all__ = [
+ "AgentLogRequestAgentParams",
+ "AgentLogRequestToolChoiceParams",
+ "AgentRequestReasoningEffortParams",
+ "AgentRequestStopParams",
+ "AgentRequestTemplateParams",
+ "AgentRequestToolsItemParams",
+ "AgentsCallRequestAgentParams",
+ "AgentsCallRequestToolChoiceParams",
+ "AgentsCallStreamRequestAgentParams",
+ "AgentsCallStreamRequestToolChoiceParams",
+]
diff --git a/src/humanloop/agents/requests/agent_log_request_agent.py b/src/humanloop/agents/requests/agent_log_request_agent.py
new file mode 100644
index 00000000..1c6a7987
--- /dev/null
+++ b/src/humanloop/agents/requests/agent_log_request_agent.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.agent_kernel_request import AgentKernelRequestParams
+
+AgentLogRequestAgentParams = typing.Union[AgentKernelRequestParams, str]
diff --git a/src/humanloop/agents/requests/agent_log_request_tool_choice.py b/src/humanloop/agents/requests/agent_log_request_tool_choice.py
new file mode 100644
index 00000000..584112aa
--- /dev/null
+++ b/src/humanloop/agents/requests/agent_log_request_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.tool_choice import ToolChoiceParams
+
+AgentLogRequestToolChoiceParams = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams
+]
diff --git a/src/humanloop/agents/requests/agent_request_reasoning_effort.py b/src/humanloop/agents/requests/agent_request_reasoning_effort.py
new file mode 100644
index 00000000..98a991cd
--- /dev/null
+++ b/src/humanloop/agents/requests/agent_request_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.open_ai_reasoning_effort import OpenAiReasoningEffort
+
+AgentRequestReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/agents/requests/agent_request_stop.py b/src/humanloop/agents/requests/agent_request_stop.py
new file mode 100644
index 00000000..3970451c
--- /dev/null
+++ b/src/humanloop/agents/requests/agent_request_stop.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentRequestStopParams = typing.Union[str, typing.Sequence[str]]
diff --git a/src/humanloop/agents/requests/agent_request_template.py b/src/humanloop/agents/requests/agent_request_template.py
new file mode 100644
index 00000000..c251ce8e
--- /dev/null
+++ b/src/humanloop/agents/requests/agent_request_template.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.chat_message import ChatMessageParams
+
+AgentRequestTemplateParams = typing.Union[str, typing.Sequence[ChatMessageParams]]
diff --git a/src/humanloop/agents/requests/agent_request_tools_item.py b/src/humanloop/agents/requests/agent_request_tools_item.py
new file mode 100644
index 00000000..20cde136
--- /dev/null
+++ b/src/humanloop/agents/requests/agent_request_tools_item.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.agent_linked_file_request import AgentLinkedFileRequestParams
+from ...requests.agent_inline_tool import AgentInlineToolParams
+
+AgentRequestToolsItemParams = typing.Union[AgentLinkedFileRequestParams, AgentInlineToolParams]
diff --git a/src/humanloop/agents/requests/agents_call_request_agent.py b/src/humanloop/agents/requests/agents_call_request_agent.py
new file mode 100644
index 00000000..5c92d02b
--- /dev/null
+++ b/src/humanloop/agents/requests/agents_call_request_agent.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.agent_kernel_request import AgentKernelRequestParams
+
+AgentsCallRequestAgentParams = typing.Union[AgentKernelRequestParams, str]
diff --git a/src/humanloop/agents/requests/agents_call_request_tool_choice.py b/src/humanloop/agents/requests/agents_call_request_tool_choice.py
new file mode 100644
index 00000000..1e468fa0
--- /dev/null
+++ b/src/humanloop/agents/requests/agents_call_request_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.tool_choice import ToolChoiceParams
+
+AgentsCallRequestToolChoiceParams = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams
+]
diff --git a/src/humanloop/agents/requests/agents_call_stream_request_agent.py b/src/humanloop/agents/requests/agents_call_stream_request_agent.py
new file mode 100644
index 00000000..e9018a18
--- /dev/null
+++ b/src/humanloop/agents/requests/agents_call_stream_request_agent.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.agent_kernel_request import AgentKernelRequestParams
+
+AgentsCallStreamRequestAgentParams = typing.Union[AgentKernelRequestParams, str]
diff --git a/src/humanloop/agents/requests/agents_call_stream_request_tool_choice.py b/src/humanloop/agents/requests/agents_call_stream_request_tool_choice.py
new file mode 100644
index 00000000..bd068b6f
--- /dev/null
+++ b/src/humanloop/agents/requests/agents_call_stream_request_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.tool_choice import ToolChoiceParams
+
+AgentsCallStreamRequestToolChoiceParams = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams
+]
diff --git a/src/humanloop/agents/types/__init__.py b/src/humanloop/agents/types/__init__.py
new file mode 100644
index 00000000..9c8a955c
--- /dev/null
+++ b/src/humanloop/agents/types/__init__.py
@@ -0,0 +1,25 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from .agent_log_request_agent import AgentLogRequestAgent
+from .agent_log_request_tool_choice import AgentLogRequestToolChoice
+from .agent_request_reasoning_effort import AgentRequestReasoningEffort
+from .agent_request_stop import AgentRequestStop
+from .agent_request_template import AgentRequestTemplate
+from .agent_request_tools_item import AgentRequestToolsItem
+from .agents_call_request_agent import AgentsCallRequestAgent
+from .agents_call_request_tool_choice import AgentsCallRequestToolChoice
+from .agents_call_stream_request_agent import AgentsCallStreamRequestAgent
+from .agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoice
+
+__all__ = [
+ "AgentLogRequestAgent",
+ "AgentLogRequestToolChoice",
+ "AgentRequestReasoningEffort",
+ "AgentRequestStop",
+ "AgentRequestTemplate",
+ "AgentRequestToolsItem",
+ "AgentsCallRequestAgent",
+ "AgentsCallRequestToolChoice",
+ "AgentsCallStreamRequestAgent",
+ "AgentsCallStreamRequestToolChoice",
+]
diff --git a/src/humanloop/agents/types/agent_log_request_agent.py b/src/humanloop/agents/types/agent_log_request_agent.py
new file mode 100644
index 00000000..011a2b9d
--- /dev/null
+++ b/src/humanloop/agents/types/agent_log_request_agent.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.agent_kernel_request import AgentKernelRequest
+
+AgentLogRequestAgent = typing.Union[AgentKernelRequest, str]
diff --git a/src/humanloop/agents/types/agent_log_request_tool_choice.py b/src/humanloop/agents/types/agent_log_request_tool_choice.py
new file mode 100644
index 00000000..bfb576c2
--- /dev/null
+++ b/src/humanloop/agents/types/agent_log_request_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.tool_choice import ToolChoice
+
+AgentLogRequestToolChoice = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice
+]
diff --git a/src/humanloop/agents/types/agent_request_reasoning_effort.py b/src/humanloop/agents/types/agent_request_reasoning_effort.py
new file mode 100644
index 00000000..b4267202
--- /dev/null
+++ b/src/humanloop/agents/types/agent_request_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.open_ai_reasoning_effort import OpenAiReasoningEffort
+
+AgentRequestReasoningEffort = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/agents/types/agent_request_stop.py b/src/humanloop/agents/types/agent_request_stop.py
new file mode 100644
index 00000000..325a6b2e
--- /dev/null
+++ b/src/humanloop/agents/types/agent_request_stop.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentRequestStop = typing.Union[str, typing.List[str]]
diff --git a/src/humanloop/agents/types/agent_request_template.py b/src/humanloop/agents/types/agent_request_template.py
new file mode 100644
index 00000000..f6474824
--- /dev/null
+++ b/src/humanloop/agents/types/agent_request_template.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.chat_message import ChatMessage
+
+AgentRequestTemplate = typing.Union[str, typing.List[ChatMessage]]
diff --git a/src/humanloop/agents/types/agent_request_tools_item.py b/src/humanloop/agents/types/agent_request_tools_item.py
new file mode 100644
index 00000000..e6c54b88
--- /dev/null
+++ b/src/humanloop/agents/types/agent_request_tools_item.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.agent_linked_file_request import AgentLinkedFileRequest
+from ...types.agent_inline_tool import AgentInlineTool
+
+AgentRequestToolsItem = typing.Union[AgentLinkedFileRequest, AgentInlineTool]
diff --git a/src/humanloop/agents/types/agents_call_request_agent.py b/src/humanloop/agents/types/agents_call_request_agent.py
new file mode 100644
index 00000000..5f663ad3
--- /dev/null
+++ b/src/humanloop/agents/types/agents_call_request_agent.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.agent_kernel_request import AgentKernelRequest
+
+AgentsCallRequestAgent = typing.Union[AgentKernelRequest, str]
diff --git a/src/humanloop/agents/types/agents_call_request_tool_choice.py b/src/humanloop/agents/types/agents_call_request_tool_choice.py
new file mode 100644
index 00000000..6dee5a04
--- /dev/null
+++ b/src/humanloop/agents/types/agents_call_request_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.tool_choice import ToolChoice
+
+AgentsCallRequestToolChoice = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice
+]
diff --git a/src/humanloop/agents/types/agents_call_stream_request_agent.py b/src/humanloop/agents/types/agents_call_stream_request_agent.py
new file mode 100644
index 00000000..4b2654e9
--- /dev/null
+++ b/src/humanloop/agents/types/agents_call_stream_request_agent.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.agent_kernel_request import AgentKernelRequest
+
+AgentsCallStreamRequestAgent = typing.Union[AgentKernelRequest, str]
diff --git a/src/humanloop/agents/types/agents_call_stream_request_tool_choice.py b/src/humanloop/agents/types/agents_call_stream_request_tool_choice.py
new file mode 100644
index 00000000..83d264f0
--- /dev/null
+++ b/src/humanloop/agents/types/agents_call_stream_request_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.tool_choice import ToolChoice
+
+AgentsCallStreamRequestToolChoice = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice
+]
diff --git a/src/humanloop/base_client.py b/src/humanloop/base_client.py
index bf72be6a..a11298b8 100644
--- a/src/humanloop/base_client.py
+++ b/src/humanloop/base_client.py
@@ -11,6 +11,7 @@
from .datasets.client import DatasetsClient
from .evaluators.client import EvaluatorsClient
from .flows.client import FlowsClient
+from .agents.client import AgentsClient
from .directories.client import DirectoriesClient
from .files.client import FilesClient
from .evaluations.client import EvaluationsClient
@@ -21,6 +22,7 @@
from .datasets.client import AsyncDatasetsClient
from .evaluators.client import AsyncEvaluatorsClient
from .flows.client import AsyncFlowsClient
+from .agents.client import AsyncAgentsClient
from .directories.client import AsyncDirectoriesClient
from .files.client import AsyncFilesClient
from .evaluations.client import AsyncEvaluationsClient
@@ -96,6 +98,7 @@ def __init__(
self.datasets = DatasetsClient(client_wrapper=self._client_wrapper)
self.evaluators = EvaluatorsClient(client_wrapper=self._client_wrapper)
self.flows = FlowsClient(client_wrapper=self._client_wrapper)
+ self.agents = AgentsClient(client_wrapper=self._client_wrapper)
self.directories = DirectoriesClient(client_wrapper=self._client_wrapper)
self.files = FilesClient(client_wrapper=self._client_wrapper)
self.evaluations = EvaluationsClient(client_wrapper=self._client_wrapper)
@@ -171,6 +174,7 @@ def __init__(
self.datasets = AsyncDatasetsClient(client_wrapper=self._client_wrapper)
self.evaluators = AsyncEvaluatorsClient(client_wrapper=self._client_wrapper)
self.flows = AsyncFlowsClient(client_wrapper=self._client_wrapper)
+ self.agents = AsyncAgentsClient(client_wrapper=self._client_wrapper)
self.directories = AsyncDirectoriesClient(client_wrapper=self._client_wrapper)
self.files = AsyncFilesClient(client_wrapper=self._client_wrapper)
self.evaluations = AsyncEvaluationsClient(client_wrapper=self._client_wrapper)
diff --git a/src/humanloop/core/client_wrapper.py b/src/humanloop/core/client_wrapper.py
index f25dc2ca..94cf9db0 100644
--- a/src/humanloop/core/client_wrapper.py
+++ b/src/humanloop/core/client_wrapper.py
@@ -14,10 +14,10 @@ def __init__(self, *, api_key: str, base_url: str, timeout: typing.Optional[floa
def get_headers(self) -> typing.Dict[str, str]:
headers: typing.Dict[str, str] = {
- "User-Agent": "humanloop/0.8.35",
+ "User-Agent": "humanloop/0.8.36",
"X-Fern-Language": "Python",
"X-Fern-SDK-Name": "humanloop",
- "X-Fern-SDK-Version": "0.8.35",
+ "X-Fern-SDK-Version": "0.8.36",
}
headers["X-API-KEY"] = self.api_key
return headers
diff --git a/src/humanloop/files/client.py b/src/humanloop/files/client.py
index c07358d0..3f97ee92 100644
--- a/src/humanloop/files/client.py
+++ b/src/humanloop/files/client.py
@@ -7,8 +7,8 @@
from ..types.project_sort_by import ProjectSortBy
from ..types.sort_order import SortOrder
from ..core.request_options import RequestOptions
-from ..types.paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response import (
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse,
+from ..types.paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response import (
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse,
)
from .types.retrieve_by_path_files_retrieve_by_path_post_response import RetrieveByPathFilesRetrieveByPathPostResponse
from ..core.client_wrapper import AsyncClientWrapper
@@ -44,8 +44,9 @@ def list_files(
environment: typing.Optional[str] = None,
sort_by: typing.Optional[ProjectSortBy] = None,
order: typing.Optional[SortOrder] = None,
+ include_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse:
+ ) -> PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse:
"""
Get a paginated list of files.
@@ -75,12 +76,15 @@ def list_files(
order : typing.Optional[SortOrder]
Direction to sort by.
+ include_content : typing.Optional[bool]
+ Whether to include the serialized file content in the response. Currently only supported for agents and prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse
Successful Response
Examples
@@ -101,6 +105,7 @@ def list_files(
environment=environment,
sort_by=sort_by,
order=order,
+ include_content=include_content,
request_options=request_options,
)
return response.data
@@ -174,8 +179,9 @@ async def list_files(
environment: typing.Optional[str] = None,
sort_by: typing.Optional[ProjectSortBy] = None,
order: typing.Optional[SortOrder] = None,
+ include_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse:
+ ) -> PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse:
"""
Get a paginated list of files.
@@ -205,12 +211,15 @@ async def list_files(
order : typing.Optional[SortOrder]
Direction to sort by.
+ include_content : typing.Optional[bool]
+ Whether to include the serialized file content in the response. Currently only supported for agents and prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse
Successful Response
Examples
@@ -239,6 +248,7 @@ async def main() -> None:
environment=environment,
sort_by=sort_by,
order=order,
+ include_content=include_content,
request_options=request_options,
)
return response.data
diff --git a/src/humanloop/files/raw_client.py b/src/humanloop/files/raw_client.py
index 19f52cf2..2d30dac9 100644
--- a/src/humanloop/files/raw_client.py
+++ b/src/humanloop/files/raw_client.py
@@ -7,8 +7,8 @@
from ..types.sort_order import SortOrder
from ..core.request_options import RequestOptions
from ..core.http_response import HttpResponse
-from ..types.paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response import (
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse,
+from ..types.paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response import (
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse,
)
from ..core.unchecked_base_model import construct_type
from ..errors.unprocessable_entity_error import UnprocessableEntityError
@@ -38,8 +38,11 @@ def list_files(
environment: typing.Optional[str] = None,
sort_by: typing.Optional[ProjectSortBy] = None,
order: typing.Optional[SortOrder] = None,
+ include_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> HttpResponse[PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse]:
+ ) -> HttpResponse[
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse
+ ]:
"""
Get a paginated list of files.
@@ -69,12 +72,15 @@ def list_files(
order : typing.Optional[SortOrder]
Direction to sort by.
+ include_content : typing.Optional[bool]
+ Whether to include the serialized file content in the response. Currently only supported for agents and prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- HttpResponse[PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse]
+ HttpResponse[PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse]
Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
@@ -89,15 +95,16 @@ def list_files(
"environment": environment,
"sort_by": sort_by,
"order": order,
+ "include_content": include_content,
},
request_options=request_options,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse,
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse,
construct_type(
- type_=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse, # type: ignore
+ type_=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse, # type: ignore
object_=_response.json(),
),
)
@@ -199,8 +206,11 @@ async def list_files(
environment: typing.Optional[str] = None,
sort_by: typing.Optional[ProjectSortBy] = None,
order: typing.Optional[SortOrder] = None,
+ include_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> AsyncHttpResponse[PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse]:
+ ) -> AsyncHttpResponse[
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse
+ ]:
"""
Get a paginated list of files.
@@ -230,12 +240,15 @@ async def list_files(
order : typing.Optional[SortOrder]
Direction to sort by.
+ include_content : typing.Optional[bool]
+ Whether to include the serialized file content in the response. Currently only supported for agents and prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- AsyncHttpResponse[PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse]
+ AsyncHttpResponse[PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse]
Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
@@ -250,15 +263,16 @@ async def list_files(
"environment": environment,
"sort_by": sort_by,
"order": order,
+ "include_content": include_content,
},
request_options=request_options,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse,
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse,
construct_type(
- type_=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse, # type: ignore
+ type_=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse, # type: ignore
object_=_response.json(),
),
)
diff --git a/src/humanloop/files/requests/retrieve_by_path_files_retrieve_by_path_post_response.py b/src/humanloop/files/requests/retrieve_by_path_files_retrieve_by_path_post_response.py
index c1618edb..8c070ab3 100644
--- a/src/humanloop/files/requests/retrieve_by_path_files_retrieve_by_path_post_response.py
+++ b/src/humanloop/files/requests/retrieve_by_path_files_retrieve_by_path_post_response.py
@@ -6,7 +6,13 @@
from ...requests.dataset_response import DatasetResponseParams
from ...requests.evaluator_response import EvaluatorResponseParams
from ...requests.flow_response import FlowResponseParams
+from ...requests.agent_response import AgentResponseParams
RetrieveByPathFilesRetrieveByPathPostResponseParams = typing.Union[
- PromptResponseParams, ToolResponseParams, DatasetResponseParams, EvaluatorResponseParams, FlowResponseParams
+ PromptResponseParams,
+ ToolResponseParams,
+ DatasetResponseParams,
+ EvaluatorResponseParams,
+ FlowResponseParams,
+ AgentResponseParams,
]
diff --git a/src/humanloop/files/types/retrieve_by_path_files_retrieve_by_path_post_response.py b/src/humanloop/files/types/retrieve_by_path_files_retrieve_by_path_post_response.py
index 48415fc9..46ea271a 100644
--- a/src/humanloop/files/types/retrieve_by_path_files_retrieve_by_path_post_response.py
+++ b/src/humanloop/files/types/retrieve_by_path_files_retrieve_by_path_post_response.py
@@ -6,7 +6,8 @@
from ...types.dataset_response import DatasetResponse
from ...types.evaluator_response import EvaluatorResponse
from ...types.flow_response import FlowResponse
+from ...types.agent_response import AgentResponse
RetrieveByPathFilesRetrieveByPathPostResponse = typing.Union[
- PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse
+ PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse, AgentResponse
]
diff --git a/src/humanloop/flows/client.py b/src/humanloop/flows/client.py
index a11776fc..bcb9491c 100644
--- a/src/humanloop/flows/client.py
+++ b/src/humanloop/flows/client.py
@@ -214,10 +214,10 @@ def log(
output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.",
log_status="incomplete",
start_time=datetime.datetime.fromisoformat(
- "2024-07-08 19:40:35+00:00",
+ "2024-07-08 21:40:35+00:00",
),
end_time=datetime.datetime.fromisoformat(
- "2024-07-08 19:40:39+00:00",
+ "2024-07-08 21:40:39+00:00",
),
)
"""
@@ -1128,10 +1128,10 @@ async def main() -> None:
output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.",
log_status="incomplete",
start_time=datetime.datetime.fromisoformat(
- "2024-07-08 19:40:35+00:00",
+ "2024-07-08 21:40:35+00:00",
),
end_time=datetime.datetime.fromisoformat(
- "2024-07-08 19:40:39+00:00",
+ "2024-07-08 21:40:39+00:00",
),
)
diff --git a/src/humanloop/logs/client.py b/src/humanloop/logs/client.py
index 17007c1b..b16d1f6b 100644
--- a/src/humanloop/logs/client.py
+++ b/src/humanloop/logs/client.py
@@ -99,7 +99,7 @@ def list(
If provided, limit the response to a random subset of logs from the filtered results. (This will be an approximate sample, not a strict limit.)
include_trace_children : typing.Optional[bool]
- If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow Logs.
+ If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow or Agent Logs.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -338,7 +338,7 @@ async def list(
If provided, limit the response to a random subset of logs from the filtered results. (This will be an approximate sample, not a strict limit.)
include_trace_children : typing.Optional[bool]
- If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow Logs.
+ If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow or Agent Logs.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
diff --git a/src/humanloop/prompts/__init__.py b/src/humanloop/prompts/__init__.py
index c1147ff2..557dcc5c 100644
--- a/src/humanloop/prompts/__init__.py
+++ b/src/humanloop/prompts/__init__.py
@@ -1,33 +1,49 @@
# This file was auto-generated by Fern from our API Definition.
from .types import (
+ PromptLogRequestPrompt,
PromptLogRequestToolChoice,
PromptLogUpdateRequestToolChoice,
+ PromptRequestReasoningEffort,
PromptRequestStop,
PromptRequestTemplate,
+ PromptsCallRequestPrompt,
PromptsCallRequestToolChoice,
+ PromptsCallStreamRequestPrompt,
PromptsCallStreamRequestToolChoice,
)
from .requests import (
+ PromptLogRequestPromptParams,
PromptLogRequestToolChoiceParams,
PromptLogUpdateRequestToolChoiceParams,
+ PromptRequestReasoningEffortParams,
PromptRequestStopParams,
PromptRequestTemplateParams,
+ PromptsCallRequestPromptParams,
PromptsCallRequestToolChoiceParams,
+ PromptsCallStreamRequestPromptParams,
PromptsCallStreamRequestToolChoiceParams,
)
__all__ = [
+ "PromptLogRequestPrompt",
+ "PromptLogRequestPromptParams",
"PromptLogRequestToolChoice",
"PromptLogRequestToolChoiceParams",
"PromptLogUpdateRequestToolChoice",
"PromptLogUpdateRequestToolChoiceParams",
+ "PromptRequestReasoningEffort",
+ "PromptRequestReasoningEffortParams",
"PromptRequestStop",
"PromptRequestStopParams",
"PromptRequestTemplate",
"PromptRequestTemplateParams",
+ "PromptsCallRequestPrompt",
+ "PromptsCallRequestPromptParams",
"PromptsCallRequestToolChoice",
"PromptsCallRequestToolChoiceParams",
+ "PromptsCallStreamRequestPrompt",
+ "PromptsCallStreamRequestPromptParams",
"PromptsCallStreamRequestToolChoice",
"PromptsCallStreamRequestToolChoiceParams",
]
diff --git a/src/humanloop/prompts/client.py b/src/humanloop/prompts/client.py
index e2fff4c3..865c033f 100644
--- a/src/humanloop/prompts/client.py
+++ b/src/humanloop/prompts/client.py
@@ -5,7 +5,7 @@
from .raw_client import RawPromptsClient
from ..requests.chat_message import ChatMessageParams
from .requests.prompt_log_request_tool_choice import PromptLogRequestToolChoiceParams
-from ..requests.prompt_kernel_request import PromptKernelRequestParams
+from .requests.prompt_log_request_prompt import PromptLogRequestPromptParams
import datetime as dt
from ..types.log_status import LogStatus
from ..core.request_options import RequestOptions
@@ -13,9 +13,11 @@
from .requests.prompt_log_update_request_tool_choice import PromptLogUpdateRequestToolChoiceParams
from ..types.log_response import LogResponse
from .requests.prompts_call_stream_request_tool_choice import PromptsCallStreamRequestToolChoiceParams
+from .requests.prompts_call_stream_request_prompt import PromptsCallStreamRequestPromptParams
from ..requests.provider_api_keys import ProviderApiKeysParams
from ..types.prompt_call_stream_response import PromptCallStreamResponse
from .requests.prompts_call_request_tool_choice import PromptsCallRequestToolChoiceParams
+from .requests.prompts_call_request_prompt import PromptsCallRequestPromptParams
from ..types.prompt_call_response import PromptCallResponse
from ..types.project_sort_by import ProjectSortBy
from ..types.sort_order import SortOrder
@@ -33,7 +35,7 @@
from ..types.model_providers import ModelProviders
from .requests.prompt_request_stop import PromptRequestStopParams
from ..requests.response_format import ResponseFormatParams
-from ..types.reasoning_effort import ReasoningEffort
+from .requests.prompt_request_reasoning_effort import PromptRequestReasoningEffortParams
from ..requests.tool_function import ToolFunctionParams
from ..types.populate_template_response import PopulateTemplateResponse
from ..types.list_prompts import ListPrompts
@@ -44,6 +46,7 @@
from ..requests.evaluator_activation_deactivation_request_deactivate_item import (
EvaluatorActivationDeactivationRequestDeactivateItemParams,
)
+from ..types.prompt_kernel_request import PromptKernelRequest
from ..core.client_wrapper import AsyncClientWrapper
from .raw_client import AsyncRawPromptsClient
from ..core.pagination import AsyncPager
@@ -84,7 +87,7 @@ def log(
finish_reason: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptLogRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptLogRequestPromptParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -165,8 +168,11 @@ def log(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptLogRequestPromptParams]
+ The prompt configuration to use. Two formats are supported:
+ - A `'PromptKernelRequest'` object containing the prompt configuration
+ - A string containing a serialized .prompt file
+ A new Prompt version will be created if the provided details are new.
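+                For example (illustrative values): `prompt={"model": "gpt-4o"}`, or `prompt=Path("qa.prompt").read_text()` to pass the contents of a .prompt file.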
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -256,7 +262,7 @@ def log(
messages=[{"role": "user", "content": "What really happened at Roswell?"}],
inputs={"person": "Trump"},
created_at=datetime.datetime.fromisoformat(
- "2024-07-18 21:29:35.178000+00:00",
+ "2024-07-18 23:29:35.178000+00:00",
),
provider_latency=6.5931549072265625,
output_message={
@@ -479,7 +485,7 @@ def call_stream(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallStreamRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallStreamRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -537,8 +543,11 @@ def call_stream(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
+ The prompt configuration to use. Two formats are supported:
+ - A `'PromptKernelRequest'` object containing the prompt configuration
+ - A string containing a serialized .prompt file
+ A new Prompt version will be created if the provided details are new.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -648,7 +657,7 @@ def call(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -706,8 +715,11 @@ def call(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallRequestPromptParams]
+ The prompt configuration to use. Two formats are supported:
+ - A `'PromptKernelRequest'` object containing the prompt configuration
+ - A string containing a serialized .prompt file
+ A new Prompt version will be created if the provided details are new.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -962,7 +974,7 @@ def upsert(
other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
seed: typing.Optional[int] = OMIT,
response_format: typing.Optional[ResponseFormatParams] = OMIT,
- reasoning_effort: typing.Optional[ReasoningEffort] = OMIT,
+ reasoning_effort: typing.Optional[PromptRequestReasoningEffortParams] = OMIT,
tools: typing.Optional[typing.Sequence[ToolFunctionParams]] = OMIT,
linked_tools: typing.Optional[typing.Sequence[str]] = OMIT,
attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -1037,8 +1049,8 @@ def upsert(
response_format : typing.Optional[ResponseFormatParams]
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
- reasoning_effort : typing.Optional[ReasoningEffort]
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+ reasoning_effort : typing.Optional[PromptRequestReasoningEffortParams]
+            Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an `OpenAiReasoningEffort` enum value. Anthropic reasoning models expect an integer, which specifies the maximum reasoning token budget.
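+            For example (illustrative values): `reasoning_effort="medium"` for an OpenAI model, or `reasoning_effort=1024` to give an Anthropic model a reasoning budget of 1024 tokens.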
tools : typing.Optional[typing.Sequence[ToolFunctionParams]]
The tool specification that the model can choose to call if Tool calling is supported.
@@ -1599,6 +1611,93 @@ def update_monitoring(
)
return response.data
+ def serialize(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> str:
+ """
+ Serialize a Prompt to the .prompt file format.
+
+ Useful for storing the Prompt with your code in a version control system,
+ or for editing with an AI tool.
+
+ By default, the deployed version of the Prompt is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Prompt.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Prompt.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Prompt to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ str
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.prompts.serialize(
+ id="id",
+ )
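+
+        # Hypothetical: serialize the version deployed to a named environment.
+        client.prompts.serialize(
+            id="id",
+            environment="production",
+        )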
+ """
+ response = self._raw_client.serialize(
+ id, version_id=version_id, environment=environment, request_options=request_options
+ )
+ return response.data
+
+ def deserialize(
+ self, *, prompt: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> PromptKernelRequest:
+ """
+ Deserialize a Prompt from the .prompt file format.
+
+        This returns a subset of the attributes required by a Prompt:
+        the fields that define the Prompt version (e.g. `model`, `temperature`, etc.).
+
+ Parameters
+ ----------
+        prompt : str
+            The serialized Prompt, in the .prompt file format.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ PromptKernelRequest
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.prompts.deserialize(
+ prompt="prompt",
+ )
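+
+        # The result is a PromptKernelRequest whose fields (e.g. `model`,
+        # `temperature`) define the Prompt version parsed from the file.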
+ """
+ response = self._raw_client.deserialize(prompt=prompt, request_options=request_options)
+ return response.data
+
class AsyncPromptsClient:
def __init__(self, *, client_wrapper: AsyncClientWrapper):
@@ -1632,7 +1731,7 @@ async def log(
finish_reason: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptLogRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptLogRequestPromptParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -1713,8 +1812,11 @@ async def log(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptLogRequestPromptParams]
+ The prompt configuration to use. Two formats are supported:
+ - A `'PromptKernelRequest'` object containing the prompt configuration
+ - A string containing a serialized .prompt file
+ A new Prompt version will be created if the provided details are new.
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -1810,7 +1912,7 @@ async def main() -> None:
],
inputs={"person": "Trump"},
created_at=datetime.datetime.fromisoformat(
- "2024-07-18 21:29:35.178000+00:00",
+ "2024-07-18 23:29:35.178000+00:00",
),
provider_latency=6.5931549072265625,
output_message={
@@ -2044,7 +2146,7 @@ async def call_stream(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallStreamRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallStreamRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2102,8 +2204,11 @@ async def call_stream(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
+ The prompt configuration to use. Two formats are supported:
+ - A `'PromptKernelRequest'` object containing the prompt configuration
+ - A string containing a serialized .prompt file
+ A new Prompt version will be created if the provided details are new.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -2222,7 +2327,7 @@ async def call(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2280,8 +2385,11 @@ async def call(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallRequestPromptParams]
+ The prompt configuration to use. Two formats are supported:
+ - A `'PromptKernelRequest'` object containing the prompt configuration
+ - A string containing a serialized .prompt file
+ A new Prompt version will be created if the provided details are new.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -2552,7 +2660,7 @@ async def upsert(
other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
seed: typing.Optional[int] = OMIT,
response_format: typing.Optional[ResponseFormatParams] = OMIT,
- reasoning_effort: typing.Optional[ReasoningEffort] = OMIT,
+ reasoning_effort: typing.Optional[PromptRequestReasoningEffortParams] = OMIT,
tools: typing.Optional[typing.Sequence[ToolFunctionParams]] = OMIT,
linked_tools: typing.Optional[typing.Sequence[str]] = OMIT,
attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2627,8 +2735,8 @@ async def upsert(
response_format : typing.Optional[ResponseFormatParams]
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
- reasoning_effort : typing.Optional[ReasoningEffort]
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+ reasoning_effort : typing.Optional[PromptRequestReasoningEffortParams]
+            Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an `OpenAiReasoningEffort` enum value. Anthropic reasoning models expect an integer, which specifies the maximum reasoning token budget.
tools : typing.Optional[typing.Sequence[ToolFunctionParams]]
The tool specification that the model can choose to call if Tool calling is supported.
@@ -3284,3 +3392,106 @@ async def main() -> None:
id, activate=activate, deactivate=deactivate, request_options=request_options
)
return response.data
+
+ async def serialize(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> str:
+ """
+ Serialize a Prompt to the .prompt file format.
+
+ Useful for storing the Prompt with your code in a version control system,
+ or for editing with an AI tool.
+
+ By default, the deployed version of the Prompt is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Prompt.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Prompt.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Prompt to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ str
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.prompts.serialize(
+ id="id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.serialize(
+ id, version_id=version_id, environment=environment, request_options=request_options
+ )
+ return response.data
+
+ async def deserialize(
+ self, *, prompt: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> PromptKernelRequest:
+ """
+ Deserialize a Prompt from the .prompt file format.
+
+        This returns a subset of the attributes required by a Prompt:
+        the fields that define the Prompt version (e.g. `model`, `temperature`, etc.).
+
+ Parameters
+ ----------
+        prompt : str
+            The serialized Prompt, in the .prompt file format.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ PromptKernelRequest
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.prompts.deserialize(
+ prompt="prompt",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.deserialize(prompt=prompt, request_options=request_options)
+ return response.data
diff --git a/src/humanloop/prompts/raw_client.py b/src/humanloop/prompts/raw_client.py
index b5334c82..f809f1b1 100644
--- a/src/humanloop/prompts/raw_client.py
+++ b/src/humanloop/prompts/raw_client.py
@@ -4,7 +4,7 @@
from ..core.client_wrapper import SyncClientWrapper
from ..requests.chat_message import ChatMessageParams
from .requests.prompt_log_request_tool_choice import PromptLogRequestToolChoiceParams
-from ..requests.prompt_kernel_request import PromptKernelRequestParams
+from .requests.prompt_log_request_prompt import PromptLogRequestPromptParams
import datetime as dt
from ..types.log_status import LogStatus
from ..core.request_options import RequestOptions
@@ -20,11 +20,13 @@
from ..types.log_response import LogResponse
from ..core.jsonable_encoder import jsonable_encoder
from .requests.prompts_call_stream_request_tool_choice import PromptsCallStreamRequestToolChoiceParams
+from .requests.prompts_call_stream_request_prompt import PromptsCallStreamRequestPromptParams
from ..requests.provider_api_keys import ProviderApiKeysParams
from ..types.prompt_call_stream_response import PromptCallStreamResponse
import httpx_sse
import contextlib
from .requests.prompts_call_request_tool_choice import PromptsCallRequestToolChoiceParams
+from .requests.prompts_call_request_prompt import PromptsCallRequestPromptParams
from ..types.prompt_call_response import PromptCallResponse
from ..types.model_endpoints import ModelEndpoints
from .requests.prompt_request_template import PromptRequestTemplateParams
@@ -32,7 +34,7 @@
from ..types.model_providers import ModelProviders
from .requests.prompt_request_stop import PromptRequestStopParams
from ..requests.response_format import ResponseFormatParams
-from ..types.reasoning_effort import ReasoningEffort
+from .requests.prompt_request_reasoning_effort import PromptRequestReasoningEffortParams
from ..requests.tool_function import ToolFunctionParams
from ..types.prompt_response import PromptResponse
from ..types.populate_template_response import PopulateTemplateResponse
@@ -44,6 +46,7 @@
from ..requests.evaluator_activation_deactivation_request_deactivate_item import (
EvaluatorActivationDeactivationRequestDeactivateItemParams,
)
+from ..types.prompt_kernel_request import PromptKernelRequest
from ..core.client_wrapper import AsyncClientWrapper
from ..core.http_response import AsyncHttpResponse
@@ -72,7 +75,7 @@ def log(
finish_reason: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptLogRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptLogRequestPromptParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -153,8 +156,11 @@ def log(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptLogRequestPromptParams]
+ The prompt configuration to use. Two formats are supported:
+ - A `'PromptKernelRequest'` object containing the prompt configuration
+ - A string containing a serialized .prompt file
+ A new Prompt version will be created if the provided details are new.
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -248,7 +254,7 @@ def log(
object_=tool_choice, annotation=PromptLogRequestToolChoiceParams, direction="write"
),
"prompt": convert_and_respect_annotation_metadata(
- object_=prompt, annotation=PromptKernelRequestParams, direction="write"
+ object_=prompt, annotation=PromptLogRequestPromptParams, direction="write"
),
"start_time": start_time,
"end_time": end_time,
@@ -495,7 +501,7 @@ def call_stream(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallStreamRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallStreamRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -553,8 +559,11 @@ def call_stream(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
+ The prompt configuration to use. Two formats are supported:
+ - A `'PromptKernelRequest'` object containing the prompt configuration
+ - A string containing a serialized .prompt file
+ A new Prompt version will be created if the provided details are new.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -632,7 +641,7 @@ def call_stream(
object_=tool_choice, annotation=PromptsCallStreamRequestToolChoiceParams, direction="write"
),
"prompt": convert_and_respect_annotation_metadata(
- object_=prompt, annotation=PromptKernelRequestParams, direction="write"
+ object_=prompt, annotation=PromptsCallStreamRequestPromptParams, direction="write"
),
"inputs": inputs,
"source": source,
@@ -705,7 +714,7 @@ def call(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -763,8 +772,11 @@ def call(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallRequestPromptParams]
+ The prompt configuration to use. Two formats are supported:
+ - A `'PromptKernelRequest'` object containing the prompt configuration
+ - A string containing a serialized .prompt file
+ A new Prompt version will be created if the provided details are new.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -842,7 +854,7 @@ def call(
object_=tool_choice, annotation=PromptsCallRequestToolChoiceParams, direction="write"
),
"prompt": convert_and_respect_annotation_metadata(
- object_=prompt, annotation=PromptKernelRequestParams, direction="write"
+ object_=prompt, annotation=PromptsCallRequestPromptParams, direction="write"
),
"inputs": inputs,
"source": source,
@@ -915,7 +927,7 @@ def upsert(
other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
seed: typing.Optional[int] = OMIT,
response_format: typing.Optional[ResponseFormatParams] = OMIT,
- reasoning_effort: typing.Optional[ReasoningEffort] = OMIT,
+ reasoning_effort: typing.Optional[PromptRequestReasoningEffortParams] = OMIT,
tools: typing.Optional[typing.Sequence[ToolFunctionParams]] = OMIT,
linked_tools: typing.Optional[typing.Sequence[str]] = OMIT,
attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -990,8 +1002,8 @@ def upsert(
response_format : typing.Optional[ResponseFormatParams]
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
- reasoning_effort : typing.Optional[ReasoningEffort]
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+ reasoning_effort : typing.Optional[PromptRequestReasoningEffortParams]
+            Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an `OpenAiReasoningEffort` enum value. Anthropic reasoning models expect an integer, which specifies the maximum reasoning token budget.
tools : typing.Optional[typing.Sequence[ToolFunctionParams]]
The tool specification that the model can choose to call if Tool calling is supported.
@@ -1051,7 +1063,9 @@ def upsert(
"response_format": convert_and_respect_annotation_metadata(
object_=response_format, annotation=ResponseFormatParams, direction="write"
),
- "reasoning_effort": reasoning_effort,
+ "reasoning_effort": convert_and_respect_annotation_metadata(
+ object_=reasoning_effort, annotation=PromptRequestReasoningEffortParams, direction="write"
+ ),
"tools": convert_and_respect_annotation_metadata(
object_=tools, annotation=typing.Sequence[ToolFunctionParams], direction="write"
),
@@ -1744,6 +1758,127 @@ def update_monitoring(
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
+ def serialize(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[str]:
+ """
+ Serialize a Prompt to the .prompt file format.
+
+ Useful for storing the Prompt with your code in a version control system,
+ or for editing with an AI tool.
+
+ By default, the deployed version of the Prompt is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Prompt.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Prompt.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Prompt to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[str]
+ Successful Response
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"prompts/{jsonable_encoder(id)}/serialize",
+ method="GET",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+                return HttpResponse(response=_response, data=_response.text)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def deserialize(
+ self, *, prompt: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> HttpResponse[PromptKernelRequest]:
+ """
+ Deserialize a Prompt from the .prompt file format.
+
+        This returns a subset of the attributes required by a Prompt:
+        the fields that define the Prompt version (e.g. `model`, `temperature`, etc.).
+
+ Parameters
+ ----------
+        prompt : str
+            The serialized Prompt, in the .prompt file format.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[PromptKernelRequest]
+ Successful Response
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "prompts/deserialize",
+ method="POST",
+ json={
+ "prompt": prompt,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ PromptKernelRequest,
+ construct_type(
+ type_=PromptKernelRequest, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
class AsyncRawPromptsClient:
def __init__(self, *, client_wrapper: AsyncClientWrapper):
@@ -1766,7 +1901,7 @@ async def log(
finish_reason: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptLogRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptLogRequestPromptParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -1847,8 +1982,11 @@ async def log(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptLogRequestPromptParams]
+ The prompt configuration to use. Two formats are supported:
+ - A `'PromptKernelRequest'` object containing the prompt configuration
+ - A string containing a serialized .prompt file
+ A new Prompt version will be created if the provided details are new.
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -1942,7 +2080,7 @@ async def log(
object_=tool_choice, annotation=PromptLogRequestToolChoiceParams, direction="write"
),
"prompt": convert_and_respect_annotation_metadata(
- object_=prompt, annotation=PromptKernelRequestParams, direction="write"
+ object_=prompt, annotation=PromptLogRequestPromptParams, direction="write"
),
"start_time": start_time,
"end_time": end_time,
@@ -2189,7 +2327,7 @@ async def call_stream(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallStreamRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallStreamRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2247,8 +2385,11 @@ async def call_stream(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
+ The prompt configuration to use. Two formats are supported:
+ - A `'PromptKernelRequest'` object containing the prompt configuration
+ - A string containing a serialized .prompt file
+ A new Prompt version will be created if the provided details are new.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -2326,7 +2467,7 @@ async def call_stream(
object_=tool_choice, annotation=PromptsCallStreamRequestToolChoiceParams, direction="write"
),
"prompt": convert_and_respect_annotation_metadata(
- object_=prompt, annotation=PromptKernelRequestParams, direction="write"
+ object_=prompt, annotation=PromptsCallStreamRequestPromptParams, direction="write"
),
"inputs": inputs,
"source": source,
@@ -2399,7 +2540,7 @@ async def call(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2457,8 +2598,11 @@ async def call(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallRequestPromptParams]
+ The prompt configuration to use. Two formats are supported:
+ - A `'PromptKernelRequest'` object containing the prompt configuration
+ - A string containing a serialized .prompt file
+ A new Prompt version will be created if the provided details are new.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -2536,7 +2680,7 @@ async def call(
object_=tool_choice, annotation=PromptsCallRequestToolChoiceParams, direction="write"
),
"prompt": convert_and_respect_annotation_metadata(
- object_=prompt, annotation=PromptKernelRequestParams, direction="write"
+ object_=prompt, annotation=PromptsCallRequestPromptParams, direction="write"
),
"inputs": inputs,
"source": source,
@@ -2609,7 +2753,7 @@ async def upsert(
other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
seed: typing.Optional[int] = OMIT,
response_format: typing.Optional[ResponseFormatParams] = OMIT,
- reasoning_effort: typing.Optional[ReasoningEffort] = OMIT,
+ reasoning_effort: typing.Optional[PromptRequestReasoningEffortParams] = OMIT,
tools: typing.Optional[typing.Sequence[ToolFunctionParams]] = OMIT,
linked_tools: typing.Optional[typing.Sequence[str]] = OMIT,
attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2684,8 +2828,8 @@ async def upsert(
response_format : typing.Optional[ResponseFormatParams]
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
- reasoning_effort : typing.Optional[ReasoningEffort]
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+ reasoning_effort : typing.Optional[PromptRequestReasoningEffortParams]
+            Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an `OpenAiReasoningEffort` enum value. Anthropic reasoning models expect an integer, which specifies the maximum reasoning token budget.
tools : typing.Optional[typing.Sequence[ToolFunctionParams]]
The tool specification that the model can choose to call if Tool calling is supported.
@@ -2745,7 +2889,9 @@ async def upsert(
"response_format": convert_and_respect_annotation_metadata(
object_=response_format, annotation=ResponseFormatParams, direction="write"
),
- "reasoning_effort": reasoning_effort,
+ "reasoning_effort": convert_and_respect_annotation_metadata(
+ object_=reasoning_effort, annotation=PromptRequestReasoningEffortParams, direction="write"
+ ),
"tools": convert_and_respect_annotation_metadata(
object_=tools, annotation=typing.Sequence[ToolFunctionParams], direction="write"
),
@@ -3439,3 +3585,124 @@ async def update_monitoring(
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def serialize(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[str]:
+ """
+ Serialize a Prompt to the .prompt file format.
+
+ Useful for storing the Prompt with your code in a version control system,
+ or for editing with an AI tool.
+
+ By default, the deployed version of the Prompt is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Prompt.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Prompt.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Prompt to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[str]
+ Successful Response
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"prompts/{jsonable_encoder(id)}/serialize",
+ method="GET",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+                return AsyncHttpResponse(response=_response, data=_response.text)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def deserialize(
+ self, *, prompt: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncHttpResponse[PromptKernelRequest]:
+ """
+ Deserialize a Prompt from the .prompt file format.
+
+        This returns a subset of the attributes required by a Prompt:
+        the fields that define the Prompt version (e.g. `model`, `temperature`, etc.).
+
+ Parameters
+ ----------
+        prompt : str
+            The serialized Prompt, in the .prompt file format.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[PromptKernelRequest]
+ Successful Response
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "prompts/deserialize",
+ method="POST",
+ json={
+ "prompt": prompt,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ PromptKernelRequest,
+ construct_type(
+ type_=PromptKernelRequest, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/humanloop/prompts/requests/__init__.py b/src/humanloop/prompts/requests/__init__.py
index c5119552..ae1cfb6a 100644
--- a/src/humanloop/prompts/requests/__init__.py
+++ b/src/humanloop/prompts/requests/__init__.py
@@ -1,17 +1,25 @@
# This file was auto-generated by Fern from our API Definition.
+from .prompt_log_request_prompt import PromptLogRequestPromptParams
from .prompt_log_request_tool_choice import PromptLogRequestToolChoiceParams
from .prompt_log_update_request_tool_choice import PromptLogUpdateRequestToolChoiceParams
+from .prompt_request_reasoning_effort import PromptRequestReasoningEffortParams
from .prompt_request_stop import PromptRequestStopParams
from .prompt_request_template import PromptRequestTemplateParams
+from .prompts_call_request_prompt import PromptsCallRequestPromptParams
from .prompts_call_request_tool_choice import PromptsCallRequestToolChoiceParams
+from .prompts_call_stream_request_prompt import PromptsCallStreamRequestPromptParams
from .prompts_call_stream_request_tool_choice import PromptsCallStreamRequestToolChoiceParams
__all__ = [
+ "PromptLogRequestPromptParams",
"PromptLogRequestToolChoiceParams",
"PromptLogUpdateRequestToolChoiceParams",
+ "PromptRequestReasoningEffortParams",
"PromptRequestStopParams",
"PromptRequestTemplateParams",
+ "PromptsCallRequestPromptParams",
"PromptsCallRequestToolChoiceParams",
+ "PromptsCallStreamRequestPromptParams",
"PromptsCallStreamRequestToolChoiceParams",
]
diff --git a/src/humanloop/prompts/requests/prompt_log_request_prompt.py b/src/humanloop/prompts/requests/prompt_log_request_prompt.py
new file mode 100644
index 00000000..8473bb42
--- /dev/null
+++ b/src/humanloop/prompts/requests/prompt_log_request_prompt.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.prompt_kernel_request import PromptKernelRequestParams
+
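+# Inline Prompt details as params, or a serialized .prompt file as a string.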
+PromptLogRequestPromptParams = typing.Union[PromptKernelRequestParams, str]
diff --git a/src/humanloop/prompts/requests/prompt_request_reasoning_effort.py b/src/humanloop/prompts/requests/prompt_request_reasoning_effort.py
new file mode 100644
index 00000000..080a107e
--- /dev/null
+++ b/src/humanloop/prompts/requests/prompt_request_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.open_ai_reasoning_effort import OpenAiReasoningEffort
+
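+# OpenAI-style effort enum, or an integer reasoning-token budget for Anthropic models.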
+PromptRequestReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/prompts/requests/prompts_call_request_prompt.py b/src/humanloop/prompts/requests/prompts_call_request_prompt.py
new file mode 100644
index 00000000..7a236235
--- /dev/null
+++ b/src/humanloop/prompts/requests/prompts_call_request_prompt.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.prompt_kernel_request import PromptKernelRequestParams
+
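+# Either inline Prompt configuration params or a serialized .prompt file string.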
+PromptsCallRequestPromptParams = typing.Union[PromptKernelRequestParams, str]
diff --git a/src/humanloop/prompts/requests/prompts_call_stream_request_prompt.py b/src/humanloop/prompts/requests/prompts_call_stream_request_prompt.py
new file mode 100644
index 00000000..9524425b
--- /dev/null
+++ b/src/humanloop/prompts/requests/prompts_call_stream_request_prompt.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.prompt_kernel_request import PromptKernelRequestParams
+
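+# Inline Prompt configuration params, or a serialized .prompt file as a string.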
+PromptsCallStreamRequestPromptParams = typing.Union[PromptKernelRequestParams, str]
diff --git a/src/humanloop/prompts/types/__init__.py b/src/humanloop/prompts/types/__init__.py
index 644cf6b5..40326bce 100644
--- a/src/humanloop/prompts/types/__init__.py
+++ b/src/humanloop/prompts/types/__init__.py
@@ -1,17 +1,25 @@
# This file was auto-generated by Fern from our API Definition.
+from .prompt_log_request_prompt import PromptLogRequestPrompt
from .prompt_log_request_tool_choice import PromptLogRequestToolChoice
from .prompt_log_update_request_tool_choice import PromptLogUpdateRequestToolChoice
+from .prompt_request_reasoning_effort import PromptRequestReasoningEffort
from .prompt_request_stop import PromptRequestStop
from .prompt_request_template import PromptRequestTemplate
+from .prompts_call_request_prompt import PromptsCallRequestPrompt
from .prompts_call_request_tool_choice import PromptsCallRequestToolChoice
+from .prompts_call_stream_request_prompt import PromptsCallStreamRequestPrompt
from .prompts_call_stream_request_tool_choice import PromptsCallStreamRequestToolChoice
__all__ = [
+ "PromptLogRequestPrompt",
"PromptLogRequestToolChoice",
"PromptLogUpdateRequestToolChoice",
+ "PromptRequestReasoningEffort",
"PromptRequestStop",
"PromptRequestTemplate",
+ "PromptsCallRequestPrompt",
"PromptsCallRequestToolChoice",
+ "PromptsCallStreamRequestPrompt",
"PromptsCallStreamRequestToolChoice",
]
diff --git a/src/humanloop/prompts/types/prompt_log_request_prompt.py b/src/humanloop/prompts/types/prompt_log_request_prompt.py
new file mode 100644
index 00000000..4a0791dc
--- /dev/null
+++ b/src/humanloop/prompts/types/prompt_log_request_prompt.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.prompt_kernel_request import PromptKernelRequest
+
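+# A PromptKernelRequest model, or a serialized .prompt file as a string.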
+PromptLogRequestPrompt = typing.Union[PromptKernelRequest, str]
diff --git a/src/humanloop/prompts/types/prompt_request_reasoning_effort.py b/src/humanloop/prompts/types/prompt_request_reasoning_effort.py
new file mode 100644
index 00000000..33f35288
--- /dev/null
+++ b/src/humanloop/prompts/types/prompt_request_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.open_ai_reasoning_effort import OpenAiReasoningEffort
+
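+# OpenAI-style effort enum, or an integer reasoning-token budget for Anthropic models.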
+PromptRequestReasoningEffort = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/prompts/types/prompts_call_request_prompt.py b/src/humanloop/prompts/types/prompts_call_request_prompt.py
new file mode 100644
index 00000000..78a9f5a1
--- /dev/null
+++ b/src/humanloop/prompts/types/prompts_call_request_prompt.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.prompt_kernel_request import PromptKernelRequest
+
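+# A PromptKernelRequest model, or a serialized .prompt file as a string.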
+PromptsCallRequestPrompt = typing.Union[PromptKernelRequest, str]
diff --git a/src/humanloop/prompts/types/prompts_call_stream_request_prompt.py b/src/humanloop/prompts/types/prompts_call_stream_request_prompt.py
new file mode 100644
index 00000000..71376823
--- /dev/null
+++ b/src/humanloop/prompts/types/prompts_call_stream_request_prompt.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.prompt_kernel_request import PromptKernelRequest
+
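+# A PromptKernelRequest model, or a serialized .prompt file as a string.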
+PromptsCallStreamRequestPrompt = typing.Union[PromptKernelRequest, str]
diff --git a/src/humanloop/requests/__init__.py b/src/humanloop/requests/__init__.py
index bd9458ba..ba9f74af 100644
--- a/src/humanloop/requests/__init__.py
+++ b/src/humanloop/requests/__init__.py
@@ -1,11 +1,40 @@
# This file was auto-generated by Fern from our API Definition.
+from .agent_call_response import AgentCallResponseParams
+from .agent_call_response_tool_choice import AgentCallResponseToolChoiceParams
+from .agent_call_stream_response import AgentCallStreamResponseParams
+from .agent_call_stream_response_payload import AgentCallStreamResponsePayloadParams
from .agent_config_response import AgentConfigResponseParams
+from .agent_continue_response import AgentContinueResponseParams
+from .agent_continue_response_tool_choice import AgentContinueResponseToolChoiceParams
+from .agent_continue_stream_response import AgentContinueStreamResponseParams
+from .agent_continue_stream_response_payload import AgentContinueStreamResponsePayloadParams
+from .agent_inline_tool import AgentInlineToolParams
+from .agent_kernel_request import AgentKernelRequestParams
+from .agent_kernel_request_reasoning_effort import AgentKernelRequestReasoningEffortParams
+from .agent_kernel_request_stop import AgentKernelRequestStopParams
+from .agent_kernel_request_template import AgentKernelRequestTemplateParams
+from .agent_kernel_request_tools_item import AgentKernelRequestToolsItemParams
+from .agent_linked_file_request import AgentLinkedFileRequestParams
+from .agent_linked_file_response import AgentLinkedFileResponseParams
+from .agent_linked_file_response_file import AgentLinkedFileResponseFileParams
+from .agent_log_response import AgentLogResponseParams
+from .agent_log_response_tool_choice import AgentLogResponseToolChoiceParams
+from .agent_log_stream_response import AgentLogStreamResponseParams
+from .agent_response import AgentResponseParams
+from .agent_response_reasoning_effort import AgentResponseReasoningEffortParams
+from .agent_response_stop import AgentResponseStopParams
+from .agent_response_template import AgentResponseTemplateParams
+from .agent_response_tools_item import AgentResponseToolsItemParams
+from .anthropic_redacted_thinking_content import AnthropicRedactedThinkingContentParams
+from .anthropic_thinking_content import AnthropicThinkingContentParams
from .boolean_evaluator_stats_response import BooleanEvaluatorStatsResponseParams
from .chat_message import ChatMessageParams
from .chat_message_content import ChatMessageContentParams
from .chat_message_content_item import ChatMessageContentItemParams
+from .chat_message_thinking_item import ChatMessageThinkingItemParams
from .code_evaluator_request import CodeEvaluatorRequestParams
+from .create_agent_log_response import CreateAgentLogResponseParams
from .create_datapoint_request import CreateDatapointRequestParams
from .create_datapoint_request_target_value import CreateDatapointRequestTargetValueParams
from .create_evaluator_log_response import CreateEvaluatorLogResponseParams
@@ -51,6 +80,7 @@
from .external_evaluator_request import ExternalEvaluatorRequestParams
from .file_environment_response import FileEnvironmentResponseParams
from .file_environment_response_file import FileEnvironmentResponseFileParams
+from .file_environment_variable_request import FileEnvironmentVariableRequestParams
from .file_id import FileIdParams
from .file_path import FilePathParams
from .file_request import FileRequestParams
@@ -64,7 +94,9 @@
from .image_chat_content import ImageChatContentParams
from .image_url import ImageUrlParams
from .input_response import InputResponseParams
+from .linked_file_request import LinkedFileRequestParams
from .linked_tool_response import LinkedToolResponseParams
+from .list_agents import ListAgentsParams
from .list_datasets import ListDatasetsParams
from .list_evaluators import ListEvaluatorsParams
from .list_flows import ListFlowsParams
@@ -72,28 +104,31 @@
from .list_tools import ListToolsParams
from .llm_evaluator_request import LlmEvaluatorRequestParams
from .log_response import LogResponseParams
+from .log_stream_response import LogStreamResponseParams
from .monitoring_evaluator_environment_request import MonitoringEvaluatorEnvironmentRequestParams
from .monitoring_evaluator_response import MonitoringEvaluatorResponseParams
from .monitoring_evaluator_version_request import MonitoringEvaluatorVersionRequestParams
from .numeric_evaluator_stats_response import NumericEvaluatorStatsResponseParams
from .overall_stats import OverallStatsParams
+from .paginated_data_agent_response import PaginatedDataAgentResponseParams
from .paginated_data_evaluation_log_response import PaginatedDataEvaluationLogResponseParams
from .paginated_data_evaluator_response import PaginatedDataEvaluatorResponseParams
from .paginated_data_flow_response import PaginatedDataFlowResponseParams
from .paginated_data_log_response import PaginatedDataLogResponseParams
from .paginated_data_prompt_response import PaginatedDataPromptResponseParams
from .paginated_data_tool_response import PaginatedDataToolResponseParams
-from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response import (
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseParams,
+from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response import (
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseParams,
)
-from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item import (
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams,
+from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item import (
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams,
)
from .paginated_datapoint_response import PaginatedDatapointResponseParams
from .paginated_dataset_response import PaginatedDatasetResponseParams
from .paginated_evaluation_response import PaginatedEvaluationResponseParams
from .populate_template_response import PopulateTemplateResponseParams
from .populate_template_response_populated_template import PopulateTemplateResponsePopulatedTemplateParams
+from .populate_template_response_reasoning_effort import PopulateTemplateResponseReasoningEffortParams
from .populate_template_response_stop import PopulateTemplateResponseStopParams
from .populate_template_response_template import PopulateTemplateResponseTemplateParams
from .prompt_call_log_response import PromptCallLogResponseParams
@@ -101,11 +136,13 @@
from .prompt_call_response_tool_choice import PromptCallResponseToolChoiceParams
from .prompt_call_stream_response import PromptCallStreamResponseParams
from .prompt_kernel_request import PromptKernelRequestParams
+from .prompt_kernel_request_reasoning_effort import PromptKernelRequestReasoningEffortParams
from .prompt_kernel_request_stop import PromptKernelRequestStopParams
from .prompt_kernel_request_template import PromptKernelRequestTemplateParams
from .prompt_log_response import PromptLogResponseParams
from .prompt_log_response_tool_choice import PromptLogResponseToolChoiceParams
from .prompt_response import PromptResponseParams
+from .prompt_response_reasoning_effort import PromptResponseReasoningEffortParams
from .prompt_response_stop import PromptResponseStopParams
from .prompt_response_template import PromptResponseTemplateParams
from .provider_api_keys import ProviderApiKeysParams
@@ -117,6 +154,7 @@
from .text_chat_content import TextChatContentParams
from .text_evaluator_stats_response import TextEvaluatorStatsResponseParams
from .tool_call import ToolCallParams
+from .tool_call_response import ToolCallResponseParams
from .tool_choice import ToolChoiceParams
from .tool_function import ToolFunctionParams
from .tool_kernel_request import ToolKernelRequestParams
@@ -135,12 +173,41 @@
from .version_stats_response_evaluator_version_stats_item import VersionStatsResponseEvaluatorVersionStatsItemParams
__all__ = [
+ "AgentCallResponseParams",
+ "AgentCallResponseToolChoiceParams",
+ "AgentCallStreamResponseParams",
+ "AgentCallStreamResponsePayloadParams",
"AgentConfigResponseParams",
+ "AgentContinueResponseParams",
+ "AgentContinueResponseToolChoiceParams",
+ "AgentContinueStreamResponseParams",
+ "AgentContinueStreamResponsePayloadParams",
+ "AgentInlineToolParams",
+ "AgentKernelRequestParams",
+ "AgentKernelRequestReasoningEffortParams",
+ "AgentKernelRequestStopParams",
+ "AgentKernelRequestTemplateParams",
+ "AgentKernelRequestToolsItemParams",
+ "AgentLinkedFileRequestParams",
+ "AgentLinkedFileResponseFileParams",
+ "AgentLinkedFileResponseParams",
+ "AgentLogResponseParams",
+ "AgentLogResponseToolChoiceParams",
+ "AgentLogStreamResponseParams",
+ "AgentResponseParams",
+ "AgentResponseReasoningEffortParams",
+ "AgentResponseStopParams",
+ "AgentResponseTemplateParams",
+ "AgentResponseToolsItemParams",
+ "AnthropicRedactedThinkingContentParams",
+ "AnthropicThinkingContentParams",
"BooleanEvaluatorStatsResponseParams",
"ChatMessageContentItemParams",
"ChatMessageContentParams",
"ChatMessageParams",
+ "ChatMessageThinkingItemParams",
"CodeEvaluatorRequestParams",
+ "CreateAgentLogResponseParams",
"CreateDatapointRequestParams",
"CreateDatapointRequestTargetValueParams",
"CreateEvaluatorLogResponseParams",
@@ -180,6 +247,7 @@
"ExternalEvaluatorRequestParams",
"FileEnvironmentResponseFileParams",
"FileEnvironmentResponseParams",
+ "FileEnvironmentVariableRequestParams",
"FileIdParams",
"FilePathParams",
"FileRequestParams",
@@ -193,7 +261,9 @@
"ImageChatContentParams",
"ImageUrlParams",
"InputResponseParams",
+ "LinkedFileRequestParams",
"LinkedToolResponseParams",
+ "ListAgentsParams",
"ListDatasetsParams",
"ListEvaluatorsParams",
"ListFlowsParams",
@@ -201,24 +271,27 @@
"ListToolsParams",
"LlmEvaluatorRequestParams",
"LogResponseParams",
+ "LogStreamResponseParams",
"MonitoringEvaluatorEnvironmentRequestParams",
"MonitoringEvaluatorResponseParams",
"MonitoringEvaluatorVersionRequestParams",
"NumericEvaluatorStatsResponseParams",
"OverallStatsParams",
+ "PaginatedDataAgentResponseParams",
"PaginatedDataEvaluationLogResponseParams",
"PaginatedDataEvaluatorResponseParams",
"PaginatedDataFlowResponseParams",
"PaginatedDataLogResponseParams",
"PaginatedDataPromptResponseParams",
"PaginatedDataToolResponseParams",
- "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseParams",
- "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams",
+ "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseParams",
+ "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams",
"PaginatedDatapointResponseParams",
"PaginatedDatasetResponseParams",
"PaginatedEvaluationResponseParams",
"PopulateTemplateResponseParams",
"PopulateTemplateResponsePopulatedTemplateParams",
+ "PopulateTemplateResponseReasoningEffortParams",
"PopulateTemplateResponseStopParams",
"PopulateTemplateResponseTemplateParams",
"PromptCallLogResponseParams",
@@ -226,11 +299,13 @@
"PromptCallResponseToolChoiceParams",
"PromptCallStreamResponseParams",
"PromptKernelRequestParams",
+ "PromptKernelRequestReasoningEffortParams",
"PromptKernelRequestStopParams",
"PromptKernelRequestTemplateParams",
"PromptLogResponseParams",
"PromptLogResponseToolChoiceParams",
"PromptResponseParams",
+ "PromptResponseReasoningEffortParams",
"PromptResponseStopParams",
"PromptResponseTemplateParams",
"ProviderApiKeysParams",
@@ -242,6 +317,7 @@
"TextChatContentParams",
"TextEvaluatorStatsResponseParams",
"ToolCallParams",
+ "ToolCallResponseParams",
"ToolChoiceParams",
"ToolFunctionParams",
"ToolKernelRequestParams",
diff --git a/src/humanloop/requests/agent_call_response.py b/src/humanloop/requests/agent_call_response.py
new file mode 100644
index 00000000..ffc925ec
--- /dev/null
+++ b/src/humanloop/requests/agent_call_response.py
@@ -0,0 +1,202 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing_extensions
+from .chat_message import ChatMessageParams
+import typing
+from .agent_call_response_tool_choice import AgentCallResponseToolChoiceParams
+from .agent_response import AgentResponseParams
+import datetime as dt
+from ..types.log_status import LogStatus
+from .evaluator_log_response import EvaluatorLogResponseParams
+from .log_response import LogResponseParams
+
+
+class AgentCallResponseParams(typing_extensions.TypedDict):
+ """
+ Response model for an Agent call.
+ """
+
+ output_message: typing_extensions.NotRequired[ChatMessageParams]
+ """
+ The message returned by the provider.
+ """
+
+ prompt_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of tokens in the prompt used to generate the output.
+ """
+
+ reasoning_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of reasoning tokens used to generate the output.
+ """
+
+ output_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of tokens in the output generated by the model.
+ """
+
+ prompt_cost: typing_extensions.NotRequired[float]
+ """
+ Cost in dollars associated to the tokens in the prompt.
+ """
+
+ output_cost: typing_extensions.NotRequired[float]
+ """
+ Cost in dollars associated to the tokens in the output.
+ """
+
+ finish_reason: typing_extensions.NotRequired[str]
+ """
+ Reason the generation finished.
+ """
+
+ messages: typing_extensions.NotRequired[typing.Sequence[ChatMessageParams]]
+ """
+ The messages passed to the provider chat endpoint.
+ """
+
+ tool_choice: typing_extensions.NotRequired[AgentCallResponseToolChoiceParams]
+ """
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+ """
+
+ agent: AgentResponseParams
+ """
+ Agent that generated the Log.
+ """
+
+ start_time: typing_extensions.NotRequired[dt.datetime]
+ """
+ When the logged event started.
+ """
+
+ end_time: typing_extensions.NotRequired[dt.datetime]
+ """
+ When the logged event ended.
+ """
+
+ output: typing_extensions.NotRequired[str]
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing_extensions.NotRequired[dt.datetime]
+ """
+ User defined timestamp for when the log was created.
+ """
+
+ error: typing_extensions.NotRequired[str]
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing_extensions.NotRequired[float]
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing_extensions.NotRequired[str]
+ """
+ Captured log and debug statements.
+ """
+
+ provider_request: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Raw request sent to provider.
+ """
+
+ provider_response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Raw response received from the provider.
+ """
+
+ inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The inputs passed to the prompt template.
+ """
+
+ source: typing_extensions.NotRequired[str]
+ """
+ Identifies where the model was called from.
+ """
+
+ metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Any additional metadata to record.
+ """
+
+ log_status: typing_extensions.NotRequired[LogStatus]
+ """
+ Status of the Agent Log. If `incomplete`, the Agent turn was suspended due to a tool call and can be continued by calling /agents/continue with responses to the Agent's last message (which should contain tool calls). See the `previous_agent_message` field for easy access to the Agent's last message.
+ """
+
+ source_datapoint_id: typing_extensions.NotRequired[str]
+ """
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ """
+
+ trace_parent_id: typing_extensions.NotRequired[str]
+ """
+ The ID of the parent Log to nest this Log under in a Trace.
+ """
+
+ batches: typing_extensions.NotRequired[typing.Sequence[str]]
+ """
+ Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations.
+ """
+
+ user: typing_extensions.NotRequired[str]
+ """
+ End-user ID related to the Log.
+ """
+
+ environment: typing_extensions.NotRequired[str]
+ """
+ The name of the Environment the Log is associated to.
+ """
+
+ save: typing_extensions.NotRequired[bool]
+ """
+ Whether the request/response payloads will be stored on Humanloop.
+ """
+
+ log_id: typing_extensions.NotRequired[str]
+ """
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ """
+
+ id: str
+ """
+ Unique identifier for the Log.
+ """
+
+ evaluator_logs: typing.Sequence[EvaluatorLogResponseParams]
+ """
+ List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
+ """
+
+ trace_flow_id: typing_extensions.NotRequired[str]
+ """
+ Identifier for the Flow that the Trace belongs to.
+ """
+
+ trace_id: typing_extensions.NotRequired[str]
+ """
+ Identifier for the Trace that the Log belongs to.
+ """
+
+ trace_children: typing_extensions.NotRequired[typing.Sequence[LogResponseParams]]
+ """
+ Logs nested under this Log in the Trace.
+ """
+
+ previous_agent_message: typing_extensions.NotRequired[ChatMessageParams]
+ """
+ The Agent's last message, which should contain tool calls. Only populated if the Log is incomplete due to a suspended Agent turn with tool calls. This is useful for continuing the Agent call by calling /agents/continue.
+ """
diff --git a/src/humanloop/requests/agent_call_response_tool_choice.py b/src/humanloop/requests/agent_call_response_tool_choice.py
new file mode 100644
index 00000000..6cc9f9c4
--- /dev/null
+++ b/src/humanloop/requests/agent_call_response_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .tool_choice import ToolChoiceParams
+
+AgentCallResponseToolChoiceParams = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams
+]
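
The union accepts the three literal strings or a structured tool choice; for example (the function name is illustrative):

    from humanloop.requests.agent_call_response_tool_choice import AgentCallResponseToolChoiceParams

    auto_choice: AgentCallResponseToolChoiceParams = "auto"
    forced_choice: AgentCallResponseToolChoiceParams = {
        "type": "function",
        "function": {"name": "get_weather"},  # illustrative tool name
    }
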
diff --git a/src/humanloop/requests/agent_call_stream_response.py b/src/humanloop/requests/agent_call_stream_response.py
new file mode 100644
index 00000000..9555925d
--- /dev/null
+++ b/src/humanloop/requests/agent_call_stream_response.py
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing_extensions
+from .agent_call_stream_response_payload import AgentCallStreamResponsePayloadParams
+from ..types.event_type import EventType
+import datetime as dt
+
+
+class AgentCallStreamResponseParams(typing_extensions.TypedDict):
+ """
+ Response model for calling an Agent in streaming mode.
+ """
+
+ log_id: str
+ message: str
+ payload: typing_extensions.NotRequired[AgentCallStreamResponsePayloadParams]
+ type: EventType
+ created_at: dt.datetime
diff --git a/src/humanloop/requests/agent_call_stream_response_payload.py b/src/humanloop/requests/agent_call_stream_response_payload.py
new file mode 100644
index 00000000..0e08a6f3
--- /dev/null
+++ b/src/humanloop/requests/agent_call_stream_response_payload.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .log_stream_response import LogStreamResponseParams
+from .log_response import LogResponseParams
+from .tool_call import ToolCallParams
+
+AgentCallStreamResponsePayloadParams = typing.Union[LogStreamResponseParams, LogResponseParams, ToolCallParams]
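
Because the payload is an untagged union of TypedDicts, a consumer has to discriminate structurally; a rough sketch (the discriminating keys are assumptions, not part of the generated API):

    from humanloop.requests.agent_call_stream_response_payload import AgentCallStreamResponsePayloadParams

    def describe_payload(payload: AgentCallStreamResponsePayloadParams) -> str:
        if "function" in payload:  # assumed marker of ToolCallParams
            return "tool call"
        if "evaluator_logs" in payload:  # assumed marker of a full LogResponseParams
            return "completed log"
        return "log chunk"  # otherwise treat as LogStreamResponseParams
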
diff --git a/src/humanloop/requests/agent_continue_response.py b/src/humanloop/requests/agent_continue_response.py
new file mode 100644
index 00000000..8300667b
--- /dev/null
+++ b/src/humanloop/requests/agent_continue_response.py
@@ -0,0 +1,202 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing_extensions
+from .chat_message import ChatMessageParams
+import typing
+from .agent_continue_response_tool_choice import AgentContinueResponseToolChoiceParams
+from .agent_response import AgentResponseParams
+import datetime as dt
+from ..types.log_status import LogStatus
+from .evaluator_log_response import EvaluatorLogResponseParams
+from .log_response import LogResponseParams
+
+
+class AgentContinueResponseParams(typing_extensions.TypedDict):
+ """
+ Response model for continuing an Agent call.
+ """
+
+ output_message: typing_extensions.NotRequired[ChatMessageParams]
+ """
+ The message returned by the provider.
+ """
+
+ prompt_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of tokens in the prompt used to generate the output.
+ """
+
+ reasoning_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of reasoning tokens used to generate the output.
+ """
+
+ output_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of tokens in the output generated by the model.
+ """
+
+ prompt_cost: typing_extensions.NotRequired[float]
+ """
+ Cost in dollars associated to the tokens in the prompt.
+ """
+
+ output_cost: typing_extensions.NotRequired[float]
+ """
+ Cost in dollars associated to the tokens in the output.
+ """
+
+ finish_reason: typing_extensions.NotRequired[str]
+ """
+ Reason the generation finished.
+ """
+
+ messages: typing_extensions.NotRequired[typing.Sequence[ChatMessageParams]]
+ """
+ The messages passed to the provider chat endpoint.
+ """
+
+ tool_choice: typing_extensions.NotRequired[AgentContinueResponseToolChoiceParams]
+ """
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+ """
+
+ agent: AgentResponseParams
+ """
+ Agent that generated the Log.
+ """
+
+ start_time: typing_extensions.NotRequired[dt.datetime]
+ """
+ When the logged event started.
+ """
+
+ end_time: typing_extensions.NotRequired[dt.datetime]
+ """
+ When the logged event ended.
+ """
+
+ output: typing_extensions.NotRequired[str]
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing_extensions.NotRequired[dt.datetime]
+ """
+ User defined timestamp for when the log was created.
+ """
+
+ error: typing_extensions.NotRequired[str]
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing_extensions.NotRequired[float]
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing_extensions.NotRequired[str]
+ """
+ Captured log and debug statements.
+ """
+
+ provider_request: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Raw request sent to provider.
+ """
+
+ provider_response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Raw response received from the provider.
+ """
+
+ inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The inputs passed to the prompt template.
+ """
+
+ source: typing_extensions.NotRequired[str]
+ """
+ Identifies where the model was called from.
+ """
+
+ metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Any additional metadata to record.
+ """
+
+ log_status: typing_extensions.NotRequired[LogStatus]
+ """
+ Status of the Agent Log. If `incomplete`, the Agent turn was suspended due to a tool call and can be continued by calling /agents/continue with responses to the Agent's last message (which should contain tool calls). See the `previous_agent_message` field for easy access to the Agent's last message.
+ """
+
+ source_datapoint_id: typing_extensions.NotRequired[str]
+ """
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ """
+
+ trace_parent_id: typing_extensions.NotRequired[str]
+ """
+ The ID of the parent Log to nest this Log under in a Trace.
+ """
+
+ batches: typing_extensions.NotRequired[typing.Sequence[str]]
+ """
+ Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations.
+ """
+
+ user: typing_extensions.NotRequired[str]
+ """
+ End-user ID related to the Log.
+ """
+
+ environment: typing_extensions.NotRequired[str]
+ """
+ The name of the Environment the Log is associated to.
+ """
+
+ save: typing_extensions.NotRequired[bool]
+ """
+ Whether the request/response payloads will be stored on Humanloop.
+ """
+
+ log_id: typing_extensions.NotRequired[str]
+ """
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ """
+
+ id: str
+ """
+ Unique identifier for the Log.
+ """
+
+ evaluator_logs: typing.Sequence[EvaluatorLogResponseParams]
+ """
+ List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
+ """
+
+ trace_flow_id: typing_extensions.NotRequired[str]
+ """
+ Identifier for the Flow that the Trace belongs to.
+ """
+
+ trace_id: typing_extensions.NotRequired[str]
+ """
+ Identifier for the Trace that the Log belongs to.
+ """
+
+ trace_children: typing_extensions.NotRequired[typing.Sequence[LogResponseParams]]
+ """
+ Logs nested under this Log in the Trace.
+ """
+
+ previous_agent_message: typing_extensions.NotRequired[ChatMessageParams]
+ """
+ The Agent's last message, which should contain tool calls. Only populated if the Log is incomplete due to a suspended Agent turn with tool calls. This is useful for continuing the Agent call by calling /agents/continue.
+ """
diff --git a/src/humanloop/requests/agent_continue_response_tool_choice.py b/src/humanloop/requests/agent_continue_response_tool_choice.py
new file mode 100644
index 00000000..24b044cc
--- /dev/null
+++ b/src/humanloop/requests/agent_continue_response_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .tool_choice import ToolChoiceParams
+
+AgentContinueResponseToolChoiceParams = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams
+]
diff --git a/src/humanloop/requests/agent_continue_stream_response.py b/src/humanloop/requests/agent_continue_stream_response.py
new file mode 100644
index 00000000..1038e000
--- /dev/null
+++ b/src/humanloop/requests/agent_continue_stream_response.py
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing_extensions
+from .agent_continue_stream_response_payload import AgentContinueStreamResponsePayloadParams
+from ..types.event_type import EventType
+import datetime as dt
+
+
+class AgentContinueStreamResponseParams(typing_extensions.TypedDict):
+ """
+ Response model for continuing an Agent call in streaming mode.
+ """
+
+ log_id: str
+ message: str
+ payload: typing_extensions.NotRequired[AgentContinueStreamResponsePayloadParams]
+ type: EventType
+ created_at: dt.datetime
diff --git a/src/humanloop/requests/agent_continue_stream_response_payload.py b/src/humanloop/requests/agent_continue_stream_response_payload.py
new file mode 100644
index 00000000..ddd74c10
--- /dev/null
+++ b/src/humanloop/requests/agent_continue_stream_response_payload.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .log_stream_response import LogStreamResponseParams
+from .log_response import LogResponseParams
+from .tool_call import ToolCallParams
+
+AgentContinueStreamResponsePayloadParams = typing.Union[LogStreamResponseParams, LogResponseParams, ToolCallParams]
diff --git a/src/humanloop/requests/agent_inline_tool.py b/src/humanloop/requests/agent_inline_tool.py
new file mode 100644
index 00000000..31f9401a
--- /dev/null
+++ b/src/humanloop/requests/agent_inline_tool.py
@@ -0,0 +1,13 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing
+from .tool_function import ToolFunctionParams
+import typing_extensions
+from ..types.on_agent_call_enum import OnAgentCallEnum
+
+
+class AgentInlineToolParams(typing_extensions.TypedDict):
+ type: typing.Literal["inline"]
+ json_schema: ToolFunctionParams
+ on_agent_call: typing_extensions.NotRequired[OnAgentCallEnum]
diff --git a/src/humanloop/requests/agent_kernel_request.py b/src/humanloop/requests/agent_kernel_request.py
new file mode 100644
index 00000000..0ca76571
--- /dev/null
+++ b/src/humanloop/requests/agent_kernel_request.py
@@ -0,0 +1,112 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing_extensions
+from ..types.model_endpoints import ModelEndpoints
+from .agent_kernel_request_template import AgentKernelRequestTemplateParams
+from ..types.template_language import TemplateLanguage
+from ..types.model_providers import ModelProviders
+from .agent_kernel_request_stop import AgentKernelRequestStopParams
+import typing
+from .response_format import ResponseFormatParams
+from .agent_kernel_request_reasoning_effort import AgentKernelRequestReasoningEffortParams
+from .agent_kernel_request_tools_item import AgentKernelRequestToolsItemParams
+
+
+class AgentKernelRequestParams(typing_extensions.TypedDict):
+ """
+ Base fields shared by PromptKernelRequest and AgentKernelRequest.
+
+ Contains the common Prompt-related fields.
+ """
+
+ model: str
+ """
+ The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+ """
+
+ endpoint: typing_extensions.NotRequired[ModelEndpoints]
+ """
+ The provider model endpoint used.
+ """
+
+ template: typing_extensions.NotRequired[AgentKernelRequestTemplateParams]
+ """
+ The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+ For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+ For completion models, provide a prompt template as a string.
+
+ Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+ """
+
+ template_language: typing_extensions.NotRequired[TemplateLanguage]
+ """
+ The template language to use for rendering the template.
+ """
+
+ provider: typing_extensions.NotRequired[ModelProviders]
+ """
+ The company providing the underlying model service.
+ """
+
+ max_tokens: typing_extensions.NotRequired[int]
+ """
+ The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate, given the length of the prompt.
+ """
+
+ temperature: typing_extensions.NotRequired[float]
+ """
+ What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+ """
+
+ top_p: typing_extensions.NotRequired[float]
+ """
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+ """
+
+ stop: typing_extensions.NotRequired[AgentKernelRequestStopParams]
+ """
+ The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+ """
+
+ presence_penalty: typing_extensions.NotRequired[float]
+ """
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+ """
+
+ frequency_penalty: typing_extensions.NotRequired[float]
+ """
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+ """
+
+ other: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Other parameter values to be passed to the provider call.
+ """
+
+ seed: typing_extensions.NotRequired[int]
+ """
+ If specified, model will make a best effort to sample deterministically, but it is not guaranteed.
+ """
+
+ response_format: typing_extensions.NotRequired[ResponseFormatParams]
+ """
+ The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+ """
+
+ reasoning_effort: typing_extensions.NotRequired[AgentKernelRequestReasoningEffortParams]
+ """
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which specifies the maximum token budget.
+ """
+
+ tools: typing_extensions.NotRequired[typing.Sequence[AgentKernelRequestToolsItemParams]]
+ attributes: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used.
+ """
+
+ max_iterations: typing_extensions.NotRequired[int]
+ """
+ The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
+ """
diff --git a/src/humanloop/requests/agent_kernel_request_reasoning_effort.py b/src/humanloop/requests/agent_kernel_request_reasoning_effort.py
new file mode 100644
index 00000000..ea32bc11
--- /dev/null
+++ b/src/humanloop/requests/agent_kernel_request_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ..types.open_ai_reasoning_effort import OpenAiReasoningEffort
+
+AgentKernelRequestReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int]
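
Both arms of the union in use; the "medium" value assumes OpenAiReasoningEffort is a low/medium/high style enum:

    from humanloop.requests.agent_kernel_request_reasoning_effort import (
        AgentKernelRequestReasoningEffortParams,
    )

    openai_effort: AgentKernelRequestReasoningEffortParams = "medium"  # assumed enum value
    anthropic_effort: AgentKernelRequestReasoningEffortParams = 2048  # max thinking-token budget
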
diff --git a/src/humanloop/requests/agent_kernel_request_stop.py b/src/humanloop/requests/agent_kernel_request_stop.py
new file mode 100644
index 00000000..eae95d35
--- /dev/null
+++ b/src/humanloop/requests/agent_kernel_request_stop.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentKernelRequestStopParams = typing.Union[str, typing.Sequence[str]]
diff --git a/src/humanloop/requests/agent_kernel_request_template.py b/src/humanloop/requests/agent_kernel_request_template.py
new file mode 100644
index 00000000..7261667d
--- /dev/null
+++ b/src/humanloop/requests/agent_kernel_request_template.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .chat_message import ChatMessageParams
+
+AgentKernelRequestTemplateParams = typing.Union[str, typing.Sequence[ChatMessageParams]]
diff --git a/src/humanloop/requests/agent_kernel_request_tools_item.py b/src/humanloop/requests/agent_kernel_request_tools_item.py
new file mode 100644
index 00000000..27b63984
--- /dev/null
+++ b/src/humanloop/requests/agent_kernel_request_tools_item.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .agent_linked_file_request import AgentLinkedFileRequestParams
+from .agent_inline_tool import AgentInlineToolParams
+
+AgentKernelRequestToolsItemParams = typing.Union[AgentLinkedFileRequestParams, AgentInlineToolParams]
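
One example per arm of the union; the ToolFunctionParams shape and the IDs are assumptions for illustration:

    from humanloop.requests.agent_kernel_request_tools_item import AgentKernelRequestToolsItemParams

    linked_tool: AgentKernelRequestToolsItemParams = {
        "type": "file",
        "link": {"file_id": "fl_123"},  # illustrative File ID
    }
    inline_tool: AgentKernelRequestToolsItemParams = {
        "type": "inline",
        "json_schema": {  # assumed ToolFunctionParams fields
            "name": "get_weather",
            "parameters": {"type": "object", "properties": {}},
        },
    }
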
diff --git a/src/humanloop/requests/agent_linked_file_request.py b/src/humanloop/requests/agent_linked_file_request.py
new file mode 100644
index 00000000..18fc2274
--- /dev/null
+++ b/src/humanloop/requests/agent_linked_file_request.py
@@ -0,0 +1,13 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing
+from .linked_file_request import LinkedFileRequestParams
+import typing_extensions
+from ..types.on_agent_call_enum import OnAgentCallEnum
+
+
+class AgentLinkedFileRequestParams(typing_extensions.TypedDict):
+ type: typing.Literal["file"]
+ link: LinkedFileRequestParams
+ on_agent_call: typing_extensions.NotRequired[OnAgentCallEnum]
diff --git a/src/humanloop/requests/agent_linked_file_response.py b/src/humanloop/requests/agent_linked_file_response.py
new file mode 100644
index 00000000..8a690a77
--- /dev/null
+++ b/src/humanloop/requests/agent_linked_file_response.py
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+import typing_extensions
+import typing
+from .linked_file_request import LinkedFileRequestParams
+import typing_extensions
+from ..types.on_agent_call_enum import OnAgentCallEnum
+import typing
+
+if typing.TYPE_CHECKING:
+ from .agent_linked_file_response_file import AgentLinkedFileResponseFileParams
+
+
+class AgentLinkedFileResponseParams(typing_extensions.TypedDict):
+ type: typing.Literal["file"]
+ link: LinkedFileRequestParams
+ on_agent_call: typing_extensions.NotRequired[OnAgentCallEnum]
+ file: typing_extensions.NotRequired["AgentLinkedFileResponseFileParams"]
diff --git a/src/humanloop/requests/agent_linked_file_response_file.py b/src/humanloop/requests/agent_linked_file_response_file.py
new file mode 100644
index 00000000..bb328de2
--- /dev/null
+++ b/src/humanloop/requests/agent_linked_file_response_file.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+import typing
+from .dataset_response import DatasetResponseParams
+import typing
+
+if typing.TYPE_CHECKING:
+ from .prompt_response import PromptResponseParams
+ from .tool_response import ToolResponseParams
+ from .evaluator_response import EvaluatorResponseParams
+ from .flow_response import FlowResponseParams
+ from .agent_response import AgentResponseParams
+AgentLinkedFileResponseFileParams = typing.Union[
+ "PromptResponseParams",
+ "ToolResponseParams",
+ DatasetResponseParams,
+ "EvaluatorResponseParams",
+ "FlowResponseParams",
+ "AgentResponseParams",
+]
diff --git a/src/humanloop/requests/agent_log_response.py b/src/humanloop/requests/agent_log_response.py
new file mode 100644
index 00000000..0cb24b8a
--- /dev/null
+++ b/src/humanloop/requests/agent_log_response.py
@@ -0,0 +1,201 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+import typing_extensions
+import typing_extensions
+from .chat_message import ChatMessageParams
+import typing
+from .agent_log_response_tool_choice import AgentLogResponseToolChoiceParams
+from .agent_response import AgentResponseParams
+import datetime as dt
+from ..types.log_status import LogStatus
+import typing
+
+if typing.TYPE_CHECKING:
+ from .evaluator_log_response import EvaluatorLogResponseParams
+ from .log_response import LogResponseParams
+
+
+class AgentLogResponseParams(typing_extensions.TypedDict):
+ """
+ Response model for an Agent Log.
+ """
+
+ output_message: typing_extensions.NotRequired[ChatMessageParams]
+ """
+ The message returned by the provider.
+ """
+
+ prompt_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of tokens in the prompt used to generate the output.
+ """
+
+ reasoning_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of reasoning tokens used to generate the output.
+ """
+
+ output_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of tokens in the output generated by the model.
+ """
+
+ prompt_cost: typing_extensions.NotRequired[float]
+ """
+ Cost in dollars associated to the tokens in the prompt.
+ """
+
+ output_cost: typing_extensions.NotRequired[float]
+ """
+ Cost in dollars associated to the tokens in the output.
+ """
+
+ finish_reason: typing_extensions.NotRequired[str]
+ """
+ Reason the generation finished.
+ """
+
+ messages: typing_extensions.NotRequired[typing.Sequence[ChatMessageParams]]
+ """
+ The messages passed to the provider chat endpoint.
+ """
+
+ tool_choice: typing_extensions.NotRequired[AgentLogResponseToolChoiceParams]
+ """
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+ """
+
+ agent: AgentResponseParams
+ """
+ Agent that generated the Log.
+ """
+
+ start_time: typing_extensions.NotRequired[dt.datetime]
+ """
+ When the logged event started.
+ """
+
+ end_time: typing_extensions.NotRequired[dt.datetime]
+ """
+ When the logged event ended.
+ """
+
+ output: typing_extensions.NotRequired[str]
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing_extensions.NotRequired[dt.datetime]
+ """
+ User defined timestamp for when the log was created.
+ """
+
+ error: typing_extensions.NotRequired[str]
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing_extensions.NotRequired[float]
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing_extensions.NotRequired[str]
+ """
+ Captured log and debug statements.
+ """
+
+ provider_request: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Raw request sent to provider.
+ """
+
+ provider_response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Raw response received from the provider.
+ """
+
+ inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The inputs passed to the prompt template.
+ """
+
+ source: typing_extensions.NotRequired[str]
+ """
+ Identifies where the model was called from.
+ """
+
+ metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Any additional metadata to record.
+ """
+
+ log_status: typing_extensions.NotRequired[LogStatus]
+ """
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+ """
+
+ source_datapoint_id: typing_extensions.NotRequired[str]
+ """
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ """
+
+ trace_parent_id: typing_extensions.NotRequired[str]
+ """
+ The ID of the parent Log to nest this Log under in a Trace.
+ """
+
+ batches: typing_extensions.NotRequired[typing.Sequence[str]]
+ """
+ Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations.
+ """
+
+ user: typing_extensions.NotRequired[str]
+ """
+ End-user ID related to the Log.
+ """
+
+ environment: typing_extensions.NotRequired[str]
+ """
+ The name of the Environment the Log is associated to.
+ """
+
+ save: typing_extensions.NotRequired[bool]
+ """
+ Whether the request/response payloads will be stored on Humanloop.
+ """
+
+ log_id: typing_extensions.NotRequired[str]
+ """
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ """
+
+ id: str
+ """
+ Unique identifier for the Log.
+ """
+
+ evaluator_logs: typing.Sequence["EvaluatorLogResponseParams"]
+ """
+ List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
+ """
+
+ trace_flow_id: typing_extensions.NotRequired[str]
+ """
+ Identifier for the Flow that the Trace belongs to.
+ """
+
+ trace_id: typing_extensions.NotRequired[str]
+ """
+ Identifier for the Trace that the Log belongs to.
+ """
+
+ trace_children: typing_extensions.NotRequired[typing.Sequence["LogResponseParams"]]
+ """
+ Logs nested under this Log in the Trace.
+ """
diff --git a/src/humanloop/requests/agent_log_response_tool_choice.py b/src/humanloop/requests/agent_log_response_tool_choice.py
new file mode 100644
index 00000000..e239a69c
--- /dev/null
+++ b/src/humanloop/requests/agent_log_response_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .tool_choice import ToolChoiceParams
+
+AgentLogResponseToolChoiceParams = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams
+]
diff --git a/src/humanloop/requests/agent_log_stream_response.py b/src/humanloop/requests/agent_log_stream_response.py
new file mode 100644
index 00000000..710d55cf
--- /dev/null
+++ b/src/humanloop/requests/agent_log_stream_response.py
@@ -0,0 +1,87 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing_extensions
+import datetime as dt
+from .chat_message import ChatMessageParams
+
+
+class AgentLogStreamResponseParams(typing_extensions.TypedDict):
+ """
+ Agent-specific log output streamed while an Agent call runs.
+ """
+
+ output: typing_extensions.NotRequired[str]
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing_extensions.NotRequired[dt.datetime]
+ """
+ User defined timestamp for when the log was created.
+ """
+
+ error: typing_extensions.NotRequired[str]
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing_extensions.NotRequired[float]
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing_extensions.NotRequired[str]
+ """
+ Captured log and debug statements.
+ """
+
+ output_message: typing_extensions.NotRequired[ChatMessageParams]
+ """
+ The message returned by the provider.
+ """
+
+ prompt_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of tokens in the prompt used to generate the output.
+ """
+
+ reasoning_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of reasoning tokens used to generate the output.
+ """
+
+ output_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of tokens in the output generated by the model.
+ """
+
+ prompt_cost: typing_extensions.NotRequired[float]
+ """
+ Cost in dollars associated to the tokens in the prompt.
+ """
+
+ output_cost: typing_extensions.NotRequired[float]
+ """
+ Cost in dollars associated to the tokens in the output.
+ """
+
+ finish_reason: typing_extensions.NotRequired[str]
+ """
+ Reason the generation finished.
+ """
+
+ id: str
+ """
+ ID of the log.
+ """
+
+ agent_id: str
+ """
+ ID of the Agent the log belongs to.
+ """
+
+ version_id: str
+ """
+ ID of the specific version of the Agent.
+ """
diff --git a/src/humanloop/requests/agent_response.py b/src/humanloop/requests/agent_response.py
new file mode 100644
index 00000000..f482728d
--- /dev/null
+++ b/src/humanloop/requests/agent_response.py
@@ -0,0 +1,242 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+import typing_extensions
+import typing_extensions
+from ..types.model_endpoints import ModelEndpoints
+from .agent_response_template import AgentResponseTemplateParams
+from ..types.template_language import TemplateLanguage
+from ..types.model_providers import ModelProviders
+from .agent_response_stop import AgentResponseStopParams
+import typing
+from .response_format import ResponseFormatParams
+from .agent_response_reasoning_effort import AgentResponseReasoningEffortParams
+from .environment_response import EnvironmentResponseParams
+import datetime as dt
+from ..types.user_response import UserResponse
+from ..types.version_status import VersionStatus
+from .input_response import InputResponseParams
+from .evaluator_aggregate import EvaluatorAggregateParams
+import typing
+
+if typing.TYPE_CHECKING:
+ from .agent_response_tools_item import AgentResponseToolsItemParams
+ from .monitoring_evaluator_response import MonitoringEvaluatorResponseParams
+
+
+class AgentResponseParams(typing_extensions.TypedDict):
+ """
+ Response model for an Agent.
+ """
+
+ path: str
+ """
+ Path of the Agent, including the name, which is used as a unique identifier.
+ """
+
+ id: str
+ """
+ Unique identifier for the Agent.
+ """
+
+ directory_id: typing_extensions.NotRequired[str]
+ """
+ ID of the directory that the file is in on Humanloop.
+ """
+
+ model: str
+ """
+ The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+ """
+
+ endpoint: typing_extensions.NotRequired[ModelEndpoints]
+ """
+ The provider model endpoint used.
+ """
+
+ template: typing_extensions.NotRequired[AgentResponseTemplateParams]
+ """
+ The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+ For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+ For completion models, provide a prompt template as a string.
+
+ Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+ """
+
+ template_language: typing_extensions.NotRequired[TemplateLanguage]
+ """
+ The template language to use for rendering the template.
+ """
+
+ provider: typing_extensions.NotRequired[ModelProviders]
+ """
+ The company providing the underlying model service.
+ """
+
+ max_tokens: typing_extensions.NotRequired[int]
+ """
+ The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate, given the length of the prompt.
+ """
+
+ temperature: typing_extensions.NotRequired[float]
+ """
+ What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+ """
+
+ top_p: typing_extensions.NotRequired[float]
+ """
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+ """
+
+ stop: typing_extensions.NotRequired[AgentResponseStopParams]
+ """
+ The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+ """
+
+ presence_penalty: typing_extensions.NotRequired[float]
+ """
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+ """
+
+ frequency_penalty: typing_extensions.NotRequired[float]
+ """
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+ """
+
+ other: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Other parameter values to be passed to the provider call.
+ """
+
+ seed: typing_extensions.NotRequired[int]
+ """
+ If specified, model will make a best effort to sample deterministically, but it is not guaranteed.
+ """
+
+ response_format: typing_extensions.NotRequired[ResponseFormatParams]
+ """
+ The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+ """
+
+ reasoning_effort: typing_extensions.NotRequired[AgentResponseReasoningEffortParams]
+ """
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which specifies the maximum token budget.
+ """
+
+ tools: typing.Sequence["AgentResponseToolsItemParams"]
+ """
+ List of tools that the Agent can call. These can be linked files or inline tools.
+ """
+
+ attributes: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used.
+ """
+
+ max_iterations: typing_extensions.NotRequired[int]
+ """
+ The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
+ """
+
+ version_name: typing_extensions.NotRequired[str]
+ """
+ Unique name for the Agent version. Version names must be unique for a given Agent.
+ """
+
+ version_description: typing_extensions.NotRequired[str]
+ """
+ Description of the version, e.g., the changes made in this version.
+ """
+
+ description: typing_extensions.NotRequired[str]
+ """
+ Description of the Agent.
+ """
+
+ tags: typing_extensions.NotRequired[typing.Sequence[str]]
+ """
+ List of tags associated with the file.
+ """
+
+ readme: typing_extensions.NotRequired[str]
+ """
+ Long description of the file.
+ """
+
+ name: str
+ """
+ Name of the Agent.
+ """
+
+ schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The JSON schema for the Agent.
+ """
+
+ version_id: str
+ """
+ Unique identifier for the specific Agent Version. If no query params are provided, the default deployed Agent Version is returned.
+ """
+
+ type: typing_extensions.NotRequired[typing.Literal["agent"]]
+ environments: typing_extensions.NotRequired[typing.Sequence[EnvironmentResponseParams]]
+ """
+ The list of environments the Agent Version is deployed to.
+ """
+
+ created_at: dt.datetime
+ updated_at: dt.datetime
+ created_by: typing_extensions.NotRequired[UserResponse]
+ """
+ The user who created the Agent.
+ """
+
+ committed_by: typing_extensions.NotRequired[UserResponse]
+ """
+ The user who committed the Agent Version.
+ """
+
+ committed_at: typing_extensions.NotRequired[dt.datetime]
+ """
+ The date and time the Agent Version was committed.
+ """
+
+ status: VersionStatus
+ """
+ The status of the Agent Version.
+ """
+
+ last_used_at: dt.datetime
+ version_logs_count: int
+ """
+ The number of logs that have been generated for this Agent Version.
+ """
+
+ total_logs_count: int
+ """
+ The number of logs that have been generated across all Agent Versions.
+ """
+
+ inputs: typing.Sequence[InputResponseParams]
+ """
+ Inputs associated to the Agent. Inputs correspond to any of the variables used within the Agent template.
+ """
+
+ evaluators: typing_extensions.NotRequired[typing.Sequence["MonitoringEvaluatorResponseParams"]]
+ """
+ Evaluators that have been attached to this Agent that are used for monitoring logs.
+ """
+
+ evaluator_aggregates: typing_extensions.NotRequired[typing.Sequence[EvaluatorAggregateParams]]
+ """
+ Aggregation of Evaluator results for the Agent Version.
+ """
+
+ content: typing_extensions.NotRequired[str]
+ """
+ The serialized kernel for the Agent. Corresponds to the .agent file.
+ """
diff --git a/src/humanloop/requests/agent_response_reasoning_effort.py b/src/humanloop/requests/agent_response_reasoning_effort.py
new file mode 100644
index 00000000..de1b969f
--- /dev/null
+++ b/src/humanloop/requests/agent_response_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ..types.open_ai_reasoning_effort import OpenAiReasoningEffort
+
+AgentResponseReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/requests/agent_response_stop.py b/src/humanloop/requests/agent_response_stop.py
new file mode 100644
index 00000000..a395ee73
--- /dev/null
+++ b/src/humanloop/requests/agent_response_stop.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentResponseStopParams = typing.Union[str, typing.Sequence[str]]
diff --git a/src/humanloop/requests/agent_response_template.py b/src/humanloop/requests/agent_response_template.py
new file mode 100644
index 00000000..94be65f1
--- /dev/null
+++ b/src/humanloop/requests/agent_response_template.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .chat_message import ChatMessageParams
+
+AgentResponseTemplateParams = typing.Union[str, typing.Sequence[ChatMessageParams]]
diff --git a/src/humanloop/requests/agent_response_tools_item.py b/src/humanloop/requests/agent_response_tools_item.py
new file mode 100644
index 00000000..5181579b
--- /dev/null
+++ b/src/humanloop/requests/agent_response_tools_item.py
@@ -0,0 +1,10 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+import typing
+from .agent_inline_tool import AgentInlineToolParams
+import typing
+
+if typing.TYPE_CHECKING:
+ from .agent_linked_file_response import AgentLinkedFileResponseParams
+AgentResponseToolsItemParams = typing.Union["AgentLinkedFileResponseParams", AgentInlineToolParams]
diff --git a/src/humanloop/requests/anthropic_redacted_thinking_content.py b/src/humanloop/requests/anthropic_redacted_thinking_content.py
new file mode 100644
index 00000000..3b328f7f
--- /dev/null
+++ b/src/humanloop/requests/anthropic_redacted_thinking_content.py
@@ -0,0 +1,12 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing
+
+
+class AnthropicRedactedThinkingContentParams(typing_extensions.TypedDict):
+ type: typing.Literal["redacted_thinking"]
+ data: str
+ """
+ Thinking block that Anthropic redacted for safety reasons. The user is expected to pass the block back to Anthropic.
+ """
diff --git a/src/humanloop/requests/anthropic_thinking_content.py b/src/humanloop/requests/anthropic_thinking_content.py
new file mode 100644
index 00000000..34f6f99f
--- /dev/null
+++ b/src/humanloop/requests/anthropic_thinking_content.py
@@ -0,0 +1,17 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing
+
+
+class AnthropicThinkingContentParams(typing_extensions.TypedDict):
+ type: typing.Literal["thinking"]
+ thinking: str
+ """
+ Model's chain-of-thought for providing the response.
+ """
+
+ signature: str
+ """
+ Cryptographic signature that verifies the thinking block was generated by Anthropic.
+ """
diff --git a/src/humanloop/requests/chat_message.py b/src/humanloop/requests/chat_message.py
index cab8466d..6011653a 100644
--- a/src/humanloop/requests/chat_message.py
+++ b/src/humanloop/requests/chat_message.py
@@ -6,6 +6,7 @@
from ..types.chat_role import ChatRole
import typing
from .tool_call import ToolCallParams
+from .chat_message_thinking_item import ChatMessageThinkingItemParams
class ChatMessageParams(typing_extensions.TypedDict):
@@ -33,3 +34,8 @@ class ChatMessageParams(typing_extensions.TypedDict):
"""
A list of tool calls requested by the assistant.
"""
+
+ thinking: typing_extensions.NotRequired[typing.Sequence[ChatMessageThinkingItemParams]]
+ """
+ Model's chain-of-thought for providing the response. Present on assistant messages if the model supports it.
+ """
diff --git a/src/humanloop/requests/chat_message_thinking_item.py b/src/humanloop/requests/chat_message_thinking_item.py
new file mode 100644
index 00000000..0691f4d8
--- /dev/null
+++ b/src/humanloop/requests/chat_message_thinking_item.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .anthropic_thinking_content import AnthropicThinkingContentParams
+from .anthropic_redacted_thinking_content import AnthropicRedactedThinkingContentParams
+
+ChatMessageThinkingItemParams = typing.Union[AnthropicThinkingContentParams, AnthropicRedactedThinkingContentParams]
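
An assistant message carrying both thinking variants; the signature and data values are illustrative placeholders:

    from humanloop.requests.chat_message import ChatMessageParams

    msg: ChatMessageParams = {
        "role": "assistant",
        "content": "The answer is 42.",
        "thinking": [
            {"type": "thinking", "thinking": "Work through the riddle...", "signature": "sig_abc"},
            {"type": "redacted_thinking", "data": "opaque-blob"},  # pass back to Anthropic as-is
        ],
    }
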
diff --git a/src/humanloop/requests/create_agent_log_response.py b/src/humanloop/requests/create_agent_log_response.py
new file mode 100644
index 00000000..b1715517
--- /dev/null
+++ b/src/humanloop/requests/create_agent_log_response.py
@@ -0,0 +1,31 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing_extensions
+from ..types.log_status import LogStatus
+
+
+class CreateAgentLogResponseParams(typing_extensions.TypedDict):
+ """
+ Response for an Agent Log.
+ """
+
+ id: str
+ """
+ Unique identifier for the Log.
+ """
+
+ agent_id: str
+ """
+ Unique identifier for the Agent.
+ """
+
+ version_id: str
+ """
+ Unique identifier for the Agent Version.
+ """
+
+ log_status: typing_extensions.NotRequired[LogStatus]
+ """
+ Status of the Agent Log. When an Agent Log is marked as `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs.
+ """
diff --git a/src/humanloop/requests/dataset_response.py b/src/humanloop/requests/dataset_response.py
index 1d59ed4b..1cffd2b2 100644
--- a/src/humanloop/requests/dataset_response.py
+++ b/src/humanloop/requests/dataset_response.py
@@ -42,6 +42,11 @@ class DatasetResponseParams(typing_extensions.TypedDict):
Description of the Dataset.
"""
+ schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The JSON schema for the File.
+ """
+
readme: typing_extensions.NotRequired[str]
"""
Long description of the file.
diff --git a/src/humanloop/requests/directory_with_parents_and_children_response_files_item.py b/src/humanloop/requests/directory_with_parents_and_children_response_files_item.py
index f101bf15..db9370b9 100644
--- a/src/humanloop/requests/directory_with_parents_and_children_response_files_item.py
+++ b/src/humanloop/requests/directory_with_parents_and_children_response_files_item.py
@@ -6,7 +6,13 @@
from .evaluator_response import EvaluatorResponseParams
from .dataset_response import DatasetResponseParams
from .flow_response import FlowResponseParams
+from .agent_response import AgentResponseParams
DirectoryWithParentsAndChildrenResponseFilesItemParams = typing.Union[
- PromptResponseParams, ToolResponseParams, EvaluatorResponseParams, DatasetResponseParams, FlowResponseParams
+ PromptResponseParams,
+ ToolResponseParams,
+ EvaluatorResponseParams,
+ DatasetResponseParams,
+ FlowResponseParams,
+ AgentResponseParams,
]
diff --git a/src/humanloop/requests/evaluator_response.py b/src/humanloop/requests/evaluator_response.py
index 908eeb2d..1ff836fb 100644
--- a/src/humanloop/requests/evaluator_response.py
+++ b/src/humanloop/requests/evaluator_response.py
@@ -57,6 +57,11 @@ class EvaluatorResponseParams(typing_extensions.TypedDict):
Description of the Evaluator.
"""
+ schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The JSON schema for the File.
+ """
+
readme: typing_extensions.NotRequired[str]
"""
Long description of the file.
diff --git a/src/humanloop/requests/file_environment_response_file.py b/src/humanloop/requests/file_environment_response_file.py
index 4ac6b0c3..04c0b51d 100644
--- a/src/humanloop/requests/file_environment_response_file.py
+++ b/src/humanloop/requests/file_environment_response_file.py
@@ -6,7 +6,13 @@
from .dataset_response import DatasetResponseParams
from .evaluator_response import EvaluatorResponseParams
from .flow_response import FlowResponseParams
+from .agent_response import AgentResponseParams
FileEnvironmentResponseFileParams = typing.Union[
- PromptResponseParams, ToolResponseParams, DatasetResponseParams, EvaluatorResponseParams, FlowResponseParams
+ PromptResponseParams,
+ ToolResponseParams,
+ DatasetResponseParams,
+ EvaluatorResponseParams,
+ FlowResponseParams,
+ AgentResponseParams,
]
diff --git a/src/humanloop/requests/file_environment_variable_request.py b/src/humanloop/requests/file_environment_variable_request.py
new file mode 100644
index 00000000..bb70bda4
--- /dev/null
+++ b/src/humanloop/requests/file_environment_variable_request.py
@@ -0,0 +1,15 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+
+
+class FileEnvironmentVariableRequestParams(typing_extensions.TypedDict):
+ name: str
+ """
+ Name of the environment variable.
+ """
+
+ value: str
+ """
+ Value of the environment variable.
+ """
diff --git a/src/humanloop/requests/flow_response.py b/src/humanloop/requests/flow_response.py
index 18a26d10..eebc9fd7 100644
--- a/src/humanloop/requests/flow_response.py
+++ b/src/humanloop/requests/flow_response.py
@@ -59,6 +59,11 @@ class FlowResponseParams(typing_extensions.TypedDict):
Description of the Flow.
"""
+ schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The JSON schema for the File.
+ """
+
readme: typing_extensions.NotRequired[str]
"""
Long description of the file.
diff --git a/src/humanloop/requests/linked_file_request.py b/src/humanloop/requests/linked_file_request.py
new file mode 100644
index 00000000..2bbba19c
--- /dev/null
+++ b/src/humanloop/requests/linked_file_request.py
@@ -0,0 +1,9 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+
+
+class LinkedFileRequestParams(typing_extensions.TypedDict):
+ file_id: str
+ environment_id: typing_extensions.NotRequired[str]
+ version_id: typing_extensions.NotRequired[str]
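Note: as a TypedDict, only `file_id` is required here; either optional key can pin a specific version. A minimal sketch (the IDs are placeholders):

    from humanloop.requests.linked_file_request import LinkedFileRequestParams

    latest: LinkedFileRequestParams = {"file_id": "fl_placeholder"}
    pinned: LinkedFileRequestParams = {
        "file_id": "fl_placeholder",
        "version_id": "fv_placeholder",  # or "environment_id" for a deployed version
    }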
diff --git a/src/humanloop/requests/list_agents.py b/src/humanloop/requests/list_agents.py
new file mode 100644
index 00000000..4a72f1db
--- /dev/null
+++ b/src/humanloop/requests/list_agents.py
@@ -0,0 +1,12 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing
+from .agent_response import AgentResponseParams
+
+
+class ListAgentsParams(typing_extensions.TypedDict):
+ records: typing.Sequence[AgentResponseParams]
+ """
+ The list of Agents.
+ """
diff --git a/src/humanloop/requests/log_response.py b/src/humanloop/requests/log_response.py
index 15a4cff6..cb3ce212 100644
--- a/src/humanloop/requests/log_response.py
+++ b/src/humanloop/requests/log_response.py
@@ -9,6 +9,11 @@
from .tool_log_response import ToolLogResponseParams
from .evaluator_log_response import EvaluatorLogResponseParams
from .flow_log_response import FlowLogResponseParams
+ from .agent_log_response import AgentLogResponseParams
LogResponseParams = typing.Union[
- "PromptLogResponseParams", "ToolLogResponseParams", "EvaluatorLogResponseParams", "FlowLogResponseParams"
+ "PromptLogResponseParams",
+ "ToolLogResponseParams",
+ "EvaluatorLogResponseParams",
+ "FlowLogResponseParams",
+ "AgentLogResponseParams",
]
diff --git a/src/humanloop/requests/log_stream_response.py b/src/humanloop/requests/log_stream_response.py
new file mode 100644
index 00000000..e142e7fb
--- /dev/null
+++ b/src/humanloop/requests/log_stream_response.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .prompt_call_stream_response import PromptCallStreamResponseParams
+from .agent_log_stream_response import AgentLogStreamResponseParams
+
+LogStreamResponseParams = typing.Union[PromptCallStreamResponseParams, AgentLogStreamResponseParams]
diff --git a/src/humanloop/requests/paginated_data_agent_response.py b/src/humanloop/requests/paginated_data_agent_response.py
new file mode 100644
index 00000000..c8d67533
--- /dev/null
+++ b/src/humanloop/requests/paginated_data_agent_response.py
@@ -0,0 +1,12 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing
+from .agent_response import AgentResponseParams
+
+
+class PaginatedDataAgentResponseParams(typing_extensions.TypedDict):
+ records: typing.Sequence[AgentResponseParams]
+ page: int
+ size: int
+ total: int
diff --git a/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py b/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py
similarity index 65%
rename from src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py
rename to src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py
index cf8bc4bf..0e7adb64 100644
--- a/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py
+++ b/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py
@@ -2,16 +2,16 @@
import typing_extensions
import typing
-from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item import (
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams,
+from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item import (
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams,
)
-class PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseParams(
+class PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseParams(
typing_extensions.TypedDict
):
records: typing.Sequence[
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams
]
page: int
size: int
diff --git a/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py b/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py
similarity index 58%
rename from src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py
rename to src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py
index 1ba74108..b43a5521 100644
--- a/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py
+++ b/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py
@@ -6,9 +6,13 @@
from .dataset_response import DatasetResponseParams
from .evaluator_response import EvaluatorResponseParams
from .flow_response import FlowResponseParams
+from .agent_response import AgentResponseParams
-PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams = (
- typing.Union[
- PromptResponseParams, ToolResponseParams, DatasetResponseParams, EvaluatorResponseParams, FlowResponseParams
- ]
-)
+PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams = typing.Union[
+ PromptResponseParams,
+ ToolResponseParams,
+ DatasetResponseParams,
+ EvaluatorResponseParams,
+ FlowResponseParams,
+ AgentResponseParams,
+]
diff --git a/src/humanloop/requests/populate_template_response.py b/src/humanloop/requests/populate_template_response.py
index 190341b0..40b62295 100644
--- a/src/humanloop/requests/populate_template_response.py
+++ b/src/humanloop/requests/populate_template_response.py
@@ -9,7 +9,7 @@
from .populate_template_response_stop import PopulateTemplateResponseStopParams
import typing
from .response_format import ResponseFormatParams
-from ..types.reasoning_effort import ReasoningEffort
+from .populate_template_response_reasoning_effort import PopulateTemplateResponseReasoningEffortParams
from .tool_function import ToolFunctionParams
from .linked_tool_response import LinkedToolResponseParams
from .environment_response import EnvironmentResponseParams
@@ -119,9 +119,9 @@ class PopulateTemplateResponseParams(typing_extensions.TypedDict):
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
"""
- reasoning_effort: typing_extensions.NotRequired[ReasoningEffort]
+ reasoning_effort: typing_extensions.NotRequired[PopulateTemplateResponseReasoningEffortParams]
"""
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which specifies the maximum token budget.
"""
tools: typing_extensions.NotRequired[typing.Sequence[ToolFunctionParams]]
@@ -169,6 +169,11 @@ class PopulateTemplateResponseParams(typing_extensions.TypedDict):
Name of the Prompt.
"""
+ schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The JSON schema for the Prompt.
+ """
+
version_id: str
"""
Unique identifier for the specific Prompt Version. If no query params provided, the default deployed Prompt Version is returned.
@@ -213,6 +218,11 @@ class PopulateTemplateResponseParams(typing_extensions.TypedDict):
Aggregation of Evaluator results for the Prompt Version.
"""
+ content: typing_extensions.NotRequired[str]
+ """
+ The serialized kernel for the Prompt. Corresponds to the .prompt file.
+ """
+
populated_template: typing_extensions.NotRequired[PopulateTemplateResponsePopulatedTemplateParams]
"""
The template populated with the input values you provided in the request. Returns None if no template exists.
diff --git a/src/humanloop/requests/populate_template_response_reasoning_effort.py b/src/humanloop/requests/populate_template_response_reasoning_effort.py
new file mode 100644
index 00000000..6b1dd46a
--- /dev/null
+++ b/src/humanloop/requests/populate_template_response_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ..types.open_ai_reasoning_effort import OpenAiReasoningEffort
+
+PopulateTemplateResponseReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/requests/prompt_kernel_request.py b/src/humanloop/requests/prompt_kernel_request.py
index 61355166..1e4f56de 100644
--- a/src/humanloop/requests/prompt_kernel_request.py
+++ b/src/humanloop/requests/prompt_kernel_request.py
@@ -9,11 +9,17 @@
from .prompt_kernel_request_stop import PromptKernelRequestStopParams
import typing
from .response_format import ResponseFormatParams
-from ..types.reasoning_effort import ReasoningEffort
+from .prompt_kernel_request_reasoning_effort import PromptKernelRequestReasoningEffortParams
from .tool_function import ToolFunctionParams
class PromptKernelRequestParams(typing_extensions.TypedDict):
+ """
+ Base class used by both PromptKernelRequest and AgentKernelRequest.
+
+ Contains the Prompt-related fields shared by both.
+ """
+
model: str
"""
The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
@@ -89,9 +95,9 @@ class PromptKernelRequestParams(typing_extensions.TypedDict):
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
"""
- reasoning_effort: typing_extensions.NotRequired[ReasoningEffort]
+ reasoning_effort: typing_extensions.NotRequired[PromptKernelRequestReasoningEffortParams]
"""
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which specifies the maximum token budget.
"""
tools: typing_extensions.NotRequired[typing.Sequence[ToolFunctionParams]]
diff --git a/src/humanloop/requests/prompt_kernel_request_reasoning_effort.py b/src/humanloop/requests/prompt_kernel_request_reasoning_effort.py
new file mode 100644
index 00000000..0c3d194b
--- /dev/null
+++ b/src/humanloop/requests/prompt_kernel_request_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ..types.open_ai_reasoning_effort import OpenAiReasoningEffort
+
+PromptKernelRequestReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int]
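Note: with this union, `reasoning_effort` accepts either an OpenAI effort level or an Anthropic token budget. A sketch, assuming `"medium"` is one of the OpenAiReasoningEffort literals and that `model` is the only required key (the Anthropic model name is a placeholder):

    from humanloop.requests.prompt_kernel_request import PromptKernelRequestParams

    # OpenAI reasoning model: pass an effort level.
    openai_kernel: PromptKernelRequestParams = {
        "model": "o3-mini",
        "reasoning_effort": "medium",  # assumed OpenAiReasoningEffort literal
    }

    # Anthropic reasoning model: pass the maximum token budget instead.
    anthropic_kernel: PromptKernelRequestParams = {
        "model": "claude-placeholder",
        "reasoning_effort": 1024,
    }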
diff --git a/src/humanloop/requests/prompt_response.py b/src/humanloop/requests/prompt_response.py
index 912866c5..05b4a71e 100644
--- a/src/humanloop/requests/prompt_response.py
+++ b/src/humanloop/requests/prompt_response.py
@@ -10,7 +10,7 @@
from .prompt_response_stop import PromptResponseStopParams
import typing
from .response_format import ResponseFormatParams
-from ..types.reasoning_effort import ReasoningEffort
+from .prompt_response_reasoning_effort import PromptResponseReasoningEffortParams
from .tool_function import ToolFunctionParams
from .linked_tool_response import LinkedToolResponseParams
from .environment_response import EnvironmentResponseParams
@@ -122,9 +122,9 @@ class PromptResponseParams(typing_extensions.TypedDict):
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
"""
- reasoning_effort: typing_extensions.NotRequired[ReasoningEffort]
+ reasoning_effort: typing_extensions.NotRequired[PromptResponseReasoningEffortParams]
"""
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which specifies the maximum token budget.
"""
tools: typing_extensions.NotRequired[typing.Sequence[ToolFunctionParams]]
@@ -172,6 +172,11 @@ class PromptResponseParams(typing_extensions.TypedDict):
Name of the Prompt.
"""
+ schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The JSON schema for the Prompt.
+ """
+
version_id: str
"""
Unique identifier for the specific Prompt Version. If no query params provided, the default deployed Prompt Version is returned.
@@ -215,3 +220,8 @@ class PromptResponseParams(typing_extensions.TypedDict):
"""
Aggregation of Evaluator results for the Prompt Version.
"""
+
+ content: typing_extensions.NotRequired[str]
+ """
+ The serialized kernel for the Prompt. Corresponds to the .prompt file.
+ """
diff --git a/src/humanloop/requests/prompt_response_reasoning_effort.py b/src/humanloop/requests/prompt_response_reasoning_effort.py
new file mode 100644
index 00000000..4d019051
--- /dev/null
+++ b/src/humanloop/requests/prompt_response_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ..types.open_ai_reasoning_effort import OpenAiReasoningEffort
+
+PromptResponseReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/requests/run_version_response.py b/src/humanloop/requests/run_version_response.py
index 879ea25c..569d0d76 100644
--- a/src/humanloop/requests/run_version_response.py
+++ b/src/humanloop/requests/run_version_response.py
@@ -5,7 +5,8 @@
from .tool_response import ToolResponseParams
from .evaluator_response import EvaluatorResponseParams
from .flow_response import FlowResponseParams
+from .agent_response import AgentResponseParams
RunVersionResponseParams = typing.Union[
- PromptResponseParams, ToolResponseParams, EvaluatorResponseParams, FlowResponseParams
+ PromptResponseParams, ToolResponseParams, EvaluatorResponseParams, FlowResponseParams, AgentResponseParams
]
diff --git a/src/humanloop/requests/tool_call_response.py b/src/humanloop/requests/tool_call_response.py
new file mode 100644
index 00000000..1c92b28f
--- /dev/null
+++ b/src/humanloop/requests/tool_call_response.py
@@ -0,0 +1,145 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import datetime as dt
+from .tool_response import ToolResponseParams
+import typing
+from ..types.log_status import LogStatus
+from .evaluator_log_response import EvaluatorLogResponseParams
+from .log_response import LogResponseParams
+
+
+class ToolCallResponseParams(typing_extensions.TypedDict):
+ """
+ Response model for a Tool call.
+ """
+
+ start_time: typing_extensions.NotRequired[dt.datetime]
+ """
+ When the logged event started.
+ """
+
+ end_time: typing_extensions.NotRequired[dt.datetime]
+ """
+ When the logged event ended.
+ """
+
+ tool: ToolResponseParams
+ """
+ Tool used to generate the Log.
+ """
+
+ output: typing_extensions.NotRequired[str]
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing_extensions.NotRequired[dt.datetime]
+ """
+ User defined timestamp for when the log was created.
+ """
+
+ error: typing_extensions.NotRequired[str]
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing_extensions.NotRequired[float]
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing_extensions.NotRequired[str]
+ """
+ Captured log and debug statements.
+ """
+
+ provider_request: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Raw request sent to provider.
+ """
+
+ provider_response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Raw response received from the provider.
+ """
+
+ inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The inputs passed to the prompt template.
+ """
+
+ source: typing_extensions.NotRequired[str]
+ """
+ Identifies where the model was called from.
+ """
+
+ metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Any additional metadata to record.
+ """
+
+ log_status: typing_extensions.NotRequired[LogStatus]
+ """
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+ """
+
+ source_datapoint_id: typing_extensions.NotRequired[str]
+ """
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ """
+
+ trace_parent_id: typing_extensions.NotRequired[str]
+ """
+ The ID of the parent Log to nest this Log under in a Trace.
+ """
+
+ batches: typing_extensions.NotRequired[typing.Sequence[str]]
+ """
+ Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations.
+ """
+
+ user: typing_extensions.NotRequired[str]
+ """
+ End-user ID related to the Log.
+ """
+
+ environment: typing_extensions.NotRequired[str]
+ """
+ The name of the Environment the Log is associated to.
+ """
+
+ save: typing_extensions.NotRequired[bool]
+ """
+ Whether the request/response payloads will be stored on Humanloop.
+ """
+
+ log_id: typing_extensions.NotRequired[str]
+ """
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ """
+
+ id: str
+ """
+ ID of the log.
+ """
+
+ evaluator_logs: typing.Sequence[EvaluatorLogResponseParams]
+ """
+ List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
+ """
+
+ trace_flow_id: typing_extensions.NotRequired[str]
+ """
+ Identifier for the Flow that the Trace belongs to.
+ """
+
+ trace_id: typing_extensions.NotRequired[str]
+ """
+ ID of the Trace containing the Tool Call Log.
+ """
+
+ trace_children: typing_extensions.NotRequired[typing.Sequence[LogResponseParams]]
+ """
+ Logs nested under this Log in the Trace.
+ """
diff --git a/src/humanloop/requests/tool_log_response.py b/src/humanloop/requests/tool_log_response.py
index bac9dbbb..1aa0daea 100644
--- a/src/humanloop/requests/tool_log_response.py
+++ b/src/humanloop/requests/tool_log_response.py
@@ -7,6 +7,7 @@
import typing
from ..types.log_status import LogStatus
from .tool_response import ToolResponseParams
+from .chat_message import ChatMessageParams
import typing
if typing.TYPE_CHECKING:
@@ -148,3 +149,8 @@ class ToolLogResponseParams(typing_extensions.TypedDict):
"""
Tool used to generate the Log.
"""
+
+ output_message: typing_extensions.NotRequired[ChatMessageParams]
+ """
+ The message returned by the Tool.
+ """
diff --git a/src/humanloop/requests/version_deployment_response_file.py b/src/humanloop/requests/version_deployment_response_file.py
index 8a16af00..9659cb49 100644
--- a/src/humanloop/requests/version_deployment_response_file.py
+++ b/src/humanloop/requests/version_deployment_response_file.py
@@ -10,6 +10,12 @@
from .tool_response import ToolResponseParams
from .evaluator_response import EvaluatorResponseParams
from .flow_response import FlowResponseParams
+ from .agent_response import AgentResponseParams
VersionDeploymentResponseFileParams = typing.Union[
- "PromptResponseParams", "ToolResponseParams", DatasetResponseParams, "EvaluatorResponseParams", "FlowResponseParams"
+ "PromptResponseParams",
+ "ToolResponseParams",
+ DatasetResponseParams,
+ "EvaluatorResponseParams",
+ "FlowResponseParams",
+ "AgentResponseParams",
]
diff --git a/src/humanloop/requests/version_id_response_version.py b/src/humanloop/requests/version_id_response_version.py
index 50ecf7bc..9c317679 100644
--- a/src/humanloop/requests/version_id_response_version.py
+++ b/src/humanloop/requests/version_id_response_version.py
@@ -10,6 +10,12 @@
from .tool_response import ToolResponseParams
from .evaluator_response import EvaluatorResponseParams
from .flow_response import FlowResponseParams
+ from .agent_response import AgentResponseParams
VersionIdResponseVersionParams = typing.Union[
- "PromptResponseParams", "ToolResponseParams", DatasetResponseParams, "EvaluatorResponseParams", "FlowResponseParams"
+ "PromptResponseParams",
+ "ToolResponseParams",
+ DatasetResponseParams,
+ "EvaluatorResponseParams",
+ "FlowResponseParams",
+ "AgentResponseParams",
]
diff --git a/src/humanloop/tools/client.py b/src/humanloop/tools/client.py
index 16d75bd7..ea6b14a2 100644
--- a/src/humanloop/tools/client.py
+++ b/src/humanloop/tools/client.py
@@ -3,10 +3,11 @@
import typing
from ..core.client_wrapper import SyncClientWrapper
from .raw_client import RawToolsClient
+from ..requests.tool_kernel_request import ToolKernelRequestParams
import datetime as dt
from ..types.log_status import LogStatus
-from ..requests.tool_kernel_request import ToolKernelRequestParams
from ..core.request_options import RequestOptions
+from ..types.tool_call_response import ToolCallResponse
from ..types.create_tool_log_response import CreateToolLogResponse
from ..types.log_response import LogResponse
from ..types.project_sort_by import ProjectSortBy
@@ -29,6 +30,8 @@
from ..requests.evaluator_activation_deactivation_request_deactivate_item import (
EvaluatorActivationDeactivationRequestDeactivateItemParams,
)
+from ..types.file_environment_variable_request import FileEnvironmentVariableRequest
+from ..requests.file_environment_variable_request import FileEnvironmentVariableRequestParams
from ..core.client_wrapper import AsyncClientWrapper
from .raw_client import AsyncRawToolsClient
from ..core.pagination import AsyncPager
@@ -52,6 +55,133 @@ def with_raw_response(self) -> RawToolsClient:
"""
return self._raw_client
+ def call(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ tool: typing.Optional[ToolKernelRequestParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ tool_call_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> ToolCallResponse:
+ """
+ Call a Tool.
+
+ Calling a Tool with inputs runs the tool's source code and logs the result and metadata to Humanloop.
+
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Tool. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can pass in
+ Tool details in the request body. In this case, we will check whether the details correspond
+ to an existing version of the Tool. If they do not, we will create a new version. This is helpful
+ when you are storing or deriving your Tool details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Tool to call.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to call.
+
+ path : typing.Optional[str]
+ Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Tool.
+
+ tool : typing.Optional[ToolKernelRequestParams]
+ Details of your Tool. A new Tool version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ tool_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ ToolCallResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.tools.call()
+ """
+ response = self._raw_client.call(
+ version_id=version_id,
+ environment=environment,
+ path=path,
+ id=id,
+ tool=tool,
+ inputs=inputs,
+ source=source,
+ metadata=metadata,
+ start_time=start_time,
+ end_time=end_time,
+ log_status=log_status,
+ source_datapoint_id=source_datapoint_id,
+ trace_parent_id=trace_parent_id,
+ user=user,
+ tool_call_request_environment=tool_call_request_environment,
+ save=save,
+ log_id=log_id,
+ request_options=request_options,
+ )
+ return response.data
+
def log(
self,
*,
@@ -59,6 +189,7 @@ def log(
environment: typing.Optional[str] = None,
path: typing.Optional[str] = OMIT,
id: typing.Optional[str] = OMIT,
+ tool: typing.Optional[ToolKernelRequestParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -78,7 +209,6 @@ def log(
tool_log_request_environment: typing.Optional[str] = OMIT,
save: typing.Optional[bool] = OMIT,
log_id: typing.Optional[str] = OMIT,
- tool: typing.Optional[ToolKernelRequestParams] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> CreateToolLogResponse:
"""
@@ -106,6 +236,9 @@ def log(
id : typing.Optional[str]
ID for an existing Tool.
+ tool : typing.Optional[ToolKernelRequestParams]
+ Details of your Tool. A new Tool version will be created if the provided details are new.
+
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -163,9 +296,6 @@ def log(
log_id : typing.Optional[str]
This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- tool : typing.Optional[ToolKernelRequestParams]
- Details of your Tool. A new Tool version will be created if the provided details are new.
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -206,6 +336,7 @@ def log(
environment=environment,
path=path,
id=id,
+ tool=tool,
start_time=start_time,
end_time=end_time,
output=output,
@@ -225,7 +356,6 @@ def log(
tool_log_request_environment=tool_log_request_environment,
save=save,
log_id=log_id,
- tool=tool,
request_options=request_options,
)
return response.data
@@ -966,6 +1096,112 @@ def update_monitoring(
)
return response.data
+ def get_environment_variables(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> typing.List[FileEnvironmentVariableRequest]:
+ """
+ Parameters
+ ----------
+ id : str
+ Unique identifier for File.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.List[FileEnvironmentVariableRequest]
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.tools.get_environment_variables(
+ id="id",
+ )
+ """
+ response = self._raw_client.get_environment_variables(id, request_options=request_options)
+ return response.data
+
+ def add_environment_variable(
+ self,
+ id: str,
+ *,
+ request: typing.Sequence[FileEnvironmentVariableRequestParams],
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.List[FileEnvironmentVariableRequest]:
+ """
+ Add an environment variable to a Tool.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Tool.
+
+ request : typing.Sequence[FileEnvironmentVariableRequestParams]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.List[FileEnvironmentVariableRequest]
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.tools.add_environment_variable(
+ id="id",
+ request=[{"name": "name", "value": "value"}],
+ )
+ """
+ response = self._raw_client.add_environment_variable(id, request=request, request_options=request_options)
+ return response.data
+
+ def delete_environment_variable(
+ self, id: str, name: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> typing.List[FileEnvironmentVariableRequest]:
+ """
+ Parameters
+ ----------
+ id : str
+ Unique identifier for File.
+
+ name : str
+ Name of the Environment Variable to delete.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.List[FileEnvironmentVariableRequest]
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.tools.delete_environment_variable(
+ id="id",
+ name="name",
+ )
+ """
+ response = self._raw_client.delete_environment_variable(id, name, request_options=request_options)
+ return response.data
+
class AsyncToolsClient:
def __init__(self, *, client_wrapper: AsyncClientWrapper):
@@ -982,6 +1218,141 @@ def with_raw_response(self) -> AsyncRawToolsClient:
"""
return self._raw_client
+ async def call(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ tool: typing.Optional[ToolKernelRequestParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ tool_call_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> ToolCallResponse:
+ """
+ Call a Tool.
+
+ Calling a Tool with inputs runs the tool's source code and logs the result and metadata to Humanloop.
+
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Tool. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can pass in
+ Tool details in the request body. In this case, we will check whether the details correspond
+ to an existing version of the Tool. If they do not, we will create a new version. This is helpful
+ when you are storing or deriving your Tool details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Tool to call.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to call.
+
+ path : typing.Optional[str]
+ Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Tool.
+
+ tool : typing.Optional[ToolKernelRequestParams]
+ Details of your Tool. A new Tool version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ tool_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ ToolCallResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.tools.call()
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.call(
+ version_id=version_id,
+ environment=environment,
+ path=path,
+ id=id,
+ tool=tool,
+ inputs=inputs,
+ source=source,
+ metadata=metadata,
+ start_time=start_time,
+ end_time=end_time,
+ log_status=log_status,
+ source_datapoint_id=source_datapoint_id,
+ trace_parent_id=trace_parent_id,
+ user=user,
+ tool_call_request_environment=tool_call_request_environment,
+ save=save,
+ log_id=log_id,
+ request_options=request_options,
+ )
+ return response.data
+
async def log(
self,
*,
@@ -989,6 +1360,7 @@ async def log(
environment: typing.Optional[str] = None,
path: typing.Optional[str] = OMIT,
id: typing.Optional[str] = OMIT,
+ tool: typing.Optional[ToolKernelRequestParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -1008,7 +1380,6 @@ async def log(
tool_log_request_environment: typing.Optional[str] = OMIT,
save: typing.Optional[bool] = OMIT,
log_id: typing.Optional[str] = OMIT,
- tool: typing.Optional[ToolKernelRequestParams] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> CreateToolLogResponse:
"""
@@ -1036,6 +1407,9 @@ async def log(
id : typing.Optional[str]
ID for an existing Tool.
+ tool : typing.Optional[ToolKernelRequestParams]
+ Details of your Tool. A new Tool version will be created if the provided details are new.
+
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -1093,9 +1467,6 @@ async def log(
log_id : typing.Optional[str]
This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- tool : typing.Optional[ToolKernelRequestParams]
- Details of your Tool. A new Tool version will be created if the provided details are new.
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -1144,6 +1515,7 @@ async def main() -> None:
environment=environment,
path=path,
id=id,
+ tool=tool,
start_time=start_time,
end_time=end_time,
output=output,
@@ -1163,7 +1535,6 @@ async def main() -> None:
tool_log_request_environment=tool_log_request_environment,
save=save,
log_id=log_id,
- tool=tool,
request_options=request_options,
)
return response.data
@@ -2010,3 +2381,133 @@ async def main() -> None:
id, activate=activate, deactivate=deactivate, request_options=request_options
)
return response.data
+
+ async def get_environment_variables(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> typing.List[FileEnvironmentVariableRequest]:
+ """
+ Parameters
+ ----------
+ id : str
+ Unique identifier for File.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.List[FileEnvironmentVariableRequest]
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.tools.get_environment_variables(
+ id="id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.get_environment_variables(id, request_options=request_options)
+ return response.data
+
+ async def add_environment_variable(
+ self,
+ id: str,
+ *,
+ request: typing.Sequence[FileEnvironmentVariableRequestParams],
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.List[FileEnvironmentVariableRequest]:
+ """
+ Add an environment variable to a Tool.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Tool.
+
+ request : typing.Sequence[FileEnvironmentVariableRequestParams]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.List[FileEnvironmentVariableRequest]
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.tools.add_environment_variable(
+ id="id",
+ request=[{"name": "name", "value": "value"}],
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.add_environment_variable(id, request=request, request_options=request_options)
+ return response.data
+
+ async def delete_environment_variable(
+ self, id: str, name: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> typing.List[FileEnvironmentVariableRequest]:
+ """
+ Parameters
+ ----------
+ id : str
+ Unique identifier for File.
+
+ name : str
+ Name of the Environment Variable to delete.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.List[FileEnvironmentVariableRequest]
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.tools.delete_environment_variable(
+ id="id",
+ name="name",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.delete_environment_variable(id, name, request_options=request_options)
+ return response.data
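Note: taken together, the async client mirrors the sync surface. A condensed sketch combining the new `call` and environment-variable methods (the IDs are placeholders, and attribute access on the returned models is assumed):

    import asyncio

    from humanloop import AsyncHumanloop

    client = AsyncHumanloop(api_key="YOUR_API_KEY")


    async def main() -> None:
        # Call the default deployed version of a Tool by ID.
        response = await client.tools.call(id="tl_placeholder", inputs={"query": "example"})
        print(response.output)

        # Inspect the variables configured on the same Tool.
        variables = await client.tools.get_environment_variables(id="tl_placeholder")
        print([v.name for v in variables])


    asyncio.run(main())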
diff --git a/src/humanloop/tools/raw_client.py b/src/humanloop/tools/raw_client.py
index 4a1f29e9..b412b771 100644
--- a/src/humanloop/tools/raw_client.py
+++ b/src/humanloop/tools/raw_client.py
@@ -2,18 +2,19 @@
import typing
from ..core.client_wrapper import SyncClientWrapper
+from ..requests.tool_kernel_request import ToolKernelRequestParams
import datetime as dt
from ..types.log_status import LogStatus
-from ..requests.tool_kernel_request import ToolKernelRequestParams
from ..core.request_options import RequestOptions
from ..core.http_response import HttpResponse
-from ..types.create_tool_log_response import CreateToolLogResponse
+from ..types.tool_call_response import ToolCallResponse
from ..core.serialization import convert_and_respect_annotation_metadata
from ..core.unchecked_base_model import construct_type
from ..errors.unprocessable_entity_error import UnprocessableEntityError
from ..types.http_validation_error import HttpValidationError
from json.decoder import JSONDecodeError
from ..core.api_error import ApiError
+from ..types.create_tool_log_response import CreateToolLogResponse
from ..types.log_response import LogResponse
from ..core.jsonable_encoder import jsonable_encoder
from ..requests.tool_function import ToolFunctionParams
@@ -27,6 +28,8 @@
from ..requests.evaluator_activation_deactivation_request_deactivate_item import (
EvaluatorActivationDeactivationRequestDeactivateItemParams,
)
+from ..types.file_environment_variable_request import FileEnvironmentVariableRequest
+from ..requests.file_environment_variable_request import FileEnvironmentVariableRequestParams
from ..core.client_wrapper import AsyncClientWrapper
from ..core.http_response import AsyncHttpResponse
@@ -38,6 +41,159 @@ class RawToolsClient:
def __init__(self, *, client_wrapper: SyncClientWrapper):
self._client_wrapper = client_wrapper
+ def call(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ tool: typing.Optional[ToolKernelRequestParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ tool_call_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[ToolCallResponse]:
+ """
+ Call a Tool.
+
+ Calling a Tool with inputs runs the tool's source code and logs the result and metadata to Humanloop.
+
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Tool. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can pass in
+ Tool details in the request body. In this case, we will check whether the details correspond
+ to an existing version of the Tool. If they do not, we will create a new version. This is helpful
+ when you are storing or deriving your Tool details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Tool to call.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to call.
+
+ path : typing.Optional[str]
+ Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Tool.
+
+ tool : typing.Optional[ToolKernelRequestParams]
+ Details of your Tool. A new Tool version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ tool_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[ToolCallResponse]
+ Successful Response
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "tools/call",
+ method="POST",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ json={
+ "path": path,
+ "id": id,
+ "tool": convert_and_respect_annotation_metadata(
+ object_=tool, annotation=ToolKernelRequestParams, direction="write"
+ ),
+ "inputs": inputs,
+ "source": source,
+ "metadata": metadata,
+ "start_time": start_time,
+ "end_time": end_time,
+ "log_status": log_status,
+ "source_datapoint_id": source_datapoint_id,
+ "trace_parent_id": trace_parent_id,
+ "user": user,
+ "environment": tool_call_request_environment,
+ "save": save,
+ "log_id": log_id,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ ToolCallResponse,
+ construct_type(
+ type_=ToolCallResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
def log(
self,
*,
@@ -45,6 +201,7 @@ def log(
environment: typing.Optional[str] = None,
path: typing.Optional[str] = OMIT,
id: typing.Optional[str] = OMIT,
+ tool: typing.Optional[ToolKernelRequestParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -64,7 +221,6 @@ def log(
tool_log_request_environment: typing.Optional[str] = OMIT,
save: typing.Optional[bool] = OMIT,
log_id: typing.Optional[str] = OMIT,
- tool: typing.Optional[ToolKernelRequestParams] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[CreateToolLogResponse]:
"""
@@ -92,6 +248,9 @@ def log(
id : typing.Optional[str]
ID for an existing Tool.
+ tool : typing.Optional[ToolKernelRequestParams]
+ Details of your Tool. A new Tool version will be created if the provided details are new.
+
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -149,9 +308,6 @@ def log(
log_id : typing.Optional[str]
This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- tool : typing.Optional[ToolKernelRequestParams]
- Details of your Tool. A new Tool version will be created if the provided details are new.
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -170,6 +326,9 @@ def log(
json={
"path": path,
"id": id,
+ "tool": convert_and_respect_annotation_metadata(
+ object_=tool, annotation=ToolKernelRequestParams, direction="write"
+ ),
"start_time": start_time,
"end_time": end_time,
"output": output,
@@ -189,9 +348,6 @@ def log(
"environment": tool_log_request_environment,
"save": save,
"log_id": log_id,
- "tool": convert_and_respect_annotation_metadata(
- object_=tool, annotation=ToolKernelRequestParams, direction="write"
- ),
},
headers={
"content-type": "application/json",
@@ -1038,75 +1194,387 @@ def update_monitoring(
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
+ def get_environment_variables(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> HttpResponse[typing.List[FileEnvironmentVariableRequest]]:
+ """
+ Parameters
+ ----------
+ id : str
+ Unique identifier for File.
-class AsyncRawToolsClient:
- def __init__(self, *, client_wrapper: AsyncClientWrapper):
- self._client_wrapper = client_wrapper
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- async def log(
+ Returns
+ -------
+ HttpResponse[typing.List[FileEnvironmentVariableRequest]]
+ Successful Response
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"tools/{jsonable_encoder(id)}/environment-variables",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ typing.List[FileEnvironmentVariableRequest],
+ construct_type(
+ type_=typing.List[FileEnvironmentVariableRequest], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def add_environment_variable(
self,
+ id: str,
*,
- version_id: typing.Optional[str] = None,
- environment: typing.Optional[str] = None,
- path: typing.Optional[str] = OMIT,
- id: typing.Optional[str] = OMIT,
- start_time: typing.Optional[dt.datetime] = OMIT,
- end_time: typing.Optional[dt.datetime] = OMIT,
- output: typing.Optional[str] = OMIT,
- created_at: typing.Optional[dt.datetime] = OMIT,
- error: typing.Optional[str] = OMIT,
- provider_latency: typing.Optional[float] = OMIT,
- stdout: typing.Optional[str] = OMIT,
- provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- source: typing.Optional[str] = OMIT,
- metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
- source_datapoint_id: typing.Optional[str] = OMIT,
- trace_parent_id: typing.Optional[str] = OMIT,
- user: typing.Optional[str] = OMIT,
- tool_log_request_environment: typing.Optional[str] = OMIT,
- save: typing.Optional[bool] = OMIT,
- log_id: typing.Optional[str] = OMIT,
- tool: typing.Optional[ToolKernelRequestParams] = OMIT,
+ request: typing.Sequence[FileEnvironmentVariableRequestParams],
request_options: typing.Optional[RequestOptions] = None,
- ) -> AsyncHttpResponse[CreateToolLogResponse]:
+ ) -> HttpResponse[typing.List[FileEnvironmentVariableRequest]]:
"""
- Log to a Tool.
-
- You can use query parameters `version_id`, or `environment`, to target
- an existing version of the Tool. Otherwise the default deployed version will be chosen.
-
- Instead of targeting an existing version explicitly, you can instead pass in
- Tool details in the request body. In this case, we will check if the details correspond
- to an existing version of the Tool, if not we will create a new version. This is helpful
- in the case where you are storing or deriving your Tool details in code.
+ Add an environment variable to a Tool.
Parameters
----------
- version_id : typing.Optional[str]
- A specific Version ID of the Tool to log to.
-
- environment : typing.Optional[str]
- Name of the Environment identifying a deployed version to log to.
-
- path : typing.Optional[str]
- Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
-
- id : typing.Optional[str]
- ID for an existing Tool.
-
- start_time : typing.Optional[dt.datetime]
- When the logged event started.
+ id : str
+ Unique identifier for Tool.
- end_time : typing.Optional[dt.datetime]
- When the logged event ended.
+ request : typing.Sequence[FileEnvironmentVariableRequestParams]
- output : typing.Optional[str]
- Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- created_at : typing.Optional[dt.datetime]
+ Returns
+ -------
+ HttpResponse[typing.List[FileEnvironmentVariableRequest]]
+ Successful Response
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"tools/{jsonable_encoder(id)}/environment-variables",
+ method="POST",
+ json=convert_and_respect_annotation_metadata(
+ object_=request, annotation=typing.Sequence[FileEnvironmentVariableRequestParams], direction="write"
+ ),
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ typing.List[FileEnvironmentVariableRequest],
+ construct_type(
+ type_=typing.List[FileEnvironmentVariableRequest], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def delete_environment_variable(
+ self, id: str, name: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> HttpResponse[typing.List[FileEnvironmentVariableRequest]]:
+ """
+ Parameters
+ ----------
+ id : str
+ Unique identifier for File.
+
+ name : str
+ Name of the Environment Variable to delete.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[typing.List[FileEnvironmentVariableRequest]]
+ Successful Response
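+
+ Examples
+ --------
+ # A minimal sketch; assumes this raw client is available as
+ # `raw_tools`. Identifiers are illustrative.
+ response = raw_tools.delete_environment_variable(
+ id="tl_1234",
+ name="OPENAI_API_KEY",
+ )
+ remaining = response.data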
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"tools/{jsonable_encoder(id)}/environment-variables/{jsonable_encoder(name)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ typing.List[FileEnvironmentVariableRequest],
+ construct_type(
+ type_=typing.List[FileEnvironmentVariableRequest], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncRawToolsClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def call(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ tool: typing.Optional[ToolKernelRequestParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ tool_call_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[ToolCallResponse]:
+ """
+ Call a Tool.
+
+ Calling a Tool with inputs runs the tool's source code and logs the result and metadata to Humanloop.
+
+ You can use query parameters `version_id`, or `environment`, to target
+ an existing version of the Tool. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can instead pass in
+ Tool details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Tool. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Tool details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Tool to call.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to call.
+
+ path : typing.Optional[str]
+ Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Tool.
+
+ tool : typing.Optional[ToolKernelRequestParams]
+ Details of your Tool. A new Tool version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ tool_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[ToolCallResponse]
+ Successful Response
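+
+ Examples
+ --------
+ # A minimal sketch; assumes this raw client is available as
+ # `raw_tools` and runs inside an event loop. Identifiers and
+ # inputs are illustrative.
+ response = await raw_tools.call(
+ id="tl_1234",
+ inputs={"query": "example"},
+ )
+ result = response.data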
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "tools/call",
+ method="POST",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ json={
+ "path": path,
+ "id": id,
+ "tool": convert_and_respect_annotation_metadata(
+ object_=tool, annotation=ToolKernelRequestParams, direction="write"
+ ),
+ "inputs": inputs,
+ "source": source,
+ "metadata": metadata,
+ "start_time": start_time,
+ "end_time": end_time,
+ "log_status": log_status,
+ "source_datapoint_id": source_datapoint_id,
+ "trace_parent_id": trace_parent_id,
+ "user": user,
+ "environment": tool_call_request_environment,
+ "save": save,
+ "log_id": log_id,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ ToolCallResponse,
+ construct_type(
+ type_=ToolCallResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def log(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ tool: typing.Optional[ToolKernelRequestParams] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ output: typing.Optional[str] = OMIT,
+ created_at: typing.Optional[dt.datetime] = OMIT,
+ error: typing.Optional[str] = OMIT,
+ provider_latency: typing.Optional[float] = OMIT,
+ stdout: typing.Optional[str] = OMIT,
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ tool_log_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[CreateToolLogResponse]:
+ """
+ Log to a Tool.
+
+ You can use query parameters `version_id`, or `environment`, to target
+ an existing version of the Tool. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can instead pass in
+ Tool details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Tool. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Tool details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Tool to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+ Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Tool.
+
+ tool : typing.Optional[ToolKernelRequestParams]
+ Details of your Tool. A new Tool version will be created if the provided details are new.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ output : typing.Optional[str]
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+ created_at : typing.Optional[dt.datetime]
User defined timestamp for when the log was created.
error : typing.Optional[str]
@@ -1154,9 +1622,6 @@ async def log(
log_id : typing.Optional[str]
This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- tool : typing.Optional[ToolKernelRequestParams]
- Details of your Tool. A new Tool version will be created if the provided details are new.
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -1175,6 +1640,9 @@ async def log(
json={
"path": path,
"id": id,
+ "tool": convert_and_respect_annotation_metadata(
+ object_=tool, annotation=ToolKernelRequestParams, direction="write"
+ ),
"start_time": start_time,
"end_time": end_time,
"output": output,
@@ -1194,9 +1662,6 @@ async def log(
"environment": tool_log_request_environment,
"save": save,
"log_id": log_id,
- "tool": convert_and_respect_annotation_metadata(
- object_=tool, annotation=ToolKernelRequestParams, direction="write"
- ),
},
headers={
"content-type": "application/json",
@@ -2044,3 +2509,159 @@ async def update_monitoring(
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def get_environment_variables(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncHttpResponse[typing.List[FileEnvironmentVariableRequest]]:
+ """
+ Parameters
+ ----------
+ id : str
+ Unique identifier for File.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[typing.List[FileEnvironmentVariableRequest]]
+ Successful Response
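+
+ Examples
+ --------
+ # A minimal sketch; assumes this raw client is available as
+ # `raw_tools` and runs inside an event loop. The ID is illustrative.
+ response = await raw_tools.get_environment_variables(id="tl_1234")
+ env_vars = response.data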
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"tools/{jsonable_encoder(id)}/environment-variables",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ typing.List[FileEnvironmentVariableRequest],
+ construct_type(
+ type_=typing.List[FileEnvironmentVariableRequest], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def add_environment_variable(
+ self,
+ id: str,
+ *,
+ request: typing.Sequence[FileEnvironmentVariableRequestParams],
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[typing.List[FileEnvironmentVariableRequest]]:
+ """
+ Add one or more environment variables to a Tool.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Tool.
+
+ request : typing.Sequence[FileEnvironmentVariableRequestParams]
+ The environment variables to add to the Tool.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[typing.List[FileEnvironmentVariableRequest]]
+ Successful Response
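+
+ Examples
+ --------
+ # A minimal sketch; assumes this raw client is available as
+ # `raw_tools`, runs inside an event loop, and that each request
+ # dict carries `name`/`value` keys. Identifiers and values are
+ # illustrative.
+ response = await raw_tools.add_environment_variable(
+ id="tl_1234",
+ request=[{"name": "OPENAI_API_KEY", "value": "secret"}],
+ )
+ env_vars = response.data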
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"tools/{jsonable_encoder(id)}/environment-variables",
+ method="POST",
+ json=convert_and_respect_annotation_metadata(
+ object_=request, annotation=typing.Sequence[FileEnvironmentVariableRequestParams], direction="write"
+ ),
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ typing.List[FileEnvironmentVariableRequest],
+ construct_type(
+ type_=typing.List[FileEnvironmentVariableRequest], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def delete_environment_variable(
+ self, id: str, name: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncHttpResponse[typing.List[FileEnvironmentVariableRequest]]:
+ """
+ Parameters
+ ----------
+ id : str
+ Unique identifier for File.
+
+ name : str
+ Name of the Environment Variable to delete.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[typing.List[FileEnvironmentVariableRequest]]
+ Successful Response
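+
+ Examples
+ --------
+ # A minimal sketch; assumes this raw client is available as
+ # `raw_tools` and runs inside an event loop. Identifiers are
+ # illustrative.
+ response = await raw_tools.delete_environment_variable(
+ id="tl_1234",
+ name="OPENAI_API_KEY",
+ )
+ remaining = response.data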
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"tools/{jsonable_encoder(id)}/environment-variables/{jsonable_encoder(name)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ typing.List[FileEnvironmentVariableRequest],
+ construct_type(
+ type_=typing.List[FileEnvironmentVariableRequest], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/humanloop/types/__init__.py b/src/humanloop/types/__init__.py
index 156f4e9a..8130325d 100644
--- a/src/humanloop/types/__init__.py
+++ b/src/humanloop/types/__init__.py
@@ -1,15 +1,44 @@
# This file was auto-generated by Fern from our API Definition.
+from .agent_call_response import AgentCallResponse
+from .agent_call_response_tool_choice import AgentCallResponseToolChoice
+from .agent_call_stream_response import AgentCallStreamResponse
+from .agent_call_stream_response_payload import AgentCallStreamResponsePayload
from .agent_config_response import AgentConfigResponse
+from .agent_continue_response import AgentContinueResponse
+from .agent_continue_response_tool_choice import AgentContinueResponseToolChoice
+from .agent_continue_stream_response import AgentContinueStreamResponse
+from .agent_continue_stream_response_payload import AgentContinueStreamResponsePayload
+from .agent_inline_tool import AgentInlineTool
+from .agent_kernel_request import AgentKernelRequest
+from .agent_kernel_request_reasoning_effort import AgentKernelRequestReasoningEffort
+from .agent_kernel_request_stop import AgentKernelRequestStop
+from .agent_kernel_request_template import AgentKernelRequestTemplate
+from .agent_kernel_request_tools_item import AgentKernelRequestToolsItem
+from .agent_linked_file_request import AgentLinkedFileRequest
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_linked_file_response_file import AgentLinkedFileResponseFile
+from .agent_log_response import AgentLogResponse
+from .agent_log_response_tool_choice import AgentLogResponseToolChoice
+from .agent_log_stream_response import AgentLogStreamResponse
+from .agent_response import AgentResponse
+from .agent_response_reasoning_effort import AgentResponseReasoningEffort
+from .agent_response_stop import AgentResponseStop
+from .agent_response_template import AgentResponseTemplate
+from .agent_response_tools_item import AgentResponseToolsItem
+from .anthropic_redacted_thinking_content import AnthropicRedactedThinkingContent
+from .anthropic_thinking_content import AnthropicThinkingContent
from .base_models_user_response import BaseModelsUserResponse
from .boolean_evaluator_stats_response import BooleanEvaluatorStatsResponse
from .chat_message import ChatMessage
from .chat_message_content import ChatMessageContent
from .chat_message_content_item import ChatMessageContentItem
+from .chat_message_thinking_item import ChatMessageThinkingItem
from .chat_role import ChatRole
from .chat_tool_type import ChatToolType
from .code_evaluator_request import CodeEvaluatorRequest
from .config_tool_response import ConfigToolResponse
+from .create_agent_log_response import CreateAgentLogResponse
from .create_datapoint_request import CreateDatapointRequest
from .create_datapoint_request_target_value import CreateDatapointRequestTargetValue
from .create_evaluator_log_response import CreateEvaluatorLogResponse
@@ -56,10 +85,12 @@
from .evaluator_return_type_enum import EvaluatorReturnTypeEnum
from .evaluator_version_id import EvaluatorVersionId
from .evaluators_request import EvaluatorsRequest
+from .event_type import EventType
from .external_evaluator_request import ExternalEvaluatorRequest
from .feedback_type import FeedbackType
from .file_environment_response import FileEnvironmentResponse
from .file_environment_response_file import FileEnvironmentResponseFile
+from .file_environment_variable_request import FileEnvironmentVariableRequest
from .file_id import FileId
from .file_path import FilePath
from .file_request import FileRequest
@@ -77,7 +108,9 @@
from .image_url import ImageUrl
from .image_url_detail import ImageUrlDetail
from .input_response import InputResponse
+from .linked_file_request import LinkedFileRequest
from .linked_tool_response import LinkedToolResponse
+from .list_agents import ListAgents
from .list_datasets import ListDatasets
from .list_evaluators import ListEvaluators
from .list_flows import ListFlows
@@ -86,6 +119,7 @@
from .llm_evaluator_request import LlmEvaluatorRequest
from .log_response import LogResponse
from .log_status import LogStatus
+from .log_stream_response import LogStreamResponse
from .model_endpoints import ModelEndpoints
from .model_providers import ModelProviders
from .monitoring_evaluator_environment_request import MonitoringEvaluatorEnvironmentRequest
@@ -94,18 +128,21 @@
from .monitoring_evaluator_version_request import MonitoringEvaluatorVersionRequest
from .numeric_evaluator_stats_response import NumericEvaluatorStatsResponse
from .observability_status import ObservabilityStatus
+from .on_agent_call_enum import OnAgentCallEnum
+from .open_ai_reasoning_effort import OpenAiReasoningEffort
from .overall_stats import OverallStats
+from .paginated_data_agent_response import PaginatedDataAgentResponse
from .paginated_data_evaluation_log_response import PaginatedDataEvaluationLogResponse
from .paginated_data_evaluator_response import PaginatedDataEvaluatorResponse
from .paginated_data_flow_response import PaginatedDataFlowResponse
from .paginated_data_log_response import PaginatedDataLogResponse
from .paginated_data_prompt_response import PaginatedDataPromptResponse
from .paginated_data_tool_response import PaginatedDataToolResponse
-from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response import (
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse,
+from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response import (
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse,
)
-from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item import (
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem,
+from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item import (
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem,
)
from .paginated_datapoint_response import PaginatedDatapointResponse
from .paginated_dataset_response import PaginatedDatasetResponse
@@ -115,6 +152,7 @@
from .platform_access_enum import PlatformAccessEnum
from .populate_template_response import PopulateTemplateResponse
from .populate_template_response_populated_template import PopulateTemplateResponsePopulatedTemplate
+from .populate_template_response_reasoning_effort import PopulateTemplateResponseReasoningEffort
from .populate_template_response_stop import PopulateTemplateResponseStop
from .populate_template_response_template import PopulateTemplateResponseTemplate
from .project_sort_by import ProjectSortBy
@@ -123,15 +161,16 @@
from .prompt_call_response_tool_choice import PromptCallResponseToolChoice
from .prompt_call_stream_response import PromptCallStreamResponse
from .prompt_kernel_request import PromptKernelRequest
+from .prompt_kernel_request_reasoning_effort import PromptKernelRequestReasoningEffort
from .prompt_kernel_request_stop import PromptKernelRequestStop
from .prompt_kernel_request_template import PromptKernelRequestTemplate
from .prompt_log_response import PromptLogResponse
from .prompt_log_response_tool_choice import PromptLogResponseToolChoice
from .prompt_response import PromptResponse
+from .prompt_response_reasoning_effort import PromptResponseReasoningEffort
from .prompt_response_stop import PromptResponseStop
from .prompt_response_template import PromptResponseTemplate
from .provider_api_keys import ProviderApiKeys
-from .reasoning_effort import ReasoningEffort
from .response_format import ResponseFormat
from .response_format_type import ResponseFormatType
from .run_stats_response import RunStatsResponse
@@ -144,6 +183,7 @@
from .text_evaluator_stats_response import TextEvaluatorStatsResponse
from .time_unit import TimeUnit
from .tool_call import ToolCall
+from .tool_call_response import ToolCallResponse
from .tool_choice import ToolChoice
from .tool_function import ToolFunction
from .tool_kernel_request import ToolKernelRequest
@@ -167,16 +207,45 @@
from .version_status import VersionStatus
__all__ = [
+ "AgentCallResponse",
+ "AgentCallResponseToolChoice",
+ "AgentCallStreamResponse",
+ "AgentCallStreamResponsePayload",
"AgentConfigResponse",
+ "AgentContinueResponse",
+ "AgentContinueResponseToolChoice",
+ "AgentContinueStreamResponse",
+ "AgentContinueStreamResponsePayload",
+ "AgentInlineTool",
+ "AgentKernelRequest",
+ "AgentKernelRequestReasoningEffort",
+ "AgentKernelRequestStop",
+ "AgentKernelRequestTemplate",
+ "AgentKernelRequestToolsItem",
+ "AgentLinkedFileRequest",
+ "AgentLinkedFileResponse",
+ "AgentLinkedFileResponseFile",
+ "AgentLogResponse",
+ "AgentLogResponseToolChoice",
+ "AgentLogStreamResponse",
+ "AgentResponse",
+ "AgentResponseReasoningEffort",
+ "AgentResponseStop",
+ "AgentResponseTemplate",
+ "AgentResponseToolsItem",
+ "AnthropicRedactedThinkingContent",
+ "AnthropicThinkingContent",
"BaseModelsUserResponse",
"BooleanEvaluatorStatsResponse",
"ChatMessage",
"ChatMessageContent",
"ChatMessageContentItem",
+ "ChatMessageThinkingItem",
"ChatRole",
"ChatToolType",
"CodeEvaluatorRequest",
"ConfigToolResponse",
+ "CreateAgentLogResponse",
"CreateDatapointRequest",
"CreateDatapointRequestTargetValue",
"CreateEvaluatorLogResponse",
@@ -221,10 +290,12 @@
"EvaluatorReturnTypeEnum",
"EvaluatorVersionId",
"EvaluatorsRequest",
+ "EventType",
"ExternalEvaluatorRequest",
"FeedbackType",
"FileEnvironmentResponse",
"FileEnvironmentResponseFile",
+ "FileEnvironmentVariableRequest",
"FileId",
"FilePath",
"FileRequest",
@@ -242,7 +313,9 @@
"ImageUrl",
"ImageUrlDetail",
"InputResponse",
+ "LinkedFileRequest",
"LinkedToolResponse",
+ "ListAgents",
"ListDatasets",
"ListEvaluators",
"ListFlows",
@@ -251,6 +324,7 @@
"LlmEvaluatorRequest",
"LogResponse",
"LogStatus",
+ "LogStreamResponse",
"ModelEndpoints",
"ModelProviders",
"MonitoringEvaluatorEnvironmentRequest",
@@ -259,15 +333,18 @@
"MonitoringEvaluatorVersionRequest",
"NumericEvaluatorStatsResponse",
"ObservabilityStatus",
+ "OnAgentCallEnum",
+ "OpenAiReasoningEffort",
"OverallStats",
+ "PaginatedDataAgentResponse",
"PaginatedDataEvaluationLogResponse",
"PaginatedDataEvaluatorResponse",
"PaginatedDataFlowResponse",
"PaginatedDataLogResponse",
"PaginatedDataPromptResponse",
"PaginatedDataToolResponse",
- "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse",
- "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem",
+ "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse",
+ "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem",
"PaginatedDatapointResponse",
"PaginatedDatasetResponse",
"PaginatedEvaluationResponse",
@@ -276,6 +353,7 @@
"PlatformAccessEnum",
"PopulateTemplateResponse",
"PopulateTemplateResponsePopulatedTemplate",
+ "PopulateTemplateResponseReasoningEffort",
"PopulateTemplateResponseStop",
"PopulateTemplateResponseTemplate",
"ProjectSortBy",
@@ -284,15 +362,16 @@
"PromptCallResponseToolChoice",
"PromptCallStreamResponse",
"PromptKernelRequest",
+ "PromptKernelRequestReasoningEffort",
"PromptKernelRequestStop",
"PromptKernelRequestTemplate",
"PromptLogResponse",
"PromptLogResponseToolChoice",
"PromptResponse",
+ "PromptResponseReasoningEffort",
"PromptResponseStop",
"PromptResponseTemplate",
"ProviderApiKeys",
- "ReasoningEffort",
"ResponseFormat",
"ResponseFormatType",
"RunStatsResponse",
@@ -305,6 +384,7 @@
"TextEvaluatorStatsResponse",
"TimeUnit",
"ToolCall",
+ "ToolCallResponse",
"ToolChoice",
"ToolFunction",
"ToolKernelRequest",
diff --git a/src/humanloop/types/agent_call_response.py b/src/humanloop/types/agent_call_response.py
new file mode 100644
index 00000000..ba3bbfec
--- /dev/null
+++ b/src/humanloop/types/agent_call_response.py
@@ -0,0 +1,224 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
+from .evaluator_response import EvaluatorResponse
+from .flow_response import FlowResponse
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse
+from .prompt_response import PromptResponse
+from .tool_response import ToolResponse
+from .version_deployment_response import VersionDeploymentResponse
+from .version_id_response import VersionIdResponse
+from .agent_log_response import AgentLogResponse
+from .evaluator_log_response import EvaluatorLogResponse
+from .flow_log_response import FlowLogResponse
+from .prompt_log_response import PromptLogResponse
+from .tool_log_response import ToolLogResponse
+import typing
+from .chat_message import ChatMessage
+import pydantic
+from .agent_call_response_tool_choice import AgentCallResponseToolChoice
+import datetime as dt
+from .log_status import LogStatus
+from .log_response import LogResponse
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class AgentCallResponse(UncheckedBaseModel):
+ """
+ Response model for an Agent call.
+ """
+
+ output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
+ """
+ The message returned by the provider.
+ """
+
+ prompt_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of tokens in the prompt used to generate the output.
+ """
+
+ reasoning_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of reasoning tokens used to generate the output.
+ """
+
+ output_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of tokens in the output generated by the model.
+ """
+
+ prompt_cost: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Cost in dollars associated with the tokens in the prompt.
+ """
+
+ output_cost: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Cost in dollars associated with the tokens in the output.
+ """
+
+ finish_reason: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Reason the generation finished.
+ """
+
+ messages: typing.Optional[typing.List[ChatMessage]] = pydantic.Field(default=None)
+ """
+ The messages passed to the provider chat endpoint.
+ """
+
+ tool_choice: typing.Optional[AgentCallResponseToolChoice] = pydantic.Field(default=None)
+ """
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+ """
+
+ agent: AgentResponse = pydantic.Field()
+ """
+ Agent that generated the Log.
+ """
+
+ start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ When the logged event started.
+ """
+
+ end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ When the logged event ended.
+ """
+
+ output: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ User defined timestamp for when the log was created.
+ """
+
+ error: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Captured log and debug statements.
+ """
+
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Raw request sent to provider.
+ """
+
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Raw response received from the provider.
+ """
+
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ The inputs passed to the prompt template.
+ """
+
+ source: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifies where the model was called from.
+ """
+
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Any additional metadata to record.
+ """
+
+ log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
+ """
+ Status of the Agent Log. If `incomplete`, the Agent turn was suspended due to a tool call and can be continued by calling /agents/continue with responses to the Agent's last message (which should contain tool calls). See the `previous_agent_message` field for easy access to the Agent's last message.
+ """
+
+ source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ """
+
+ trace_parent_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The ID of the parent Log to nest this Log under in a Trace.
+ """
+
+ batches: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
+ """
+ Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations
+ """
+
+ user: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ End-user ID related to the Log.
+ """
+
+ environment: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The name of the Environment the Log is associated to.
+ """
+
+ save: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ Whether the request/response payloads will be stored on Humanloop.
+ """
+
+ log_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ """
+
+ id: str = pydantic.Field()
+ """
+ Unique identifier for the Log.
+ """
+
+ evaluator_logs: typing.List[EvaluatorLogResponse] = pydantic.Field()
+ """
+ List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
+ """
+
+ trace_flow_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifier for the Flow that the Trace belongs to.
+ """
+
+ trace_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifier for the Trace that the Log belongs to.
+ """
+
+ trace_children: typing.Optional[typing.List[LogResponse]] = pydantic.Field(default=None)
+ """
+ Logs nested under this Log in the Trace.
+ """
+
+ previous_agent_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
+ """
+ The Agent's last message, which should contain tool calls. Only populated if the Log is incomplete due to a suspended Agent turn with tool calls. This is useful for continuing the Agent call by calling /agents/continue.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/agent_call_response_tool_choice.py b/src/humanloop/types/agent_call_response_tool_choice.py
new file mode 100644
index 00000000..95eca73e
--- /dev/null
+++ b/src/humanloop/types/agent_call_response_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .tool_choice import ToolChoice
+
+AgentCallResponseToolChoice = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice
+]
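+
+# Example values (a sketch; the string literals come from the union above,
+# and a `ToolChoice` instance forces a specific named function):
+#   choice: AgentCallResponseToolChoice = "auto"
+#   forced: AgentCallResponseToolChoice = ToolChoice(...)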
diff --git a/src/humanloop/types/agent_call_stream_response.py b/src/humanloop/types/agent_call_stream_response.py
new file mode 100644
index 00000000..673d3738
--- /dev/null
+++ b/src/humanloop/types/agent_call_stream_response.py
@@ -0,0 +1,44 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_log_response import AgentLogResponse
+from .agent_response import AgentResponse
+from .evaluator_log_response import EvaluatorLogResponse
+from .evaluator_response import EvaluatorResponse
+from .flow_log_response import FlowLogResponse
+from .flow_response import FlowResponse
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse
+from .prompt_log_response import PromptLogResponse
+from .prompt_response import PromptResponse
+from .tool_log_response import ToolLogResponse
+from .tool_response import ToolResponse
+from .version_deployment_response import VersionDeploymentResponse
+from .version_id_response import VersionIdResponse
+import typing
+from .agent_call_stream_response_payload import AgentCallStreamResponsePayload
+from .event_type import EventType
+import datetime as dt
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class AgentCallStreamResponse(UncheckedBaseModel):
+ """
+ Response model for calling Agent in streaming mode.
+ """
+
+ log_id: str
+ message: str
+ payload: typing.Optional[AgentCallStreamResponsePayload] = None
+ type: EventType
+ created_at: dt.datetime
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/agent_call_stream_response_payload.py b/src/humanloop/types/agent_call_stream_response_payload.py
new file mode 100644
index 00000000..85422047
--- /dev/null
+++ b/src/humanloop/types/agent_call_stream_response_payload.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .log_stream_response import LogStreamResponse
+from .log_response import LogResponse
+from .tool_call import ToolCall
+
+AgentCallStreamResponsePayload = typing.Union[LogStreamResponse, LogResponse, ToolCall]
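+
+# Stream consumers can dispatch on the concrete payload type, e.g. (a sketch):
+#   if isinstance(payload, ToolCall):
+#       ...  # the Agent turn suspended on a tool call
+#   elif isinstance(payload, LogResponse):
+#       ...  # a completed Log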
diff --git a/src/humanloop/types/agent_continue_response.py b/src/humanloop/types/agent_continue_response.py
new file mode 100644
index 00000000..0bbd7858
--- /dev/null
+++ b/src/humanloop/types/agent_continue_response.py
@@ -0,0 +1,224 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
+from .evaluator_response import EvaluatorResponse
+from .flow_response import FlowResponse
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse
+from .prompt_response import PromptResponse
+from .tool_response import ToolResponse
+from .version_deployment_response import VersionDeploymentResponse
+from .version_id_response import VersionIdResponse
+from .agent_log_response import AgentLogResponse
+from .evaluator_log_response import EvaluatorLogResponse
+from .flow_log_response import FlowLogResponse
+from .prompt_log_response import PromptLogResponse
+from .tool_log_response import ToolLogResponse
+import typing
+from .chat_message import ChatMessage
+import pydantic
+from .agent_continue_response_tool_choice import AgentContinueResponseToolChoice
+import datetime as dt
+from .log_status import LogStatus
+from .log_response import LogResponse
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class AgentContinueResponse(UncheckedBaseModel):
+ """
+ Response model for continuing an Agent call.
+ """
+
+ output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
+ """
+ The message returned by the provider.
+ """
+
+ prompt_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of tokens in the prompt used to generate the output.
+ """
+
+ reasoning_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of reasoning tokens used to generate the output.
+ """
+
+ output_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of tokens in the output generated by the model.
+ """
+
+ prompt_cost: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Cost in dollars associated with the tokens in the prompt.
+ """
+
+ output_cost: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Cost in dollars associated with the tokens in the output.
+ """
+
+ finish_reason: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Reason the generation finished.
+ """
+
+ messages: typing.Optional[typing.List[ChatMessage]] = pydantic.Field(default=None)
+ """
+ The messages passed to the provider chat endpoint.
+ """
+
+ tool_choice: typing.Optional[AgentContinueResponseToolChoice] = pydantic.Field(default=None)
+ """
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+ """
+
+ agent: AgentResponse = pydantic.Field()
+ """
+ Agent that generated the Log.
+ """
+
+ start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ When the logged event started.
+ """
+
+ end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ When the logged event ended.
+ """
+
+ output: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ User defined timestamp for when the log was created.
+ """
+
+ error: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Captured log and debug statements.
+ """
+
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Raw request sent to provider.
+ """
+
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Raw response received from the provider.
+ """
+
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ The inputs passed to the prompt template.
+ """
+
+ source: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifies where the model was called from.
+ """
+
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Any additional metadata to record.
+ """
+
+ log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
+ """
+ Status of the Agent Log. If `incomplete`, the Agent turn was suspended due to a tool call and can be continued by calling /agents/continue with responses to the Agent's last message (which should contain tool calls). See the `previous_agent_message` field for easy access to the Agent's last message.
+ """
+
+ source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ """
+
+ trace_parent_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The ID of the parent Log to nest this Log under in a Trace.
+ """
+
+ batches: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
+ """
+ Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations
+ """
+
+ user: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ End-user ID related to the Log.
+ """
+
+ environment: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The name of the Environment the Log is associated to.
+ """
+
+ save: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ Whether the request/response payloads will be stored on Humanloop.
+ """
+
+ log_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ """
+
+ id: str = pydantic.Field()
+ """
+ Unique identifier for the Log.
+ """
+
+ evaluator_logs: typing.List[EvaluatorLogResponse] = pydantic.Field()
+ """
+ List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
+ """
+
+ trace_flow_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifier for the Flow that the Trace belongs to.
+ """
+
+ trace_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifier for the Trace that the Log belongs to.
+ """
+
+ trace_children: typing.Optional[typing.List[LogResponse]] = pydantic.Field(default=None)
+ """
+ Logs nested under this Log in the Trace.
+ """
+
+ previous_agent_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
+ """
+ The Agent's last message, which should contain tool calls. Only populated if the Log is incomplete due to a suspended Agent turn with tool calls. This is useful for continuing the Agent call by calling /agents/continue.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/agent_continue_response_tool_choice.py b/src/humanloop/types/agent_continue_response_tool_choice.py
new file mode 100644
index 00000000..20f3fb75
--- /dev/null
+++ b/src/humanloop/types/agent_continue_response_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .tool_choice import ToolChoice
+
+AgentContinueResponseToolChoice = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice
+]
diff --git a/src/humanloop/types/agent_continue_stream_response.py b/src/humanloop/types/agent_continue_stream_response.py
new file mode 100644
index 00000000..ff7a0fac
--- /dev/null
+++ b/src/humanloop/types/agent_continue_stream_response.py
@@ -0,0 +1,44 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_log_response import AgentLogResponse
+from .agent_response import AgentResponse
+from .evaluator_log_response import EvaluatorLogResponse
+from .evaluator_response import EvaluatorResponse
+from .flow_log_response import FlowLogResponse
+from .flow_response import FlowResponse
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse
+from .prompt_log_response import PromptLogResponse
+from .prompt_response import PromptResponse
+from .tool_log_response import ToolLogResponse
+from .tool_response import ToolResponse
+from .version_deployment_response import VersionDeploymentResponse
+from .version_id_response import VersionIdResponse
+import typing
+from .agent_continue_stream_response_payload import AgentContinueStreamResponsePayload
+from .event_type import EventType
+import datetime as dt
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class AgentContinueStreamResponse(UncheckedBaseModel):
+ """
+ Response model for continuing an Agent call in streaming mode.
+ """
+
+ log_id: str
+ message: str
+ payload: typing.Optional[AgentContinueStreamResponsePayload] = None
+ type: EventType
+ created_at: dt.datetime
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/agent_continue_stream_response_payload.py b/src/humanloop/types/agent_continue_stream_response_payload.py
new file mode 100644
index 00000000..0e5f8a58
--- /dev/null
+++ b/src/humanloop/types/agent_continue_stream_response_payload.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .log_stream_response import LogStreamResponse
+from .log_response import LogResponse
+from .tool_call import ToolCall
+
+AgentContinueStreamResponsePayload = typing.Union[LogStreamResponse, LogResponse, ToolCall]
diff --git a/src/humanloop/types/agent_inline_tool.py b/src/humanloop/types/agent_inline_tool.py
new file mode 100644
index 00000000..dc618c35
--- /dev/null
+++ b/src/humanloop/types/agent_inline_tool.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .tool_function import ToolFunction
+from .on_agent_call_enum import OnAgentCallEnum
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class AgentInlineTool(UncheckedBaseModel):
+ type: typing.Literal["inline"] = "inline"
+ json_schema: ToolFunction
+ on_agent_call: typing.Optional[OnAgentCallEnum] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
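+
+# Example (a sketch; assumes `ToolFunction` carries `name`/`description`
+# fields, values illustrative):
+#   AgentInlineTool(
+#       json_schema=ToolFunction(name="get_weather", description="..."),
+#   )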
diff --git a/src/humanloop/types/agent_kernel_request.py b/src/humanloop/types/agent_kernel_request.py
new file mode 100644
index 00000000..6503b104
--- /dev/null
+++ b/src/humanloop/types/agent_kernel_request.py
@@ -0,0 +1,122 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import pydantic
+import typing
+from .model_endpoints import ModelEndpoints
+from .agent_kernel_request_template import AgentKernelRequestTemplate
+from .template_language import TemplateLanguage
+from .model_providers import ModelProviders
+from .agent_kernel_request_stop import AgentKernelRequestStop
+from .response_format import ResponseFormat
+from .agent_kernel_request_reasoning_effort import AgentKernelRequestReasoningEffort
+from .agent_kernel_request_tools_item import AgentKernelRequestToolsItem
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class AgentKernelRequest(UncheckedBaseModel):
+ """
+ Base class used by both PromptKernelRequest and AgentKernelRequest.
+
+ Contains the consistent Prompt-related fields.
+ """
+
+ model: str = pydantic.Field()
+ """
+ The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+ """
+
+ endpoint: typing.Optional[ModelEndpoints] = pydantic.Field(default=None)
+ """
+ The provider model endpoint used.
+ """
+
+ template: typing.Optional[AgentKernelRequestTemplate] = pydantic.Field(default=None)
+ """
+ The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+ For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+ For completion models, provide a prompt template as a string.
+
+ Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+ """
+
+ template_language: typing.Optional[TemplateLanguage] = pydantic.Field(default=None)
+ """
+ The template language to use for rendering the template.
+ """
+
+ provider: typing.Optional[ModelProviders] = pydantic.Field(default=None)
+ """
+ The company providing the underlying model service.
+ """
+
+ max_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt.
+ """
+
+ temperature: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+ """
+
+ top_p: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+ """
+
+ stop: typing.Optional[AgentKernelRequestStop] = pydantic.Field(default=None)
+ """
+ The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+ """
+
+ presence_penalty: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+ """
+
+ frequency_penalty: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+ """
+
+ other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Other parameter values to be passed to the provider call.
+ """
+
+ seed: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ If specified, the model will make a best effort to sample deterministically, but it is not guaranteed.
+ """
+
+ response_format: typing.Optional[ResponseFormat] = pydantic.Field(default=None)
+ """
+ The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+ """
+
+ reasoning_effort: typing.Optional[AgentKernelRequestReasoningEffort] = pydantic.Field(default=None)
+ """
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
+ """
+
+ tools: typing.Optional[typing.List[AgentKernelRequestToolsItem]] = None
+ attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used.
+ """
+
+ max_iterations: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
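+
+# Example (a sketch; values are illustrative):
+#   AgentKernelRequest(
+#       model="gpt-4",
+#       template="Answer the question: {{question}}",
+#       max_iterations=3,
+#   )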
diff --git a/src/humanloop/types/agent_kernel_request_reasoning_effort.py b/src/humanloop/types/agent_kernel_request_reasoning_effort.py
new file mode 100644
index 00000000..a8e8e98b
--- /dev/null
+++ b/src/humanloop/types/agent_kernel_request_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .open_ai_reasoning_effort import OpenAiReasoningEffort
+
+AgentKernelRequestReasoningEffort = typing.Union[OpenAiReasoningEffort, int]
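This union recurs on every Agent request/response variant: a string effort level for OpenAI reasoning models, or an integer token budget for Anthropic ones. Both assignments below type-check against it:

```python
from humanloop.types.agent_kernel_request_reasoning_effort import (
    AgentKernelRequestReasoningEffort,
)

openai_effort: AgentKernelRequestReasoningEffort = "medium"  # OpenAiReasoningEffort literal
anthropic_budget: AgentKernelRequestReasoningEffort = 4096   # max reasoning-token budget
```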
diff --git a/src/humanloop/types/agent_kernel_request_stop.py b/src/humanloop/types/agent_kernel_request_stop.py
new file mode 100644
index 00000000..e38c12e2
--- /dev/null
+++ b/src/humanloop/types/agent_kernel_request_stop.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentKernelRequestStop = typing.Union[str, typing.List[str]]
diff --git a/src/humanloop/types/agent_kernel_request_template.py b/src/humanloop/types/agent_kernel_request_template.py
new file mode 100644
index 00000000..31a351f2
--- /dev/null
+++ b/src/humanloop/types/agent_kernel_request_template.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .chat_message import ChatMessage
+
+AgentKernelRequestTemplate = typing.Union[str, typing.List[ChatMessage]]
diff --git a/src/humanloop/types/agent_kernel_request_tools_item.py b/src/humanloop/types/agent_kernel_request_tools_item.py
new file mode 100644
index 00000000..82c2fecf
--- /dev/null
+++ b/src/humanloop/types/agent_kernel_request_tools_item.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .agent_linked_file_request import AgentLinkedFileRequest
+from .agent_inline_tool import AgentInlineTool
+
+AgentKernelRequestToolsItem = typing.Union[AgentLinkedFileRequest, AgentInlineTool]
diff --git a/src/humanloop/types/agent_linked_file_request.py b/src/humanloop/types/agent_linked_file_request.py
new file mode 100644
index 00000000..9efd4b6a
--- /dev/null
+++ b/src/humanloop/types/agent_linked_file_request.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .linked_file_request import LinkedFileRequest
+from .on_agent_call_enum import OnAgentCallEnum
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class AgentLinkedFileRequest(UncheckedBaseModel):
+ type: typing.Literal["file"] = "file"
+ link: LinkedFileRequest
+ on_agent_call: typing.Optional[OnAgentCallEnum] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
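A linked-file tool pairs a LinkedFileRequest (defined later in this patch) with an optional on_agent_call behaviour. A sketch with hypothetical IDs:

```python
from humanloop.types.linked_file_request import LinkedFileRequest
from humanloop.types.agent_linked_file_request import AgentLinkedFileRequest

tool = AgentLinkedFileRequest(
    link=LinkedFileRequest(file_id="fl_123"),  # hypothetical File ID
    on_agent_call="continue",  # OnAgentCallEnum: "stop" or "continue"
)
assert tool.type == "file"  # discriminator defaults to the literal
```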
diff --git a/src/humanloop/types/agent_linked_file_response.py b/src/humanloop/types/agent_linked_file_response.py
new file mode 100644
index 00000000..d85d682e
--- /dev/null
+++ b/src/humanloop/types/agent_linked_file_response.py
@@ -0,0 +1,39 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .linked_file_request import LinkedFileRequest
+from .on_agent_call_enum import OnAgentCallEnum
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+from ..core.pydantic_utilities import update_forward_refs
+
+
+class AgentLinkedFileResponse(UncheckedBaseModel):
+ type: typing.Literal["file"] = "file"
+ link: LinkedFileRequest
+ on_agent_call: typing.Optional[OnAgentCallEnum] = None
+ file: typing.Optional["AgentLinkedFileResponseFile"] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+from .agent_response import AgentResponse # noqa: E402
+from .evaluator_response import EvaluatorResponse # noqa: E402
+from .flow_response import FlowResponse # noqa: E402
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402
+from .prompt_response import PromptResponse # noqa: E402
+from .tool_response import ToolResponse # noqa: E402
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402
+from .version_id_response import VersionIdResponse # noqa: E402
+from .agent_linked_file_response_file import AgentLinkedFileResponseFile # noqa: E402
+
+update_forward_refs(AgentLinkedFileResponse)
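The bottom-of-module imports plus update_forward_refs are how these generated models break their import cycles: the field is annotated with a string, and the name is bound only after the mutually-referencing modules have loaded. The same pattern in miniature, using plain pydantic (the Fern helper presumably papers over the v1/v2 naming split):

```python
from __future__ import annotations
import typing
import pydantic

class Node(pydantic.BaseModel):
    children: typing.Optional[typing.List["Leaf"]] = None  # forward reference

class Leaf(pydantic.BaseModel):
    value: int

Node.update_forward_refs()  # pydantic v1 name; v2 spells this model_rebuild()
```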
diff --git a/src/humanloop/types/agent_linked_file_response_file.py b/src/humanloop/types/agent_linked_file_response_file.py
new file mode 100644
index 00000000..42d38fe4
--- /dev/null
+++ b/src/humanloop/types/agent_linked_file_response_file.py
@@ -0,0 +1,15 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+import typing
+from .dataset_response import DatasetResponse
+
+if typing.TYPE_CHECKING:
+ from .prompt_response import PromptResponse
+ from .tool_response import ToolResponse
+ from .evaluator_response import EvaluatorResponse
+ from .flow_response import FlowResponse
+ from .agent_response import AgentResponse
+AgentLinkedFileResponseFile = typing.Union[
+ "PromptResponse", "ToolResponse", DatasetResponse, "EvaluatorResponse", "FlowResponse", "AgentResponse"
+]
diff --git a/src/humanloop/types/agent_log_response.py b/src/humanloop/types/agent_log_response.py
new file mode 100644
index 00000000..f5b5e8e8
--- /dev/null
+++ b/src/humanloop/types/agent_log_response.py
@@ -0,0 +1,224 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
+from .evaluator_response import EvaluatorResponse
+from .flow_response import FlowResponse
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse
+from .prompt_response import PromptResponse
+from .tool_response import ToolResponse
+from .version_deployment_response import VersionDeploymentResponse
+from .version_id_response import VersionIdResponse
+import typing
+from .chat_message import ChatMessage
+import pydantic
+from .agent_log_response_tool_choice import AgentLogResponseToolChoice
+import datetime as dt
+from .log_status import LogStatus
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.pydantic_utilities import update_forward_refs
+
+
+class AgentLogResponse(UncheckedBaseModel):
+ """
+ Response model for an Agent Log.
+ """
+
+ output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
+ """
+ The message returned by the provider.
+ """
+
+ prompt_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of tokens in the prompt used to generate the output.
+ """
+
+ reasoning_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of reasoning tokens used to generate the output.
+ """
+
+ output_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of tokens in the output generated by the model.
+ """
+
+ prompt_cost: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Cost in dollars associated with the tokens in the prompt.
+ """
+
+ output_cost: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Cost in dollars associated with the tokens in the output.
+ """
+
+ finish_reason: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Reason the generation finished.
+ """
+
+ messages: typing.Optional[typing.List[ChatMessage]] = pydantic.Field(default=None)
+ """
+ The messages passed to the provider chat endpoint.
+ """
+
+ tool_choice: typing.Optional[AgentLogResponseToolChoice] = pydantic.Field(default=None)
+ """
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+ """
+
+ agent: AgentResponse = pydantic.Field()
+ """
+ Agent that generated the Log.
+ """
+
+ start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ When the logged event started.
+ """
+
+ end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ When the logged event ended.
+ """
+
+ output: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ User defined timestamp for when the log was created.
+ """
+
+ error: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Captured log and debug statements.
+ """
+
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Raw request sent to provider.
+ """
+
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Raw response received from the provider.
+ """
+
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ The inputs passed to the prompt template.
+ """
+
+ source: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifies where the model was called from.
+ """
+
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Any additional metadata to record.
+ """
+
+ log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
+ """
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+ """
+
+ source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ """
+
+ trace_parent_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The ID of the parent Log to nest this Log under in a Trace.
+ """
+
+ batches: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
+ """
+ Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations.
+ """
+
+ user: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ End-user ID related to the Log.
+ """
+
+ environment: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The name of the Environment the Log is associated with.
+ """
+
+ save: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ Whether the request/response payloads will be stored on Humanloop.
+ """
+
+ log_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ """
+
+ id: str = pydantic.Field()
+ """
+ Unique identifier for the Log.
+ """
+
+ evaluator_logs: typing.List["EvaluatorLogResponse"] = pydantic.Field()
+ """
+ List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
+ """
+
+ trace_flow_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifier for the Flow that the Trace belongs to.
+ """
+
+ trace_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifier for the Trace that the Log belongs to.
+ """
+
+ trace_children: typing.Optional[typing.List["LogResponse"]] = pydantic.Field(default=None)
+ """
+ Logs nested under this Log in the Trace.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+from .evaluator_log_response import EvaluatorLogResponse # noqa: E402
+from .flow_log_response import FlowLogResponse # noqa: E402
+from .prompt_log_response import PromptLogResponse # noqa: E402
+from .tool_log_response import ToolLogResponse # noqa: E402
+from .log_response import LogResponse # noqa: E402
+
+update_forward_refs(AgentLogResponse)
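Most numeric fields on AgentLogResponse are optional, so consumers should default them before aggregating. A small helper sketch:

```python
from humanloop.types.agent_log_response import AgentLogResponse

def summarize(log: AgentLogResponse) -> str:
    tokens = (log.prompt_tokens or 0) + (log.reasoning_tokens or 0) + (log.output_tokens or 0)
    cost = (log.prompt_cost or 0.0) + (log.output_cost or 0.0)
    return f"{log.id}: {tokens} tokens, ${cost:.4f}, finish_reason={log.finish_reason}"
```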
diff --git a/src/humanloop/types/agent_log_response_tool_choice.py b/src/humanloop/types/agent_log_response_tool_choice.py
new file mode 100644
index 00000000..5cb07628
--- /dev/null
+++ b/src/humanloop/types/agent_log_response_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .tool_choice import ToolChoice
+
+AgentLogResponseToolChoice = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice
+]
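The three literal arms cover the string modes described in the tool_choice docstring above; forcing a named function goes through the ToolChoice model, whose fields are defined elsewhere in the SDK and are not shown in this diff:

```python
from humanloop.types.agent_log_response_tool_choice import AgentLogResponseToolChoice

let_model_decide: AgentLogResponseToolChoice = "auto"
no_tool_calls: AgentLogResponseToolChoice = "none"
must_call_a_tool: AgentLogResponseToolChoice = "required"
```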
diff --git a/src/humanloop/types/agent_log_stream_response.py b/src/humanloop/types/agent_log_stream_response.py
new file mode 100644
index 00000000..91547189
--- /dev/null
+++ b/src/humanloop/types/agent_log_stream_response.py
@@ -0,0 +1,98 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+import pydantic
+import datetime as dt
+from .chat_message import ChatMessage
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class AgentLogStreamResponse(UncheckedBaseModel):
+ """
+ Agent-specific log output returned when streaming Agent Logs.
+ """
+
+ output: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ User defined timestamp for when the log was created.
+ """
+
+ error: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Captured log and debug statements.
+ """
+
+ output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
+ """
+ The message returned by the provider.
+ """
+
+ prompt_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of tokens in the prompt used to generate the output.
+ """
+
+ reasoning_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of reasoning tokens used to generate the output.
+ """
+
+ output_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of tokens in the output generated by the model.
+ """
+
+ prompt_cost: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Cost in dollars associated with the tokens in the prompt.
+ """
+
+ output_cost: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Cost in dollars associated with the tokens in the output.
+ """
+
+ finish_reason: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Reason the generation finished.
+ """
+
+ id: str = pydantic.Field()
+ """
+ ID of the log.
+ """
+
+ agent_id: str = pydantic.Field()
+ """
+ ID of the Agent the log belongs to.
+ """
+
+ version_id: str = pydantic.Field()
+ """
+ ID of the specific version of the Agent.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
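A sketch of consuming these chunks; the iterable producing them would come from the streaming agents client added elsewhere in this patch and is assumed here:

```python
import typing

from humanloop.types.agent_log_stream_response import AgentLogStreamResponse

def drain(stream: typing.Iterable[AgentLogStreamResponse]) -> None:
    for chunk in stream:
        if chunk.error:
            print(f"log {chunk.id} (agent {chunk.agent_id}) failed: {chunk.error}")
        elif chunk.output:
            print(chunk.output, end="")
```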
diff --git a/src/humanloop/types/agent_response.py b/src/humanloop/types/agent_response.py
new file mode 100644
index 00000000..0487d7b7
--- /dev/null
+++ b/src/humanloop/types/agent_response.py
@@ -0,0 +1,265 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+from ..core.unchecked_base_model import UncheckedBaseModel
+import pydantic
+import typing
+from .model_endpoints import ModelEndpoints
+from .agent_response_template import AgentResponseTemplate
+from .template_language import TemplateLanguage
+from .model_providers import ModelProviders
+from .agent_response_stop import AgentResponseStop
+from .response_format import ResponseFormat
+from .agent_response_reasoning_effort import AgentResponseReasoningEffort
+import typing_extensions
+from ..core.serialization import FieldMetadata
+from .environment_response import EnvironmentResponse
+import datetime as dt
+from .user_response import UserResponse
+from .version_status import VersionStatus
+from .input_response import InputResponse
+from .evaluator_aggregate import EvaluatorAggregate
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.pydantic_utilities import update_forward_refs
+
+
+class AgentResponse(UncheckedBaseModel):
+ """
+ Base type that all File Responses should inherit from.
+
+ Attributes defined here are common to all File Responses and should be overridden
+ in the inheriting classes with documentation and appropriate Field definitions.
+ """
+
+ path: str = pydantic.Field()
+ """
+ Path of the Agent, including the name, which is used as a unique identifier.
+ """
+
+ id: str = pydantic.Field()
+ """
+ Unique identifier for the Agent.
+ """
+
+ directory_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ ID of the directory that the file is in on Humanloop.
+ """
+
+ model: str = pydantic.Field()
+ """
+ The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+ """
+
+ endpoint: typing.Optional[ModelEndpoints] = pydantic.Field(default=None)
+ """
+ The provider model endpoint used.
+ """
+
+ template: typing.Optional[AgentResponseTemplate] = pydantic.Field(default=None)
+ """
+ The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+ For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+ For completion models, provide a prompt template as a string.
+
+ Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+ """
+
+ template_language: typing.Optional[TemplateLanguage] = pydantic.Field(default=None)
+ """
+ The template language to use for rendering the template.
+ """
+
+ provider: typing.Optional[ModelProviders] = pydantic.Field(default=None)
+ """
+ The company providing the underlying model service.
+ """
+
+ max_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt.
+ """
+
+ temperature: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+ """
+
+ top_p: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+ """
+
+ stop: typing.Optional[AgentResponseStop] = pydantic.Field(default=None)
+ """
+ The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+ """
+
+ presence_penalty: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+ """
+
+ frequency_penalty: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+ """
+
+ other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Other parameter values to be passed to the provider call.
+ """
+
+ seed: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ If specified, the model will make a best effort to sample deterministically, but it is not guaranteed.
+ """
+
+ response_format: typing.Optional[ResponseFormat] = pydantic.Field(default=None)
+ """
+ The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+ """
+
+ reasoning_effort: typing.Optional[AgentResponseReasoningEffort] = pydantic.Field(default=None)
+ """
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
+ """
+
+ tools: typing.List["AgentResponseToolsItem"] = pydantic.Field()
+ """
+ List of tools that the Agent can call. These can be linked files or inline tools.
+ """
+
+ attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used.
+ """
+
+ max_iterations: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
+ """
+
+ version_name: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Unique name for the Agent version. Version names must be unique for a given Agent.
+ """
+
+ version_description: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Description of the version, e.g., the changes made in this version.
+ """
+
+ description: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Description of the Agent.
+ """
+
+ tags: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
+ """
+ List of tags associated with the file.
+ """
+
+ readme: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Long description of the file.
+ """
+
+ name: str = pydantic.Field()
+ """
+ Name of the Agent.
+ """
+
+ schema_: typing_extensions.Annotated[
+ typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema")
+ ] = pydantic.Field(default=None)
+ """
+ The JSON schema for the Agent.
+ """
+
+ version_id: str = pydantic.Field()
+ """
+ Unique identifier for the specific Agent Version. If no query params are provided, the default deployed Agent Version is returned.
+ """
+
+ type: typing.Optional[typing.Literal["agent"]] = None
+ environments: typing.Optional[typing.List[EnvironmentResponse]] = pydantic.Field(default=None)
+ """
+ The list of environments the Agent Version is deployed to.
+ """
+
+ created_at: dt.datetime
+ updated_at: dt.datetime
+ created_by: typing.Optional[UserResponse] = pydantic.Field(default=None)
+ """
+ The user who created the Agent.
+ """
+
+ committed_by: typing.Optional[UserResponse] = pydantic.Field(default=None)
+ """
+ The user who committed the Agent Version.
+ """
+
+ committed_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ The date and time the Agent Version was committed.
+ """
+
+ status: VersionStatus = pydantic.Field()
+ """
+ The status of the Agent Version.
+ """
+
+ last_used_at: dt.datetime
+ version_logs_count: int = pydantic.Field()
+ """
+ The number of logs that have been generated for this Agent Version.
+ """
+
+ total_logs_count: int = pydantic.Field()
+ """
+ The number of logs that have been generated across all Agent Versions.
+ """
+
+ inputs: typing.List[InputResponse] = pydantic.Field()
+ """
+ Inputs associated with the Agent. Inputs correspond to any of the variables used within the Agent template.
+ """
+
+ evaluators: typing.Optional[typing.List["MonitoringEvaluatorResponse"]] = pydantic.Field(default=None)
+ """
+ Evaluators that have been attached to this Agent that are used for monitoring logs.
+ """
+
+ evaluator_aggregates: typing.Optional[typing.List[EvaluatorAggregate]] = pydantic.Field(default=None)
+ """
+ Aggregation of Evaluator results for the Agent Version.
+ """
+
+ content: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The serialized kernel for the Agent. Corresponds to the .agent file.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402
+from .evaluator_response import EvaluatorResponse # noqa: E402
+from .flow_response import FlowResponse # noqa: E402
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402
+from .prompt_response import PromptResponse # noqa: E402
+from .tool_response import ToolResponse # noqa: E402
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402
+from .version_id_response import VersionIdResponse # noqa: E402
+from .agent_response_tools_item import AgentResponseToolsItem # noqa: E402
+
+update_forward_refs(AgentResponse)
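Note the schema_ field: attribute access uses the trailing underscore (avoiding a clash with BaseModel.schema), while FieldMetadata(alias="schema") evidently maps it to the plain "schema" key on the wire. A read-side sketch:

```python
from humanloop.types.agent_response import AgentResponse

def describe(agent: AgentResponse) -> str:
    json_schema = agent.schema_ or {}  # serialized as "schema", accessed as schema_
    return f"{agent.path} @ {agent.version_id} ({agent.status}): {len(agent.tools)} tools"
```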
diff --git a/src/humanloop/types/agent_response_reasoning_effort.py b/src/humanloop/types/agent_response_reasoning_effort.py
new file mode 100644
index 00000000..59254f38
--- /dev/null
+++ b/src/humanloop/types/agent_response_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .open_ai_reasoning_effort import OpenAiReasoningEffort
+
+AgentResponseReasoningEffort = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/types/agent_response_stop.py b/src/humanloop/types/agent_response_stop.py
new file mode 100644
index 00000000..5c3b6a48
--- /dev/null
+++ b/src/humanloop/types/agent_response_stop.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentResponseStop = typing.Union[str, typing.List[str]]
diff --git a/src/humanloop/types/agent_response_template.py b/src/humanloop/types/agent_response_template.py
new file mode 100644
index 00000000..4c084dc8
--- /dev/null
+++ b/src/humanloop/types/agent_response_template.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .chat_message import ChatMessage
+
+AgentResponseTemplate = typing.Union[str, typing.List[ChatMessage]]
diff --git a/src/humanloop/types/agent_response_tools_item.py b/src/humanloop/types/agent_response_tools_item.py
new file mode 100644
index 00000000..8095608f
--- /dev/null
+++ b/src/humanloop/types/agent_response_tools_item.py
@@ -0,0 +1,9 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+import typing
+from .agent_inline_tool import AgentInlineTool
+
+if typing.TYPE_CHECKING:
+ from .agent_linked_file_response import AgentLinkedFileResponse
+AgentResponseToolsItem = typing.Union["AgentLinkedFileResponse", AgentInlineTool]
diff --git a/src/humanloop/types/anthropic_redacted_thinking_content.py b/src/humanloop/types/anthropic_redacted_thinking_content.py
new file mode 100644
index 00000000..ebac897b
--- /dev/null
+++ b/src/humanloop/types/anthropic_redacted_thinking_content.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class AnthropicRedactedThinkingContent(UncheckedBaseModel):
+ type: typing.Literal["redacted_thinking"] = "redacted_thinking"
+ data: str = pydantic.Field()
+ """
+ Thinking block that Anthropic redacted for safety reasons. The user is expected to pass the block back to Anthropic.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/anthropic_thinking_content.py b/src/humanloop/types/anthropic_thinking_content.py
new file mode 100644
index 00000000..bf7fc808
--- /dev/null
+++ b/src/humanloop/types/anthropic_thinking_content.py
@@ -0,0 +1,28 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class AnthropicThinkingContent(UncheckedBaseModel):
+ type: typing.Literal["thinking"] = "thinking"
+ thinking: str = pydantic.Field()
+ """
+ Model's chain-of-thought for providing the response.
+ """
+
+ signature: str = pydantic.Field()
+ """
+ Cryptographic signature that verifies the thinking block was generated by Anthropic.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/chat_message.py b/src/humanloop/types/chat_message.py
index c09f2768..c72bc90d 100644
--- a/src/humanloop/types/chat_message.py
+++ b/src/humanloop/types/chat_message.py
@@ -6,6 +6,7 @@
import pydantic
from .chat_role import ChatRole
from .tool_call import ToolCall
+from .chat_message_thinking_item import ChatMessageThinkingItem
from ..core.pydantic_utilities import IS_PYDANTIC_V2
@@ -35,6 +36,11 @@ class ChatMessage(UncheckedBaseModel):
A list of tool calls requested by the assistant.
"""
+ thinking: typing.Optional[typing.List[ChatMessageThinkingItem]] = pydantic.Field(default=None)
+ """
+ Model's chain-of-thought for providing the response. Present on assistant messages if the model supports it.
+ """
+
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
else:
diff --git a/src/humanloop/types/chat_message_thinking_item.py b/src/humanloop/types/chat_message_thinking_item.py
new file mode 100644
index 00000000..0a507724
--- /dev/null
+++ b/src/humanloop/types/chat_message_thinking_item.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .anthropic_thinking_content import AnthropicThinkingContent
+from .anthropic_redacted_thinking_content import AnthropicRedactedThinkingContent
+
+ChatMessageThinkingItem = typing.Union[AnthropicThinkingContent, AnthropicRedactedThinkingContent]
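Assistant ChatMessages can now carry Anthropic thinking blocks via the new thinking field. A construction sketch, assuming ChatMessage's role and content fields follow the usual chat schema; the signature value is hypothetical:

```python
from humanloop.types.anthropic_thinking_content import AnthropicThinkingContent
from humanloop.types.chat_message import ChatMessage

msg = ChatMessage(
    role="assistant",
    content="The answer is 4.",
    thinking=[
        AnthropicThinkingContent(thinking="2 + 2 = 4", signature="sig_abc"),
    ],
)
```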
diff --git a/src/humanloop/types/create_agent_log_response.py b/src/humanloop/types/create_agent_log_response.py
new file mode 100644
index 00000000..9dc66629
--- /dev/null
+++ b/src/humanloop/types/create_agent_log_response.py
@@ -0,0 +1,42 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import pydantic
+import typing
+from .log_status import LogStatus
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class CreateAgentLogResponse(UncheckedBaseModel):
+ """
+ Response for an Agent Log.
+ """
+
+ id: str = pydantic.Field()
+ """
+ Unique identifier for the Log.
+ """
+
+ agent_id: str = pydantic.Field()
+ """
+ Unique identifier for the Agent.
+ """
+
+ version_id: str = pydantic.Field()
+ """
+ Unique identifier for the Agent Version.
+ """
+
+ log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
+ """
+ Status of the Agent Log. When an Agent Log is marked as `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/dataset_response.py b/src/humanloop/types/dataset_response.py
index af79f597..2c614521 100644
--- a/src/humanloop/types/dataset_response.py
+++ b/src/humanloop/types/dataset_response.py
@@ -3,6 +3,8 @@
from ..core.unchecked_base_model import UncheckedBaseModel
import pydantic
import typing
+import typing_extensions
+from ..core.serialization import FieldMetadata
from .environment_response import EnvironmentResponse
import datetime as dt
from .user_response import UserResponse
@@ -43,6 +45,13 @@ class DatasetResponse(UncheckedBaseModel):
Description of the Dataset.
"""
+ schema_: typing_extensions.Annotated[
+ typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema")
+ ] = pydantic.Field(default=None)
+ """
+ The JSON schema for the File.
+ """
+
readme: typing.Optional[str] = pydantic.Field(default=None)
"""
Long description of the file.
diff --git a/src/humanloop/types/directory_with_parents_and_children_response.py b/src/humanloop/types/directory_with_parents_and_children_response.py
index 5828a678..51f879b8 100644
--- a/src/humanloop/types/directory_with_parents_and_children_response.py
+++ b/src/humanloop/types/directory_with_parents_and_children_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/directory_with_parents_and_children_response_files_item.py b/src/humanloop/types/directory_with_parents_and_children_response_files_item.py
index 0bfeebf7..9d0d5fc4 100644
--- a/src/humanloop/types/directory_with_parents_and_children_response_files_item.py
+++ b/src/humanloop/types/directory_with_parents_and_children_response_files_item.py
@@ -6,7 +6,8 @@
from .evaluator_response import EvaluatorResponse
from .dataset_response import DatasetResponse
from .flow_response import FlowResponse
+from .agent_response import AgentResponse
DirectoryWithParentsAndChildrenResponseFilesItem = typing.Union[
- PromptResponse, ToolResponse, EvaluatorResponse, DatasetResponse, FlowResponse
+ PromptResponse, ToolResponse, EvaluatorResponse, DatasetResponse, FlowResponse, AgentResponse
]
diff --git a/src/humanloop/types/evaluatee_response.py b/src/humanloop/types/evaluatee_response.py
index 9ba9fe4d..4332aa12 100644
--- a/src/humanloop/types/evaluatee_response.py
+++ b/src/humanloop/types/evaluatee_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/evaluation_evaluator_response.py b/src/humanloop/types/evaluation_evaluator_response.py
index 413081c6..0c7de27e 100644
--- a/src/humanloop/types/evaluation_evaluator_response.py
+++ b/src/humanloop/types/evaluation_evaluator_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/evaluation_log_response.py b/src/humanloop/types/evaluation_log_response.py
index 6c931db0..84d117e2 100644
--- a/src/humanloop/types/evaluation_log_response.py
+++ b/src/humanloop/types/evaluation_log_response.py
@@ -1,6 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_log_response import AgentLogResponse
+from .agent_response import AgentResponse
from .evaluator_log_response import EvaluatorLogResponse
from .evaluator_response import EvaluatorResponse
from .flow_log_response import FlowLogResponse
diff --git a/src/humanloop/types/evaluation_response.py b/src/humanloop/types/evaluation_response.py
index f113fff5..bcda94a4 100644
--- a/src/humanloop/types/evaluation_response.py
+++ b/src/humanloop/types/evaluation_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/evaluation_run_response.py b/src/humanloop/types/evaluation_run_response.py
index 1203ce2c..74d59e4c 100644
--- a/src/humanloop/types/evaluation_run_response.py
+++ b/src/humanloop/types/evaluation_run_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/evaluation_runs_response.py b/src/humanloop/types/evaluation_runs_response.py
index d91e1ee9..e09b2a73 100644
--- a/src/humanloop/types/evaluation_runs_response.py
+++ b/src/humanloop/types/evaluation_runs_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/evaluator_log_response.py b/src/humanloop/types/evaluator_log_response.py
index e457d580..71ca76c0 100644
--- a/src/humanloop/types/evaluator_log_response.py
+++ b/src/humanloop/types/evaluator_log_response.py
@@ -2,6 +2,8 @@
from __future__ import annotations
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
@@ -189,6 +191,7 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_log_response import AgentLogResponse # noqa: E402
from .flow_log_response import FlowLogResponse # noqa: E402
from .prompt_log_response import PromptLogResponse # noqa: E402
from .tool_log_response import ToolLogResponse # noqa: E402
diff --git a/src/humanloop/types/evaluator_response.py b/src/humanloop/types/evaluator_response.py
index 175f456d..712ca698 100644
--- a/src/humanloop/types/evaluator_response.py
+++ b/src/humanloop/types/evaluator_response.py
@@ -5,6 +5,8 @@
import pydantic
import typing
from .evaluator_response_spec import EvaluatorResponseSpec
+import typing_extensions
+from ..core.serialization import FieldMetadata
from .environment_response import EnvironmentResponse
import datetime as dt
from .user_response import UserResponse
@@ -55,6 +57,13 @@ class EvaluatorResponse(UncheckedBaseModel):
Description of the Evaluator.
"""
+ schema_: typing_extensions.Annotated[
+ typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema")
+ ] = pydantic.Field(default=None)
+ """
+ The JSON schema for the File.
+ """
+
readme: typing.Optional[str] = pydantic.Field(default=None)
"""
Long description of the file.
@@ -124,6 +133,8 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402
+from .agent_response import AgentResponse # noqa: E402
from .flow_response import FlowResponse # noqa: E402
from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402
from .prompt_response import PromptResponse # noqa: E402
diff --git a/src/humanloop/types/event_type.py b/src/humanloop/types/event_type.py
new file mode 100644
index 00000000..128eed92
--- /dev/null
+++ b/src/humanloop/types/event_type.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+EventType = typing.Union[
+ typing.Literal[
+ "agent_turn_start",
+ "agent_turn_suspend",
+ "agent_turn_continue",
+ "agent_turn_end",
+ "agent_start",
+ "agent_update",
+ "agent_end",
+ "tool_start",
+ "tool_update",
+ "tool_end",
+ "error",
+ "agent_generation_error",
+ ],
+ typing.Any,
+]
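Because EventType is Union[Literal[...], Any], values this SDK version has never seen still deserialize; dispatch code should therefore fall through gracefully rather than assume the closed set above. A sketch:

```python
from humanloop.types.event_type import EventType

def classify(event_type: EventType) -> str:
    if event_type in ("error", "agent_generation_error"):
        return "failure"
    if isinstance(event_type, str) and event_type.endswith(
        ("_start", "_suspend", "_continue", "_update", "_end")
    ):
        return "lifecycle"
    return "unknown"  # forward-compatible: unrecognized event types land here
```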
diff --git a/src/humanloop/types/file_environment_response.py b/src/humanloop/types/file_environment_response.py
index 70ed322f..7f34b7b3 100644
--- a/src/humanloop/types/file_environment_response.py
+++ b/src/humanloop/types/file_environment_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/file_environment_response_file.py b/src/humanloop/types/file_environment_response_file.py
index 2a105c9d..0254c2b8 100644
--- a/src/humanloop/types/file_environment_response_file.py
+++ b/src/humanloop/types/file_environment_response_file.py
@@ -6,7 +6,8 @@
from .dataset_response import DatasetResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
+from .agent_response import AgentResponse
FileEnvironmentResponseFile = typing.Union[
- PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse
+ PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse, AgentResponse
]
diff --git a/src/humanloop/types/file_environment_variable_request.py b/src/humanloop/types/file_environment_variable_request.py
new file mode 100644
index 00000000..8108245b
--- /dev/null
+++ b/src/humanloop/types/file_environment_variable_request.py
@@ -0,0 +1,27 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+
+
+class FileEnvironmentVariableRequest(UncheckedBaseModel):
+ name: str = pydantic.Field()
+ """
+ Name of the environment variable.
+ """
+
+ value: str = pydantic.Field()
+ """
+ Value of the environment variable.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/file_type.py b/src/humanloop/types/file_type.py
index 7a870b84..f235825b 100644
--- a/src/humanloop/types/file_type.py
+++ b/src/humanloop/types/file_type.py
@@ -2,4 +2,4 @@
import typing
-FileType = typing.Union[typing.Literal["prompt", "tool", "dataset", "evaluator", "flow"], typing.Any]
+FileType = typing.Union[typing.Literal["prompt", "tool", "dataset", "evaluator", "flow", "agent"], typing.Any]
diff --git a/src/humanloop/types/files_tool_type.py b/src/humanloop/types/files_tool_type.py
index c32b9755..753d9ba2 100644
--- a/src/humanloop/types/files_tool_type.py
+++ b/src/humanloop/types/files_tool_type.py
@@ -3,5 +3,5 @@
import typing
FilesToolType = typing.Union[
- typing.Literal["pinecone_search", "google", "mock", "snippet", "json_schema", "get_api_call"], typing.Any
+ typing.Literal["pinecone_search", "google", "mock", "snippet", "json_schema", "get_api_call", "python"], typing.Any
]
diff --git a/src/humanloop/types/flow_log_response.py b/src/humanloop/types/flow_log_response.py
index ba1e1cf6..58a87fac 100644
--- a/src/humanloop/types/flow_log_response.py
+++ b/src/humanloop/types/flow_log_response.py
@@ -2,6 +2,8 @@
from __future__ import annotations
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
@@ -173,6 +175,7 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_log_response import AgentLogResponse # noqa: E402
from .evaluator_log_response import EvaluatorLogResponse # noqa: E402
from .prompt_log_response import PromptLogResponse # noqa: E402
from .tool_log_response import ToolLogResponse # noqa: E402
diff --git a/src/humanloop/types/flow_response.py b/src/humanloop/types/flow_response.py
index 4017b3b7..7768778e 100644
--- a/src/humanloop/types/flow_response.py
+++ b/src/humanloop/types/flow_response.py
@@ -4,6 +4,8 @@
from ..core.unchecked_base_model import UncheckedBaseModel
import pydantic
import typing
+import typing_extensions
+from ..core.serialization import FieldMetadata
from .environment_response import EnvironmentResponse
import datetime as dt
from .user_response import UserResponse
@@ -57,6 +59,13 @@ class FlowResponse(UncheckedBaseModel):
Description of the Flow.
"""
+ schema_: typing_extensions.Annotated[
+ typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema")
+ ] = pydantic.Field(default=None)
+ """
+ The JSON schema for the File.
+ """
+
readme: typing.Optional[str] = pydantic.Field(default=None)
"""
Long description of the file.
@@ -111,6 +120,8 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402
+from .agent_response import AgentResponse # noqa: E402
from .evaluator_response import EvaluatorResponse # noqa: E402
from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402
from .prompt_response import PromptResponse # noqa: E402
diff --git a/src/humanloop/types/linked_file_request.py b/src/humanloop/types/linked_file_request.py
new file mode 100644
index 00000000..ee45ffdf
--- /dev/null
+++ b/src/humanloop/types/linked_file_request.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class LinkedFileRequest(UncheckedBaseModel):
+ file_id: str
+ environment_id: typing.Optional[str] = None
+ version_id: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
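environment_id and version_id are both optional, giving three ways to resolve a link: the default deployment, a named environment, or an exact pinned version. Hypothetical IDs throughout:

```python
from humanloop.types.linked_file_request import LinkedFileRequest

default_deployment = LinkedFileRequest(file_id="fl_abc")
by_environment = LinkedFileRequest(file_id="fl_abc", environment_id="env_prod")
by_version = LinkedFileRequest(file_id="fl_abc", version_id="v_123")
```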
diff --git a/src/humanloop/types/list_agents.py b/src/humanloop/types/list_agents.py
new file mode 100644
index 00000000..36481f41
--- /dev/null
+++ b/src/humanloop/types/list_agents.py
@@ -0,0 +1,31 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
+from .evaluator_response import EvaluatorResponse
+from .flow_response import FlowResponse
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse
+from .prompt_response import PromptResponse
+from .tool_response import ToolResponse
+from .version_deployment_response import VersionDeploymentResponse
+from .version_id_response import VersionIdResponse
+import typing
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class ListAgents(UncheckedBaseModel):
+ records: typing.List[AgentResponse] = pydantic.Field()
+ """
+ The list of Agents.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/list_evaluators.py b/src/humanloop/types/list_evaluators.py
index 61edbec5..7b736e14 100644
--- a/src/humanloop/types/list_evaluators.py
+++ b/src/humanloop/types/list_evaluators.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/list_flows.py b/src/humanloop/types/list_flows.py
index 686dab26..41ec4008 100644
--- a/src/humanloop/types/list_flows.py
+++ b/src/humanloop/types/list_flows.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/list_prompts.py b/src/humanloop/types/list_prompts.py
index 94cda05e..f773d3f9 100644
--- a/src/humanloop/types/list_prompts.py
+++ b/src/humanloop/types/list_prompts.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/list_tools.py b/src/humanloop/types/list_tools.py
index 4080a6a1..84ddc89c 100644
--- a/src/humanloop/types/list_tools.py
+++ b/src/humanloop/types/list_tools.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/log_response.py b/src/humanloop/types/log_response.py
index 0ba81dd3..cd7a0a26 100644
--- a/src/humanloop/types/log_response.py
+++ b/src/humanloop/types/log_response.py
@@ -9,4 +9,7 @@
from .tool_log_response import ToolLogResponse
from .evaluator_log_response import EvaluatorLogResponse
from .flow_log_response import FlowLogResponse
-LogResponse = typing.Union["PromptLogResponse", "ToolLogResponse", "EvaluatorLogResponse", "FlowLogResponse"]
+ from .agent_log_response import AgentLogResponse
+LogResponse = typing.Union[
+ "PromptLogResponse", "ToolLogResponse", "EvaluatorLogResponse", "FlowLogResponse", "AgentLogResponse"
+]
diff --git a/src/humanloop/types/log_stream_response.py b/src/humanloop/types/log_stream_response.py
new file mode 100644
index 00000000..69ffacf4
--- /dev/null
+++ b/src/humanloop/types/log_stream_response.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .prompt_call_stream_response import PromptCallStreamResponse
+from .agent_log_stream_response import AgentLogStreamResponse
+
+LogStreamResponse = typing.Union[PromptCallStreamResponse, AgentLogStreamResponse]
diff --git a/src/humanloop/types/model_providers.py b/src/humanloop/types/model_providers.py
index 8473d2ae..3f2c99fb 100644
--- a/src/humanloop/types/model_providers.py
+++ b/src/humanloop/types/model_providers.py
@@ -4,7 +4,7 @@
ModelProviders = typing.Union[
typing.Literal[
- "openai", "openai_azure", "mock", "anthropic", "bedrock", "cohere", "replicate", "google", "groq", "deepseek"
+ "anthropic", "bedrock", "cohere", "deepseek", "google", "groq", "mock", "openai", "openai_azure", "replicate"
],
typing.Any,
]
diff --git a/src/humanloop/types/monitoring_evaluator_response.py b/src/humanloop/types/monitoring_evaluator_response.py
index e70dc4fb..1809af57 100644
--- a/src/humanloop/types/monitoring_evaluator_response.py
+++ b/src/humanloop/types/monitoring_evaluator_response.py
@@ -39,6 +39,8 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402
+from .agent_response import AgentResponse # noqa: E402
from .evaluator_response import EvaluatorResponse # noqa: E402
from .flow_response import FlowResponse # noqa: E402
from .prompt_response import PromptResponse # noqa: E402
diff --git a/src/humanloop/types/on_agent_call_enum.py b/src/humanloop/types/on_agent_call_enum.py
new file mode 100644
index 00000000..3730256e
--- /dev/null
+++ b/src/humanloop/types/on_agent_call_enum.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+OnAgentCallEnum = typing.Union[typing.Literal["stop", "continue"], typing.Any]
diff --git a/src/humanloop/types/open_ai_reasoning_effort.py b/src/humanloop/types/open_ai_reasoning_effort.py
new file mode 100644
index 00000000..d8c48547
--- /dev/null
+++ b/src/humanloop/types/open_ai_reasoning_effort.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+OpenAiReasoningEffort = typing.Union[typing.Literal["high", "medium", "low"], typing.Any]
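The Union[Literal[...], Any] shape recurs throughout this patch (FileType, FilesToolType, EventType, OnAgentCallEnum): known values get literal-level type-checking, while the Any arm keeps deserialization from failing when the API introduces a value this SDK version has not seen. Both assignments type-check:

```python
from humanloop.types.open_ai_reasoning_effort import OpenAiReasoningEffort

known: OpenAiReasoningEffort = "high"       # checked against the literal arm
unknown: OpenAiReasoningEffort = "extreme"  # hypothetical future value, accepted via the Any arm
```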
diff --git a/src/humanloop/types/paginated_data_agent_response.py b/src/humanloop/types/paginated_data_agent_response.py
new file mode 100644
index 00000000..0febbadd
--- /dev/null
+++ b/src/humanloop/types/paginated_data_agent_response.py
@@ -0,0 +1,31 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
+from .evaluator_response import EvaluatorResponse
+from .flow_response import FlowResponse
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse
+from .prompt_response import PromptResponse
+from .tool_response import ToolResponse
+from .version_deployment_response import VersionDeploymentResponse
+from .version_id_response import VersionIdResponse
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class PaginatedDataAgentResponse(UncheckedBaseModel):
+ records: typing.List[AgentResponse]
+ page: int
+ size: int
+ total: int
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
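A generic pager sketch over the page/size/total fields; the fetch callable standing in for the agents list endpoint is an assumption:

```python
import typing

from humanloop.types.agent_response import AgentResponse
from humanloop.types.paginated_data_agent_response import PaginatedDataAgentResponse

def iter_agents(
    fetch_page: typing.Callable[[int], PaginatedDataAgentResponse],
) -> typing.Iterator[AgentResponse]:
    page = 1
    while True:
        resp = fetch_page(page)
        yield from resp.records
        if page * resp.size >= resp.total:
            return
        page += 1
```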
diff --git a/src/humanloop/types/paginated_data_evaluation_log_response.py b/src/humanloop/types/paginated_data_evaluation_log_response.py
index 9e3c568e..c508f8a6 100644
--- a/src/humanloop/types/paginated_data_evaluation_log_response.py
+++ b/src/humanloop/types/paginated_data_evaluation_log_response.py
@@ -1,6 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_log_response import AgentLogResponse
+from .agent_response import AgentResponse
from .evaluator_log_response import EvaluatorLogResponse
from .evaluator_response import EvaluatorResponse
from .flow_log_response import FlowLogResponse
diff --git a/src/humanloop/types/paginated_data_evaluator_response.py b/src/humanloop/types/paginated_data_evaluator_response.py
index 275f0528..2e82c736 100644
--- a/src/humanloop/types/paginated_data_evaluator_response.py
+++ b/src/humanloop/types/paginated_data_evaluator_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/paginated_data_flow_response.py b/src/humanloop/types/paginated_data_flow_response.py
index 990d58be..6cfcf9ae 100644
--- a/src/humanloop/types/paginated_data_flow_response.py
+++ b/src/humanloop/types/paginated_data_flow_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/paginated_data_log_response.py b/src/humanloop/types/paginated_data_log_response.py
index 57bae587..f41ca9ba 100644
--- a/src/humanloop/types/paginated_data_log_response.py
+++ b/src/humanloop/types/paginated_data_log_response.py
@@ -1,6 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_log_response import AgentLogResponse
+from .agent_response import AgentResponse
from .evaluator_log_response import EvaluatorLogResponse
from .evaluator_response import EvaluatorResponse
from .flow_log_response import FlowLogResponse
diff --git a/src/humanloop/types/paginated_data_prompt_response.py b/src/humanloop/types/paginated_data_prompt_response.py
index ff71e584..d9e1d914 100644
--- a/src/humanloop/types/paginated_data_prompt_response.py
+++ b/src/humanloop/types/paginated_data_prompt_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/paginated_data_tool_response.py b/src/humanloop/types/paginated_data_tool_response.py
index 0e52b361..e2962e87 100644
--- a/src/humanloop/types/paginated_data_tool_response.py
+++ b/src/humanloop/types/paginated_data_tool_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py b/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py
similarity index 76%
rename from src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py
rename to src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py
index bd7082b3..87d5b603 100644
--- a/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py
+++ b/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
@@ -9,16 +11,18 @@
from .version_deployment_response import VersionDeploymentResponse
from .version_id_response import VersionIdResponse
import typing
-from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item import (
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem,
+from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item import (
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem,
)
from ..core.pydantic_utilities import IS_PYDANTIC_V2
import pydantic
-class PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse(UncheckedBaseModel):
+class PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse(
+ UncheckedBaseModel
+):
records: typing.List[
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem
]
page: int
size: int
diff --git a/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py b/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py
similarity index 63%
rename from src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py
rename to src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py
index 65c4f324..a1b4f056 100644
--- a/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py
+++ b/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py
@@ -6,7 +6,8 @@
from .dataset_response import DatasetResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
+from .agent_response import AgentResponse
-PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem = typing.Union[
- PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse
-]
+PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem = (
+ typing.Union[PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse, AgentResponse]
+)
diff --git a/src/humanloop/types/paginated_evaluation_response.py b/src/humanloop/types/paginated_evaluation_response.py
index 78e177e8..16232e0b 100644
--- a/src/humanloop/types/paginated_evaluation_response.py
+++ b/src/humanloop/types/paginated_evaluation_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/populate_template_response.py b/src/humanloop/types/populate_template_response.py
index d587d175..d2d36f78 100644
--- a/src/humanloop/types/populate_template_response.py
+++ b/src/humanloop/types/populate_template_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
@@ -16,9 +18,11 @@
from .model_providers import ModelProviders
from .populate_template_response_stop import PopulateTemplateResponseStop
from .response_format import ResponseFormat
-from .reasoning_effort import ReasoningEffort
+from .populate_template_response_reasoning_effort import PopulateTemplateResponseReasoningEffort
from .tool_function import ToolFunction
from .linked_tool_response import LinkedToolResponse
+import typing_extensions
+from ..core.serialization import FieldMetadata
from .environment_response import EnvironmentResponse
import datetime as dt
from .user_response import UserResponse
@@ -126,9 +130,9 @@ class PopulateTemplateResponse(UncheckedBaseModel):
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
"""
- reasoning_effort: typing.Optional[ReasoningEffort] = pydantic.Field(default=None)
+ reasoning_effort: typing.Optional[PopulateTemplateResponseReasoningEffort] = pydantic.Field(default=None)
"""
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an `OpenAiReasoningEffort` value; Anthropic reasoning models expect an integer, which sets the maximum thinking-token budget.
"""
tools: typing.Optional[typing.List[ToolFunction]] = pydantic.Field(default=None)
@@ -176,6 +180,13 @@ class PopulateTemplateResponse(UncheckedBaseModel):
Name of the Prompt.
"""
+ schema_: typing_extensions.Annotated[
+ typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema")
+ ] = pydantic.Field(default=None)
+ """
+ The JSON schema for the Prompt.
+ """
+
version_id: str = pydantic.Field()
"""
Unique identifier for the specific Prompt Version. If no query params provided, the default deployed Prompt Version is returned.
@@ -220,6 +231,11 @@ class PopulateTemplateResponse(UncheckedBaseModel):
Aggregation of Evaluator results for the Prompt Version.
"""
+ content: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The serialized kernel for the Prompt. Corresponds to the .prompt file.
+ """
+
populated_template: typing.Optional[PopulateTemplateResponsePopulatedTemplate] = pydantic.Field(default=None)
"""
The template populated with the input values you provided in the request. Returns None if no template exists.
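The `schema_` field with `FieldMetadata(alias="schema")` above avoids shadowing pydantic's own `BaseModel.schema` while keeping `schema` as the wire name. A plain pydantic-v2 sketch of the same aliasing (`Field(alias=...)` stands in for Fern's `FieldMetadata`):

```python
import typing
import pydantic

class Example(pydantic.BaseModel):
    schema_: typing.Optional[dict] = pydantic.Field(default=None, alias="schema")

obj = Example.model_validate({"schema": {"type": "object"}})
assert obj.model_dump(by_alias=True) == {"schema": {"type": "object"}}
```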
diff --git a/src/humanloop/types/populate_template_response_reasoning_effort.py b/src/humanloop/types/populate_template_response_reasoning_effort.py
new file mode 100644
index 00000000..8dd9f7f6
--- /dev/null
+++ b/src/humanloop/types/populate_template_response_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .open_ai_reasoning_effort import OpenAiReasoningEffort
+
+PopulateTemplateResponseReasoningEffort = typing.Union[OpenAiReasoningEffort, int]
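The practical effect of widening `reasoning_effort` to `Union[OpenAiReasoningEffort, int]` is that one field carries an effort level for OpenAI and a thinking-token budget for Anthropic. A hedged usage sketch (the `prompt=` kernel override is inferred from the `prompts_call_request_prompt` types in this series; model names are illustrative):

```python
# OpenAI reasoning model: pass an effort level.
client.prompts.call(
    path="prompts/my_prompt",
    prompt={"model": "o3-mini", "reasoning_effort": "high"},
    messages=[{"role": "user", "content": "Plan the migration."}],
)

# Anthropic reasoning model: pass a maximum thinking-token budget instead.
client.prompts.call(
    path="prompts/my_prompt",
    prompt={"model": "claude-3-7-sonnet-latest", "reasoning_effort": 2048},
    messages=[{"role": "user", "content": "Plan the migration."}],
)
```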
diff --git a/src/humanloop/types/prompt_call_response.py b/src/humanloop/types/prompt_call_response.py
index 4e1ae69c..ec74437f 100644
--- a/src/humanloop/types/prompt_call_response.py
+++ b/src/humanloop/types/prompt_call_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/prompt_kernel_request.py b/src/humanloop/types/prompt_kernel_request.py
index 6461bb19..80ba5ed5 100644
--- a/src/humanloop/types/prompt_kernel_request.py
+++ b/src/humanloop/types/prompt_kernel_request.py
@@ -9,12 +9,18 @@
from .model_providers import ModelProviders
from .prompt_kernel_request_stop import PromptKernelRequestStop
from .response_format import ResponseFormat
-from .reasoning_effort import ReasoningEffort
+from .prompt_kernel_request_reasoning_effort import PromptKernelRequestReasoningEffort
from .tool_function import ToolFunction
from ..core.pydantic_utilities import IS_PYDANTIC_V2
class PromptKernelRequest(UncheckedBaseModel):
+ """
+ Base class used by both PromptKernelRequest and AgentKernelRequest.
+
+ Contains the Prompt-related fields shared by both.
+ """
+
model: str = pydantic.Field()
"""
The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
@@ -90,9 +96,9 @@ class PromptKernelRequest(UncheckedBaseModel):
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
"""
- reasoning_effort: typing.Optional[ReasoningEffort] = pydantic.Field(default=None)
+ reasoning_effort: typing.Optional[PromptKernelRequestReasoningEffort] = pydantic.Field(default=None)
"""
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an `OpenAiReasoningEffort` value; Anthropic reasoning models expect an integer, which sets the maximum thinking-token budget.
"""
tools: typing.Optional[typing.List[ToolFunction]] = pydantic.Field(default=None)
diff --git a/src/humanloop/types/prompt_kernel_request_reasoning_effort.py b/src/humanloop/types/prompt_kernel_request_reasoning_effort.py
new file mode 100644
index 00000000..dda61bb4
--- /dev/null
+++ b/src/humanloop/types/prompt_kernel_request_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .open_ai_reasoning_effort import OpenAiReasoningEffort
+
+PromptKernelRequestReasoningEffort = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/types/prompt_log_response.py b/src/humanloop/types/prompt_log_response.py
index 2a1bad11..a9e26318 100644
--- a/src/humanloop/types/prompt_log_response.py
+++ b/src/humanloop/types/prompt_log_response.py
@@ -2,6 +2,8 @@
from __future__ import annotations
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
@@ -213,6 +215,7 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_log_response import AgentLogResponse # noqa: E402
from .evaluator_log_response import EvaluatorLogResponse # noqa: E402
from .flow_log_response import FlowLogResponse # noqa: E402
from .tool_log_response import ToolLogResponse # noqa: E402
diff --git a/src/humanloop/types/prompt_response.py b/src/humanloop/types/prompt_response.py
index 07f4755d..786617f3 100644
--- a/src/humanloop/types/prompt_response.py
+++ b/src/humanloop/types/prompt_response.py
@@ -10,9 +10,11 @@
from .model_providers import ModelProviders
from .prompt_response_stop import PromptResponseStop
from .response_format import ResponseFormat
-from .reasoning_effort import ReasoningEffort
+from .prompt_response_reasoning_effort import PromptResponseReasoningEffort
from .tool_function import ToolFunction
from .linked_tool_response import LinkedToolResponse
+import typing_extensions
+from ..core.serialization import FieldMetadata
from .environment_response import EnvironmentResponse
import datetime as dt
from .user_response import UserResponse
@@ -120,9 +122,9 @@ class PromptResponse(UncheckedBaseModel):
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
"""
- reasoning_effort: typing.Optional[ReasoningEffort] = pydantic.Field(default=None)
+ reasoning_effort: typing.Optional[PromptResponseReasoningEffort] = pydantic.Field(default=None)
"""
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an `OpenAiReasoningEffort` value; Anthropic reasoning models expect an integer, which sets the maximum thinking-token budget.
"""
tools: typing.Optional[typing.List[ToolFunction]] = pydantic.Field(default=None)
@@ -170,6 +172,13 @@ class PromptResponse(UncheckedBaseModel):
Name of the Prompt.
"""
+ schema_: typing_extensions.Annotated[
+ typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema")
+ ] = pydantic.Field(default=None)
+ """
+ The JSON schema for the Prompt.
+ """
+
version_id: str = pydantic.Field()
"""
Unique identifier for the specific Prompt Version. If no query params provided, the default deployed Prompt Version is returned.
@@ -214,6 +223,11 @@ class PromptResponse(UncheckedBaseModel):
Aggregation of Evaluator results for the Prompt Version.
"""
+ content: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The serialized kernel for the Prompt. Corresponds to the .prompt file.
+ """
+
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
else:
@@ -224,6 +238,8 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402
+from .agent_response import AgentResponse # noqa: E402
from .evaluator_response import EvaluatorResponse # noqa: E402
from .flow_response import FlowResponse # noqa: E402
from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402
diff --git a/src/humanloop/types/prompt_response_reasoning_effort.py b/src/humanloop/types/prompt_response_reasoning_effort.py
new file mode 100644
index 00000000..e136637f
--- /dev/null
+++ b/src/humanloop/types/prompt_response_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .open_ai_reasoning_effort import OpenAiReasoningEffort
+
+PromptResponseReasoningEffort = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/types/reasoning_effort.py b/src/humanloop/types/reasoning_effort.py
deleted file mode 100644
index da0a0354..00000000
--- a/src/humanloop/types/reasoning_effort.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-ReasoningEffort = typing.Union[typing.Literal["high", "medium", "low"], typing.Any]
diff --git a/src/humanloop/types/run_version_response.py b/src/humanloop/types/run_version_response.py
index d94b1178..770dc487 100644
--- a/src/humanloop/types/run_version_response.py
+++ b/src/humanloop/types/run_version_response.py
@@ -5,5 +5,6 @@
from .tool_response import ToolResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
+from .agent_response import AgentResponse
-RunVersionResponse = typing.Union[PromptResponse, ToolResponse, EvaluatorResponse, FlowResponse]
+RunVersionResponse = typing.Union[PromptResponse, ToolResponse, EvaluatorResponse, FlowResponse, AgentResponse]
diff --git a/src/humanloop/types/tool_call_response.py b/src/humanloop/types/tool_call_response.py
new file mode 100644
index 00000000..55bf2712
--- /dev/null
+++ b/src/humanloop/types/tool_call_response.py
@@ -0,0 +1,168 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
+from .evaluator_response import EvaluatorResponse
+from .flow_response import FlowResponse
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse
+from .prompt_response import PromptResponse
+from .tool_response import ToolResponse
+from .version_deployment_response import VersionDeploymentResponse
+from .version_id_response import VersionIdResponse
+from .agent_log_response import AgentLogResponse
+from .evaluator_log_response import EvaluatorLogResponse
+from .flow_log_response import FlowLogResponse
+from .prompt_log_response import PromptLogResponse
+from .tool_log_response import ToolLogResponse
+import typing
+import datetime as dt
+import pydantic
+from .log_status import LogStatus
+from .log_response import LogResponse
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class ToolCallResponse(UncheckedBaseModel):
+ """
+ Response model for a Tool call.
+ """
+
+ start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ When the logged event started.
+ """
+
+ end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ When the logged event ended.
+ """
+
+ tool: ToolResponse = pydantic.Field()
+ """
+ Tool used to generate the Log.
+ """
+
+ output: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ User defined timestamp for when the log was created.
+ """
+
+ error: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Captured log and debug statements.
+ """
+
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Raw request sent to provider.
+ """
+
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Raw response received from the provider.
+ """
+
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ The inputs passed to the prompt template.
+ """
+
+ source: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifies where the model was called from.
+ """
+
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Any additional metadata to record.
+ """
+
+ log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
+ """
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+ """
+
+ source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ """
+
+ trace_parent_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The ID of the parent Log to nest this Log under in a Trace.
+ """
+
+ batches: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
+ """
+ Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations.
+ """
+
+ user: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ End-user ID related to the Log.
+ """
+
+ environment: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The name of the Environment the Log is associated to.
+ """
+
+ save: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ Whether the request/response payloads will be stored on Humanloop.
+ """
+
+ log_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Unique identifier for the Log. If you don't provide one, Humanloop will generate it for you.
+ """
+
+ id: str = pydantic.Field()
+ """
+ ID of the log.
+ """
+
+ evaluator_logs: typing.List[EvaluatorLogResponse] = pydantic.Field()
+ """
+ List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
+ """
+
+ trace_flow_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifier for the Flow that the Trace belongs to.
+ """
+
+ trace_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ ID of the Trace containing the Tool Call Log.
+ """
+
+ trace_children: typing.Optional[typing.List[LogResponse]] = pydantic.Field(default=None)
+ """
+ Logs nested under this Log in the Trace.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
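Since `start_time`, `end_time`, and `provider_latency` are all optional on `ToolCallResponse`, timing code downstream needs a fallback. A small hypothetical helper (not part of the SDK):

```python
import typing

from humanloop.types.tool_call_response import ToolCallResponse

def tool_latency_seconds(resp: ToolCallResponse) -> typing.Optional[float]:
    """Prefer wall-clock timestamps; fall back to provider-reported latency."""
    if resp.start_time is not None and resp.end_time is not None:
        return (resp.end_time - resp.start_time).total_seconds()
    return resp.provider_latency
```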
diff --git a/src/humanloop/types/tool_log_response.py b/src/humanloop/types/tool_log_response.py
index 1b6081c3..251223af 100644
--- a/src/humanloop/types/tool_log_response.py
+++ b/src/humanloop/types/tool_log_response.py
@@ -2,6 +2,8 @@
from __future__ import annotations
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
@@ -13,6 +15,7 @@
import datetime as dt
import pydantic
from .log_status import LogStatus
+from .chat_message import ChatMessage
from ..core.pydantic_utilities import IS_PYDANTIC_V2
from ..core.pydantic_utilities import update_forward_refs
@@ -152,6 +155,11 @@ class ToolLogResponse(UncheckedBaseModel):
Tool used to generate the Log.
"""
+ output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
+ """
+ The message returned by the Tool.
+ """
+
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
else:
@@ -162,6 +170,7 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_log_response import AgentLogResponse # noqa: E402
from .evaluator_log_response import EvaluatorLogResponse # noqa: E402
from .flow_log_response import FlowLogResponse # noqa: E402
from .prompt_log_response import PromptLogResponse # noqa: E402
diff --git a/src/humanloop/types/tool_response.py b/src/humanloop/types/tool_response.py
index 0b835918..70537215 100644
--- a/src/humanloop/types/tool_response.py
+++ b/src/humanloop/types/tool_response.py
@@ -152,6 +152,8 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402
+from .agent_response import AgentResponse # noqa: E402
from .evaluator_response import EvaluatorResponse # noqa: E402
from .flow_response import FlowResponse # noqa: E402
from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402
diff --git a/src/humanloop/types/version_deployment_response.py b/src/humanloop/types/version_deployment_response.py
index e2e82d9f..0db57d69 100644
--- a/src/humanloop/types/version_deployment_response.py
+++ b/src/humanloop/types/version_deployment_response.py
@@ -36,6 +36,8 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402
+from .agent_response import AgentResponse # noqa: E402
from .evaluator_response import EvaluatorResponse # noqa: E402
from .flow_response import FlowResponse # noqa: E402
from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402
diff --git a/src/humanloop/types/version_deployment_response_file.py b/src/humanloop/types/version_deployment_response_file.py
index e0f73573..4fadcff0 100644
--- a/src/humanloop/types/version_deployment_response_file.py
+++ b/src/humanloop/types/version_deployment_response_file.py
@@ -10,6 +10,7 @@
from .tool_response import ToolResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
+ from .agent_response import AgentResponse
VersionDeploymentResponseFile = typing.Union[
- "PromptResponse", "ToolResponse", DatasetResponse, "EvaluatorResponse", "FlowResponse"
+ "PromptResponse", "ToolResponse", DatasetResponse, "EvaluatorResponse", "FlowResponse", "AgentResponse"
]
diff --git a/src/humanloop/types/version_id_response.py b/src/humanloop/types/version_id_response.py
index 877851a9..e3f5dc27 100644
--- a/src/humanloop/types/version_id_response.py
+++ b/src/humanloop/types/version_id_response.py
@@ -30,6 +30,8 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402
+from .agent_response import AgentResponse # noqa: E402
from .evaluator_response import EvaluatorResponse # noqa: E402
from .flow_response import FlowResponse # noqa: E402
from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402
diff --git a/src/humanloop/types/version_id_response_version.py b/src/humanloop/types/version_id_response_version.py
index 2f56346c..b1cbd45d 100644
--- a/src/humanloop/types/version_id_response_version.py
+++ b/src/humanloop/types/version_id_response_version.py
@@ -10,6 +10,7 @@
from .tool_response import ToolResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
+ from .agent_response import AgentResponse
VersionIdResponseVersion = typing.Union[
- "PromptResponse", "ToolResponse", DatasetResponse, "EvaluatorResponse", "FlowResponse"
+ "PromptResponse", "ToolResponse", DatasetResponse, "EvaluatorResponse", "FlowResponse", "AgentResponse"
]
From d703478ea4e9acd6cbc09fe35f6d12e617dc889f Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Tue, 29 Apr 2025 12:58:03 +0100
Subject: [PATCH 13/39] refactor overloads
---
src/humanloop/overload.py | 92 ++++++++++++++++++++++++---------------
1 file changed, 56 insertions(+), 36 deletions(-)
diff --git a/src/humanloop/overload.py b/src/humanloop/overload.py
index a550c92a..fbdc5fac 100644
--- a/src/humanloop/overload.py
+++ b/src/humanloop/overload.py
@@ -1,7 +1,7 @@
import inspect
import logging
import types
-from typing import TypeVar, Union, Literal
+from typing import TypeVar, Union, Literal, Optional
from pathlib import Path
from humanloop.context import (
get_decorator_context,
@@ -117,7 +117,7 @@ def _overload_call(self, **kwargs) -> PromptCallResponse:
logger.info(f"Calling inner overload")
response = self._call(**kwargs)
except Exception as e:
- # Re-raising as HumanloopDecoratorError so the decorators don't catch it
+ # Re-raising as HumanloopRuntimeError so the decorators don't catch it
raise HumanloopRuntimeError from e
return response
@@ -126,55 +126,75 @@ def _overload_call(self, **kwargs) -> PromptCallResponse:
client.call = types.MethodType(_overload_call, client) # type: ignore [assignment]
return client
+def _get_file_type_from_client(client: Union[PromptsClient, AgentsClient]) -> FileType:
+ """Get the file type based on the client type."""
+ if isinstance(client, PromptsClient):
+ return "prompt"
+ elif isinstance(client, AgentsClient):
+ return "agent"
+ else:
+ raise ValueError(f"Unsupported client type: {type(client)}")
+
+def _handle_local_file(path: str, file_type: FileType) -> Optional[str]:
+ """Handle reading from a local file if it exists.
+
+ Args:
+ path: The path to the file
+ file_type: The type of file ("prompt" or "agent")
+
+ Returns:
+ The file content if found, None otherwise
+ """
+ try:
+ # Construct path to local file
+ local_path = Path("humanloop") / path # FLAG: ensure that when passing the path back to remote, it's using forward slashes
+ # Add appropriate extension
+ local_path = local_path.parent / f"{local_path.stem}.{file_type}"
+
+ if local_path.exists():
+ # Read the file content
+ with open(local_path) as f:
+ file_content = f.read()
+ logger.debug(f"Using local file content from {local_path}")
+ return file_content
+ else:
+ logger.warning(f"Local file not found: {local_path}, falling back to API")
+ return None
+ except Exception as e:
+ logger.error(f"Error reading local file: {e}, falling back to API")
+ return None
+
def overload_with_local_files(
client: Union[PromptsClient, AgentsClient],
use_local_files: bool,
) -> Union[PromptsClient, AgentsClient]:
- """Overload call to handle local files when use_local_files is True.
+ """Overload call and log methods to handle local files when use_local_files is True.
Args:
client: The client to overload (PromptsClient or AgentsClient)
use_local_files: Whether to use local files
- file_type: Type of file ("prompt" or "agent")
"""
original_call = client._call if hasattr(client, '_call') else client.call
original_log = client._log if hasattr(client, '_log') else client.log
- # get file type from client type
- file_type: FileType
- if isinstance(client, PromptsClient):
- file_type = "prompt"
- elif isinstance(client, AgentsClient):
- file_type = "agent"
- else:
- raise ValueError(f"Unsupported client type: {type(client)}")
+ file_type = _get_file_type_from_client(client)
def _overload(self, function_name: str, **kwargs) -> PromptCallResponse:
+ # Handle local files if enabled
if use_local_files and "path" in kwargs:
- try:
- # Construct path to local file
- local_path = Path("humanloop") / kwargs["path"] # FLAG: ensure that when passing the path back to remote, it's using forward slashes
- # Add appropriate extension
- local_path = local_path.parent / f"{local_path.stem}.{file_type}"
-
- if local_path.exists():
- # Read the file content
- with open(local_path) as f:
- file_content = f.read()
-
- kwargs[file_type] = file_content # "prompt" or "agent" # TODO: raise warning if kernel passed in
-
- logger.debug(f"Using local file content from {local_path}")
- else:
- logger.warning(f"Local file not found: {local_path}, falling back to API")
- except Exception as e:
- logger.error(f"Error reading local file: {e}, falling back to API")
+ file_content = _handle_local_file(kwargs["path"], file_type)
+ if file_content is not None:
+ kwargs[file_type] = file_content
- if function_name == "call":
- return original_call(**kwargs)
- elif function_name == "log":
- return original_log(**kwargs)
- else:
- raise ValueError(f"Unsupported function name: {function_name}")
+ try:
+ if function_name == "call":
+ return original_call(**kwargs)
+ elif function_name == "log":
+ return original_log(**kwargs)
+ else:
+ raise ValueError(f"Unsupported function name: {function_name}")
+ except Exception as e:
+ # Re-raising as HumanloopRuntimeError so the decorators don't catch it
+ raise HumanloopRuntimeError from e
def _overload_call(self, **kwargs) -> PromptCallResponse:
return _overload(self, "call", **kwargs)
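For reference, the path arithmetic in `_handle_local_file` reduces to: prefix the workspace path with the `humanloop/` base directory, then swap the extension for the file type. The same logic as a standalone sketch:

```python
from pathlib import Path

def local_file_for(path: str, file_type: str) -> Path:
    """Map a workspace path like 'prompts/support/triage' to its local file."""
    p = Path("humanloop") / path
    return p.parent / f"{p.stem}.{file_type}"

assert local_file_for("prompts/support/triage", "prompt") == Path(
    "humanloop/prompts/support/triage.prompt"
)
```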
From f72fda0ff34ee6947409d32941910ba68f619525 Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Tue, 29 Apr 2025 13:22:24 +0100
Subject: [PATCH 14/39] add tests for overloading call and log operations on
agents and prompts
---
tests/conftest.py | 5 ++-
tests/sync/test_sync.py | 89 +++++++++++++++++++++++++++++++++++++++++
2 files changed, 93 insertions(+), 1 deletion(-)
diff --git a/tests/conftest.py b/tests/conftest.py
index fa213133..6203cfa6 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -191,10 +191,13 @@ def api_keys() -> APIKeys:
@pytest.fixture(scope="session")
-def humanloop_client(api_keys: APIKeys) -> Humanloop:
+def humanloop_client(request, api_keys: APIKeys) -> Humanloop:
+ """Create a Humanloop client for testing."""
+ use_local_files = getattr(request, "param", False)
return Humanloop(
api_key=api_keys.humanloop,
base_url="http://localhost:80/v5/",
+ use_local_files=use_local_files
)
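The `getattr(request, "param", False)` above is standard pytest plumbing: with `indirect=True`, a parametrized value is routed to the fixture as `request.param` instead of to the test. Minimal illustration:

```python
import pytest

@pytest.fixture
def humanloop_client(request):
    use_local_files = getattr(request, "param", False)  # False when not parametrized
    ...

@pytest.mark.parametrize("humanloop_client", [True], indirect=True)
def test_with_local_files(humanloop_client):
    ...  # fixture was built with use_local_files=True
```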
diff --git a/tests/sync/test_sync.py b/tests/sync/test_sync.py
index 520a1979..5832d219 100644
--- a/tests/sync/test_sync.py
+++ b/tests/sync/test_sync.py
@@ -2,6 +2,7 @@
from pathlib import Path
import pytest
from humanloop import Humanloop, FileType, AgentResponse, PromptResponse
+from humanloop.error import HumanloopRuntimeError
class SyncableFile(NamedTuple):
@@ -94,3 +95,91 @@ def test_pull_basic(humanloop_client: Humanloop, test_file_structure: List[Synca
# Verify it's not empty
content = local_path.read_text()
assert content, f"File at {local_path} should not be empty"
+
+@pytest.mark.parametrize("humanloop_client", [True], indirect=True)
+def test_overload_with_local_files(humanloop_client: Humanloop, test_file_structure: List[SyncableFile], cleanup_local_files):
+ """Test that overload_with_local_files correctly handles local files.
+
+ Flow:
+ 1. Create files in remote (via test_file_structure fixture)
+ 2. Pull files locally
+ 3. Test using the pulled files
+ """
+ # First pull the files locally
+ humanloop_client.pull()
+
+ # Test using the pulled files
+ test_file = test_file_structure[0] # Use the first test file
+ extension = f".{test_file.type}"
+ local_path = Path("humanloop") / f"{test_file.path}{extension}"
+
+ # Verify the file was pulled correctly
+ assert local_path.exists(), f"Expected pulled file at {local_path}"
+ assert local_path.parent.exists(), f"Expected directory at {local_path.parent}"
+
+ # Test call with pulled file
+ if test_file.type == "prompt":
+ response = humanloop_client.prompts.call(path=test_file.path, messages=[{"role": "user", "content": "Testing"}])
+ assert response is not None
+ elif test_file.type == "agent":
+ response = humanloop_client.agents.call(path=test_file.path, messages=[{"role": "user", "content": "Testing"}])
+ assert response is not None
+
+ # Test with invalid path
+ with pytest.raises(HumanloopRuntimeError):
+ if test_file.type == "prompt":
+ humanloop_client.prompts.call(path="invalid/path")
+ elif test_file.type == "agent":
+ humanloop_client.agents.call(path="invalid/path")
+
+@pytest.mark.parametrize("humanloop_client", [True], indirect=True)
+def test_overload_log_with_local_files(humanloop_client: Humanloop, test_file_structure: List[SyncableFile], cleanup_local_files):
+ """Test that overload_with_local_files correctly handles local files for log operations.
+
+ Flow:
+ 1. Create files in remote (via test_file_structure fixture)
+ 2. Pull files locally
+ 3. Test logging using the pulled files
+ """
+ # First pull the files locally
+ humanloop_client.pull()
+
+ # Test using the pulled files
+ test_file = test_file_structure[0] # Use the first test file
+ extension = f".{test_file.type}"
+ local_path = Path("humanloop") / f"{test_file.path}{extension}"
+
+ # Verify the file was pulled correctly
+ assert local_path.exists(), f"Expected pulled file at {local_path}"
+ assert local_path.parent.exists(), f"Expected directory at {local_path.parent}"
+
+ # Test log with pulled file
+ if test_file.type == "prompt":
+ response = humanloop_client.prompts.log(
+ path=test_file.path,
+ messages=[{"role": "user", "content": "Testing"}],
+ output="Test response"
+ )
+ assert response is not None
+ elif test_file.type == "agent":
+ response = humanloop_client.agents.log(
+ path=test_file.path,
+ messages=[{"role": "user", "content": "Testing"}],
+ output="Test response"
+ )
+ assert response is not None
+
+ # Test with invalid path
+ with pytest.raises(HumanloopRuntimeError):
+ if test_file.type == "prompt":
+ humanloop_client.prompts.log(
+ path="invalid/path",
+ messages=[{"role": "user", "content": "Testing"}],
+ output="Test response"
+ )
+ elif test_file.type == "agent":
+ humanloop_client.agents.log(
+ path="invalid/path",
+ messages=[{"role": "user", "content": "Testing"}],
+ output="Test response"
+ )
\ No newline at end of file
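The invalid-path assertions are duplicated across both tests; if more operations get covered, a small helper keeps them in one place (a refactor sketch, not part of the patch):

```python
import pytest

from humanloop.error import HumanloopRuntimeError

def assert_invalid_path_raises(op, **kwargs):
    with pytest.raises(HumanloopRuntimeError):
        op(path="invalid/path", **kwargs)

# assert_invalid_path_raises(humanloop_client.prompts.call,
#                            messages=[{"role": "user", "content": "Testing"}])
```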
From 881f6e25eeba0e2c7cd0a50835990af58831b9de Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Wed, 30 Apr 2025 15:04:18 +0100
Subject: [PATCH 15/39] add environment and path filter to pull operation
---
src/humanloop/client.py | 26 +++++++++++++++++++-------
src/humanloop/sync/sync_client.py | 23 +++++++++++++++++++++--
tests/sync/test_sync.py | 4 ++++
3 files changed, 44 insertions(+), 9 deletions(-)
diff --git a/src/humanloop/client.py b/src/humanloop/client.py
index 6b16e7cf..48e179b2 100644
--- a/src/humanloop/client.py
+++ b/src/humanloop/client.py
@@ -100,6 +100,7 @@ def __init__(
opentelemetry_tracer_provider: Optional[TracerProvider] = None,
opentelemetry_tracer: Optional[Tracer] = None,
use_local_files: bool = False,
+ files_directory: str = "humanloop",
):
"""
Extends the base client with custom evaluation utilities and
@@ -120,7 +121,7 @@ def __init__(
)
self.use_local_files = use_local_files
- self.sync_client = SyncClient(client=self)
+ self._sync_client = SyncClient(client=self, base_dir=files_directory)
eval_client = ExtendedEvalsClient(client_wrapper=self._client_wrapper)
eval_client.client = self
self.evaluations = eval_client
@@ -359,20 +360,26 @@ def agent():
attributes=attributes,
)
- def pull(self) -> List[str]:
+ def pull(self,
+ environment: Optional[str] = None,
+ path: Optional[str] = None,
+ ) -> List[str]:
"""Pull prompt and agent files from Humanloop to local filesystem.
This method will:
1. Fetch all prompt and agent files from your Humanloop workspace
- 2. Save them to the local filesystem in a 'humanloop/' directory
+ 2. Save them to the local filesystem using the client's files_directory (set during initialization)
3. Maintain the same directory structure as in Humanloop
4. Add appropriate file extensions (.prompt or .agent)
+ By default, the operation will overwrite existing files with the latest version from Humanloop,
+ but will not delete local files that don't exist in the remote workspace.
+
Currently only supports syncing prompt and agent files. Other file types will be skipped.
The files will be saved with the following structure:
```
- humanloop/
+ {files_directory}/
├── prompts/
│ ├── my_prompt.prompt
│ └── nested/
@@ -381,11 +388,16 @@ def pull(self) -> List[str]:
└── my_agent.agent
```
- :return: List of successfully processed file paths
+ :param environment: The environment to pull the files from.
+ :param path: The path in the Humanloop workspace to pull files from. Can be a directory or a specific file.
+ :return: List of successfully processed file paths.
"""
- return self.sync_client.pull()
-
+ return self._sync_client.pull(
+ environment=environment,
+ path=path
+ )
+
class AsyncHumanloop(AsyncBaseHumanloop):
"""
See docstring of AsyncBaseHumanloop.
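Putting the new parameters together, a hedged usage sketch (the environment name and path prefix are illustrative):

```python
from humanloop import Humanloop

client = Humanloop(
    api_key="...",
    files_directory="humanloop",  # where pulled files are written
    use_local_files=True,         # have call/log prefer the pulled files
)

# Pull only files under prompts/support, as deployed to "production".
pulled = client.pull(environment="production", path="prompts/support")
print(pulled)  # paths of the files that were processed
```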
diff --git a/src/humanloop/sync/sync_client.py b/src/humanloop/sync/sync_client.py
index 97a82df1..b8b8e855 100644
--- a/src/humanloop/sync/sync_client.py
+++ b/src/humanloop/sync/sync_client.py
@@ -65,12 +65,26 @@ def _save_serialized_file(self, serialized_content: str, file_path: str, file_ty
logger.error(f"Failed to sync {file_type} {file_path}: {str(e)}")
raise
- def pull(self) -> List[str]:
+ def pull(self,
+ environment: Optional[str] = None,
+ directory: Optional[str] = None,
+ path: Optional[str] = None,
+ ) -> List[str]:
"""Sync prompt and agent files from Humanloop to local filesystem.
+ If `path` is provided, only the file at that path will be pulled.
+ If `directory` is provided, all files in that directory will be pulled (if both `path` and `directory` are provided, `path` will take precedence).
+ If `environment` is provided, the files will be pulled from that environment.
+
+ Args:
+ environment: The environment to pull the files from.
+ directory: The directory to pull the files from.
+ path: The path of a specific file to pull.
+
Returns:
List of successfully processed file paths
"""
+
successful_files = []
failed_files = []
page = 1
@@ -80,7 +94,8 @@ def pull(self) -> List[str]:
response = self.client.files.list_files(
type=["prompt", "agent"],
page=page,
- include_content=True
+ include_content=True,
+ environment=environment
)
if len(response.records) == 0:
@@ -93,6 +108,10 @@ def pull(self) -> List[str]:
logger.warning(f"Skipping unsupported file type: {file.type}")
continue
+ if path is not None and not file.path.startswith(path):
+ # Filter by path prefix when one was provided
+ continue
+
# Skip if no content
if not getattr(file, "content", None):
logger.warning(f"No content found for {file.type} {getattr(file, 'id', '')}")
diff --git a/tests/sync/test_sync.py b/tests/sync/test_sync.py
index 5832d219..9c0121e5 100644
--- a/tests/sync/test_sync.py
+++ b/tests/sync/test_sync.py
@@ -140,6 +140,10 @@ def test_overload_log_with_local_files(humanloop_client: Humanloop, test_file_st
1. Create files in remote (via test_file_structure fixture)
2. Pull files locally
3. Test logging using the pulled files
+
+ :param humanloop_client: The Humanloop client with local files enabled
+ :param test_file_structure: List of test files created in remote
+ :param cleanup_local_files: Fixture to clean up local files after test
"""
# First pull the files locally
humanloop_client.pull()
From 2745020e43c26d90177ce7a0f5516106f0f90bb4 Mon Sep 17 00:00:00 2001
From: fern-api <115122769+fern-api[bot]@users.noreply.github.com>
Date: Wed, 30 Apr 2025 14:59:29 +0000
Subject: [PATCH 16/39] Release 0.8.36
---
poetry.lock | 302 +-
pyproject.toml | 2 +-
reference.md | 5751 +++++++++++++----
src/humanloop/__init__.py | 237 +-
src/humanloop/agents/__init__.py | 49 +
src/humanloop/agents/client.py | 3210 +++++++++
src/humanloop/agents/raw_client.py | 3891 +++++++++++
src/humanloop/agents/requests/__init__.py | 25 +
.../requests/agent_log_request_agent.py | 6 +
.../requests/agent_log_request_tool_choice.py | 8 +
.../agent_request_reasoning_effort.py | 6 +
.../agents/requests/agent_request_stop.py | 5 +
.../agents/requests/agent_request_template.py | 6 +
.../requests/agent_request_tools_item.py | 7 +
.../requests/agents_call_request_agent.py | 6 +
.../agents_call_request_tool_choice.py | 8 +
.../agents_call_stream_request_agent.py | 6 +
.../agents_call_stream_request_tool_choice.py | 8 +
src/humanloop/agents/types/__init__.py | 25 +
.../agents/types/agent_log_request_agent.py | 6 +
.../types/agent_log_request_tool_choice.py | 8 +
.../types/agent_request_reasoning_effort.py | 6 +
.../agents/types/agent_request_stop.py | 5 +
.../agents/types/agent_request_template.py | 6 +
.../agents/types/agent_request_tools_item.py | 7 +
.../agents/types/agents_call_request_agent.py | 6 +
.../types/agents_call_request_tool_choice.py | 8 +
.../types/agents_call_stream_request_agent.py | 6 +
.../agents_call_stream_request_tool_choice.py | 8 +
src/humanloop/base_client.py | 4 +
src/humanloop/core/client_wrapper.py | 4 +-
src/humanloop/files/client.py | 44 +-
src/humanloop/files/raw_client.py | 54 +-
...th_files_retrieve_by_path_post_response.py | 8 +-
...th_files_retrieve_by_path_post_response.py | 3 +-
src/humanloop/flows/client.py | 8 +-
src/humanloop/logs/client.py | 4 +-
src/humanloop/prompts/__init__.py | 16 +
src/humanloop/prompts/client.py | 267 +-
src/humanloop/prompts/raw_client.py | 335 +-
src/humanloop/prompts/requests/__init__.py | 8 +
.../requests/prompt_log_request_prompt.py | 6 +
.../prompt_request_reasoning_effort.py | 6 +
.../requests/prompts_call_request_prompt.py | 6 +
.../prompts_call_stream_request_prompt.py | 6 +
src/humanloop/prompts/types/__init__.py | 8 +
.../types/prompt_log_request_prompt.py | 6 +
.../types/prompt_request_reasoning_effort.py | 6 +
.../types/prompts_call_request_prompt.py | 6 +
.../prompts_call_stream_request_prompt.py | 6 +
src/humanloop/requests/__init__.py | 88 +-
src/humanloop/requests/agent_call_response.py | 202 +
.../agent_call_response_tool_choice.py | 8 +
.../requests/agent_call_stream_response.py | 19 +
.../agent_call_stream_response_payload.py | 8 +
.../requests/agent_continue_response.py | 202 +
.../agent_continue_response_tool_choice.py | 8 +
.../agent_continue_stream_response.py | 19 +
.../agent_continue_stream_response_payload.py | 8 +
src/humanloop/requests/agent_inline_tool.py | 13 +
.../requests/agent_kernel_request.py | 112 +
.../agent_kernel_request_reasoning_effort.py | 6 +
.../requests/agent_kernel_request_stop.py | 5 +
.../requests/agent_kernel_request_template.py | 6 +
.../agent_kernel_request_tools_item.py | 7 +
.../requests/agent_linked_file_request.py | 13 +
.../requests/agent_linked_file_response.py | 19 +
.../agent_linked_file_response_file.py | 21 +
src/humanloop/requests/agent_log_response.py | 201 +
.../agent_log_response_tool_choice.py | 8 +
.../requests/agent_log_stream_response.py | 87 +
src/humanloop/requests/agent_response.py | 242 +
.../agent_response_reasoning_effort.py | 6 +
src/humanloop/requests/agent_response_stop.py | 5 +
.../requests/agent_response_template.py | 6 +
.../requests/agent_response_tools_item.py | 10 +
.../anthropic_redacted_thinking_content.py | 12 +
.../requests/anthropic_thinking_content.py | 17 +
src/humanloop/requests/chat_message.py | 6 +
.../requests/chat_message_thinking_item.py | 7 +
.../requests/create_agent_log_response.py | 31 +
src/humanloop/requests/dataset_response.py | 5 +
...arents_and_children_response_files_item.py | 8 +-
src/humanloop/requests/evaluator_response.py | 5 +
.../file_environment_response_file.py | 8 +-
.../file_environment_variable_request.py | 15 +
src/humanloop/requests/flow_response.py | 5 +
src/humanloop/requests/linked_file_request.py | 10 +
src/humanloop/requests/list_agents.py | 12 +
src/humanloop/requests/log_response.py | 7 +-
src/humanloop/requests/log_stream_response.py | 7 +
.../requests/paginated_data_agent_response.py | 12 +
..._response_flow_response_agent_response.py} | 8 +-
...w_response_agent_response_records_item.py} | 14 +-
.../requests/populate_template_response.py | 16 +-
...late_template_response_reasoning_effort.py | 6 +
.../requests/prompt_kernel_request.py | 12 +-
.../prompt_kernel_request_reasoning_effort.py | 6 +
src/humanloop/requests/prompt_response.py | 16 +-
.../prompt_response_reasoning_effort.py | 6 +
.../requests/run_version_response.py | 3 +-
src/humanloop/requests/tool_call_response.py | 146 +
src/humanloop/requests/tool_log_response.py | 6 +
.../version_deployment_response_file.py | 8 +-
.../requests/version_id_response_version.py | 8 +-
src/humanloop/tools/client.py | 523 +-
src/humanloop/tools/raw_client.py | 765 ++-
src/humanloop/types/__init__.py | 96 +-
src/humanloop/types/agent_call_response.py | 224 +
.../types/agent_call_response_tool_choice.py | 8 +
.../types/agent_call_stream_response.py | 44 +
.../agent_call_stream_response_payload.py | 8 +
.../types/agent_continue_response.py | 224 +
.../agent_continue_response_tool_choice.py | 8 +
.../types/agent_continue_stream_response.py | 44 +
.../agent_continue_stream_response_payload.py | 8 +
src/humanloop/types/agent_inline_tool.py | 23 +
src/humanloop/types/agent_kernel_request.py | 122 +
.../agent_kernel_request_reasoning_effort.py | 6 +
.../types/agent_kernel_request_stop.py | 5 +
.../types/agent_kernel_request_template.py | 6 +
.../types/agent_kernel_request_tools_item.py | 7 +
.../types/agent_linked_file_request.py | 23 +
.../types/agent_linked_file_response.py | 39 +
.../types/agent_linked_file_response_file.py | 16 +
src/humanloop/types/agent_log_response.py | 224 +
.../types/agent_log_response_tool_choice.py | 8 +
.../types/agent_log_stream_response.py | 98 +
src/humanloop/types/agent_response.py | 265 +
.../types/agent_response_reasoning_effort.py | 6 +
src/humanloop/types/agent_response_stop.py | 5 +
.../types/agent_response_template.py | 6 +
.../types/agent_response_tools_item.py | 10 +
.../anthropic_redacted_thinking_content.py | 23 +
.../types/anthropic_thinking_content.py | 28 +
src/humanloop/types/chat_message.py | 6 +
.../types/chat_message_thinking_item.py | 7 +
.../types/create_agent_log_response.py | 42 +
src/humanloop/types/dataset_response.py | 9 +
...tory_with_parents_and_children_response.py | 2 +
...arents_and_children_response_files_item.py | 3 +-
src/humanloop/types/evaluatee_response.py | 2 +
.../types/evaluation_evaluator_response.py | 2 +
.../types/evaluation_log_response.py | 3 +
src/humanloop/types/evaluation_response.py | 2 +
.../types/evaluation_run_response.py | 2 +
.../types/evaluation_runs_response.py | 2 +
src/humanloop/types/evaluator_log_response.py | 3 +
src/humanloop/types/evaluator_response.py | 11 +
src/humanloop/types/event_type.py | 21 +
.../types/file_environment_response.py | 2 +
.../types/file_environment_response_file.py | 3 +-
.../file_environment_variable_request.py | 27 +
src/humanloop/types/file_type.py | 2 +-
src/humanloop/types/files_tool_type.py | 2 +-
src/humanloop/types/flow_log_response.py | 3 +
src/humanloop/types/flow_response.py | 11 +
src/humanloop/types/linked_file_request.py | 21 +
src/humanloop/types/list_agents.py | 31 +
src/humanloop/types/list_evaluators.py | 2 +
src/humanloop/types/list_flows.py | 2 +
src/humanloop/types/list_prompts.py | 2 +
src/humanloop/types/list_tools.py | 2 +
src/humanloop/types/log_response.py | 5 +-
src/humanloop/types/log_stream_response.py | 7 +
src/humanloop/types/model_providers.py | 2 +-
.../types/monitoring_evaluator_response.py | 2 +
src/humanloop/types/on_agent_call_enum.py | 5 +
.../types/open_ai_reasoning_effort.py | 5 +
.../types/paginated_data_agent_response.py | 31 +
.../paginated_data_evaluation_log_response.py | 3 +
.../paginated_data_evaluator_response.py | 2 +
.../types/paginated_data_flow_response.py | 2 +
.../types/paginated_data_log_response.py | 3 +
.../types/paginated_data_prompt_response.py | 2 +
.../types/paginated_data_tool_response.py | 2 +
..._response_flow_response_agent_response.py} | 12 +-
...w_response_agent_response_records_item.py} | 7 +-
.../types/paginated_evaluation_response.py | 2 +
.../types/populate_template_response.py | 22 +-
...late_template_response_reasoning_effort.py | 6 +
src/humanloop/types/prompt_call_response.py | 2 +
src/humanloop/types/prompt_kernel_request.py | 12 +-
.../prompt_kernel_request_reasoning_effort.py | 6 +
src/humanloop/types/prompt_log_response.py | 3 +
src/humanloop/types/prompt_response.py | 22 +-
.../types/prompt_response_reasoning_effort.py | 6 +
src/humanloop/types/reasoning_effort.py | 5 -
src/humanloop/types/run_version_response.py | 3 +-
src/humanloop/types/tool_call_response.py | 168 +
src/humanloop/types/tool_log_response.py | 9 +
src/humanloop/types/tool_response.py | 2 +
.../types/version_deployment_response.py | 2 +
.../types/version_deployment_response_file.py | 3 +-
src/humanloop/types/version_id_response.py | 2 +
.../types/version_id_response_version.py | 3 +-
196 files changed, 17976 insertions(+), 1677 deletions(-)
create mode 100644 src/humanloop/agents/__init__.py
create mode 100644 src/humanloop/agents/client.py
create mode 100644 src/humanloop/agents/raw_client.py
create mode 100644 src/humanloop/agents/requests/__init__.py
create mode 100644 src/humanloop/agents/requests/agent_log_request_agent.py
create mode 100644 src/humanloop/agents/requests/agent_log_request_tool_choice.py
create mode 100644 src/humanloop/agents/requests/agent_request_reasoning_effort.py
create mode 100644 src/humanloop/agents/requests/agent_request_stop.py
create mode 100644 src/humanloop/agents/requests/agent_request_template.py
create mode 100644 src/humanloop/agents/requests/agent_request_tools_item.py
create mode 100644 src/humanloop/agents/requests/agents_call_request_agent.py
create mode 100644 src/humanloop/agents/requests/agents_call_request_tool_choice.py
create mode 100644 src/humanloop/agents/requests/agents_call_stream_request_agent.py
create mode 100644 src/humanloop/agents/requests/agents_call_stream_request_tool_choice.py
create mode 100644 src/humanloop/agents/types/__init__.py
create mode 100644 src/humanloop/agents/types/agent_log_request_agent.py
create mode 100644 src/humanloop/agents/types/agent_log_request_tool_choice.py
create mode 100644 src/humanloop/agents/types/agent_request_reasoning_effort.py
create mode 100644 src/humanloop/agents/types/agent_request_stop.py
create mode 100644 src/humanloop/agents/types/agent_request_template.py
create mode 100644 src/humanloop/agents/types/agent_request_tools_item.py
create mode 100644 src/humanloop/agents/types/agents_call_request_agent.py
create mode 100644 src/humanloop/agents/types/agents_call_request_tool_choice.py
create mode 100644 src/humanloop/agents/types/agents_call_stream_request_agent.py
create mode 100644 src/humanloop/agents/types/agents_call_stream_request_tool_choice.py
create mode 100644 src/humanloop/prompts/requests/prompt_log_request_prompt.py
create mode 100644 src/humanloop/prompts/requests/prompt_request_reasoning_effort.py
create mode 100644 src/humanloop/prompts/requests/prompts_call_request_prompt.py
create mode 100644 src/humanloop/prompts/requests/prompts_call_stream_request_prompt.py
create mode 100644 src/humanloop/prompts/types/prompt_log_request_prompt.py
create mode 100644 src/humanloop/prompts/types/prompt_request_reasoning_effort.py
create mode 100644 src/humanloop/prompts/types/prompts_call_request_prompt.py
create mode 100644 src/humanloop/prompts/types/prompts_call_stream_request_prompt.py
create mode 100644 src/humanloop/requests/agent_call_response.py
create mode 100644 src/humanloop/requests/agent_call_response_tool_choice.py
create mode 100644 src/humanloop/requests/agent_call_stream_response.py
create mode 100644 src/humanloop/requests/agent_call_stream_response_payload.py
create mode 100644 src/humanloop/requests/agent_continue_response.py
create mode 100644 src/humanloop/requests/agent_continue_response_tool_choice.py
create mode 100644 src/humanloop/requests/agent_continue_stream_response.py
create mode 100644 src/humanloop/requests/agent_continue_stream_response_payload.py
create mode 100644 src/humanloop/requests/agent_inline_tool.py
create mode 100644 src/humanloop/requests/agent_kernel_request.py
create mode 100644 src/humanloop/requests/agent_kernel_request_reasoning_effort.py
create mode 100644 src/humanloop/requests/agent_kernel_request_stop.py
create mode 100644 src/humanloop/requests/agent_kernel_request_template.py
create mode 100644 src/humanloop/requests/agent_kernel_request_tools_item.py
create mode 100644 src/humanloop/requests/agent_linked_file_request.py
create mode 100644 src/humanloop/requests/agent_linked_file_response.py
create mode 100644 src/humanloop/requests/agent_linked_file_response_file.py
create mode 100644 src/humanloop/requests/agent_log_response.py
create mode 100644 src/humanloop/requests/agent_log_response_tool_choice.py
create mode 100644 src/humanloop/requests/agent_log_stream_response.py
create mode 100644 src/humanloop/requests/agent_response.py
create mode 100644 src/humanloop/requests/agent_response_reasoning_effort.py
create mode 100644 src/humanloop/requests/agent_response_stop.py
create mode 100644 src/humanloop/requests/agent_response_template.py
create mode 100644 src/humanloop/requests/agent_response_tools_item.py
create mode 100644 src/humanloop/requests/anthropic_redacted_thinking_content.py
create mode 100644 src/humanloop/requests/anthropic_thinking_content.py
create mode 100644 src/humanloop/requests/chat_message_thinking_item.py
create mode 100644 src/humanloop/requests/create_agent_log_response.py
create mode 100644 src/humanloop/requests/file_environment_variable_request.py
create mode 100644 src/humanloop/requests/linked_file_request.py
create mode 100644 src/humanloop/requests/list_agents.py
create mode 100644 src/humanloop/requests/log_stream_response.py
create mode 100644 src/humanloop/requests/paginated_data_agent_response.py
rename src/humanloop/requests/{paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py => paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py} (65%)
rename src/humanloop/requests/{paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py => paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py} (58%)
create mode 100644 src/humanloop/requests/populate_template_response_reasoning_effort.py
create mode 100644 src/humanloop/requests/prompt_kernel_request_reasoning_effort.py
create mode 100644 src/humanloop/requests/prompt_response_reasoning_effort.py
create mode 100644 src/humanloop/requests/tool_call_response.py
create mode 100644 src/humanloop/types/agent_call_response.py
create mode 100644 src/humanloop/types/agent_call_response_tool_choice.py
create mode 100644 src/humanloop/types/agent_call_stream_response.py
create mode 100644 src/humanloop/types/agent_call_stream_response_payload.py
create mode 100644 src/humanloop/types/agent_continue_response.py
create mode 100644 src/humanloop/types/agent_continue_response_tool_choice.py
create mode 100644 src/humanloop/types/agent_continue_stream_response.py
create mode 100644 src/humanloop/types/agent_continue_stream_response_payload.py
create mode 100644 src/humanloop/types/agent_inline_tool.py
create mode 100644 src/humanloop/types/agent_kernel_request.py
create mode 100644 src/humanloop/types/agent_kernel_request_reasoning_effort.py
create mode 100644 src/humanloop/types/agent_kernel_request_stop.py
create mode 100644 src/humanloop/types/agent_kernel_request_template.py
create mode 100644 src/humanloop/types/agent_kernel_request_tools_item.py
create mode 100644 src/humanloop/types/agent_linked_file_request.py
create mode 100644 src/humanloop/types/agent_linked_file_response.py
create mode 100644 src/humanloop/types/agent_linked_file_response_file.py
create mode 100644 src/humanloop/types/agent_log_response.py
create mode 100644 src/humanloop/types/agent_log_response_tool_choice.py
create mode 100644 src/humanloop/types/agent_log_stream_response.py
create mode 100644 src/humanloop/types/agent_response.py
create mode 100644 src/humanloop/types/agent_response_reasoning_effort.py
create mode 100644 src/humanloop/types/agent_response_stop.py
create mode 100644 src/humanloop/types/agent_response_template.py
create mode 100644 src/humanloop/types/agent_response_tools_item.py
create mode 100644 src/humanloop/types/anthropic_redacted_thinking_content.py
create mode 100644 src/humanloop/types/anthropic_thinking_content.py
create mode 100644 src/humanloop/types/chat_message_thinking_item.py
create mode 100644 src/humanloop/types/create_agent_log_response.py
create mode 100644 src/humanloop/types/event_type.py
create mode 100644 src/humanloop/types/file_environment_variable_request.py
create mode 100644 src/humanloop/types/linked_file_request.py
create mode 100644 src/humanloop/types/list_agents.py
create mode 100644 src/humanloop/types/log_stream_response.py
create mode 100644 src/humanloop/types/on_agent_call_enum.py
create mode 100644 src/humanloop/types/open_ai_reasoning_effort.py
create mode 100644 src/humanloop/types/paginated_data_agent_response.py
rename src/humanloop/types/{paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py => paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py} (76%)
rename src/humanloop/types/{paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py => paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py} (63%)
create mode 100644 src/humanloop/types/populate_template_response_reasoning_effort.py
create mode 100644 src/humanloop/types/prompt_kernel_request_reasoning_effort.py
create mode 100644 src/humanloop/types/prompt_response_reasoning_effort.py
delete mode 100644 src/humanloop/types/reasoning_effort.py
create mode 100644 src/humanloop/types/tool_call_response.py
diff --git a/poetry.lock b/poetry.lock
index 4ce5d536..cfe8a240 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -78,13 +78,13 @@ tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"]
[[package]]
name = "certifi"
-version = "2025.1.31"
+version = "2025.4.26"
description = "Python package for providing Mozilla's CA Bundle."
optional = false
python-versions = ">=3.6"
files = [
- {file = "certifi-2025.1.31-py3-none-any.whl", hash = "sha256:ca78db4565a652026a4db2bcdf68f2fb589ea80d0be70e03929ed730746b84fe"},
- {file = "certifi-2025.1.31.tar.gz", hash = "sha256:3d5da6925056f6f18f119200434a4780a94263f10d1c21d032a6f6b2baa20651"},
+ {file = "certifi-2025.4.26-py3-none-any.whl", hash = "sha256:30350364dfe371162649852c63336a15c70c6510c2ad5015b21c2345311805f3"},
+ {file = "certifi-2025.4.26.tar.gz", hash = "sha256:0a816057ea3cdefcef70270d2c515e4506bbc954f417fa5ade2021213bb8f0c6"},
]
[[package]]
@@ -384,13 +384,13 @@ tqdm = ["tqdm"]
[[package]]
name = "groq"
-version = "0.23.0"
+version = "0.23.1"
description = "The official Python library for the groq API"
optional = false
python-versions = ">=3.8"
files = [
- {file = "groq-0.23.0-py3-none-any.whl", hash = "sha256:039817a6b75d70f129f0591f8c79d3f7655dcf728b709fe5f08cfeadb1d9cc19"},
- {file = "groq-0.23.0.tar.gz", hash = "sha256:426e1d89df5791b34fa3f2eb827aec38490b9b2de5a44bbba6161cf5282ea5c9"},
+ {file = "groq-0.23.1-py3-none-any.whl", hash = "sha256:05fa38c3d0ad03c19c6185f98f6a73901c2a463e844fd067b79f7b05c8346946"},
+ {file = "groq-0.23.1.tar.gz", hash = "sha256:952e34895f9bfb78ab479e495d77b32180262e5c42f531ce3a1722d6e5a04dfb"},
]
[package.dependencies]
@@ -403,29 +403,29 @@ typing-extensions = ">=4.10,<5"
[[package]]
name = "h11"
-version = "0.14.0"
+version = "0.16.0"
description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"},
- {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"},
+ {file = "h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86"},
+ {file = "h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1"},
]
[[package]]
name = "httpcore"
-version = "1.0.8"
+version = "1.0.9"
description = "A minimal low-level HTTP client."
optional = false
python-versions = ">=3.8"
files = [
- {file = "httpcore-1.0.8-py3-none-any.whl", hash = "sha256:5254cf149bcb5f75e9d1b2b9f729ea4a4b883d1ad7379fc632b727cec23674be"},
- {file = "httpcore-1.0.8.tar.gz", hash = "sha256:86e94505ed24ea06514883fd44d2bc02d90e77e7979c8eb71b90f41d364a1bad"},
+ {file = "httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55"},
+ {file = "httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8"},
]
[package.dependencies]
certifi = "*"
-h11 = ">=0.13,<0.15"
+h11 = ">=0.16"
[package.extras]
asyncio = ["anyio (>=4.0,<5.0)"]
@@ -873,13 +873,13 @@ files = [
[[package]]
name = "openai"
-version = "1.75.0"
+version = "1.76.2"
description = "The official Python library for the openai API"
optional = false
python-versions = ">=3.8"
files = [
- {file = "openai-1.75.0-py3-none-any.whl", hash = "sha256:fe6f932d2ded3b429ff67cc9ad118c71327db32eb9d32dd723de3acfca337125"},
- {file = "openai-1.75.0.tar.gz", hash = "sha256:fb3ea907efbdb1bcfd0c44507ad9c961afd7dce3147292b54505ecfd17be8fd1"},
+ {file = "openai-1.76.2-py3-none-any.whl", hash = "sha256:9c1d9ad59e6e3bea7205eedc9ca66eeebae18d47b527e505a2b0d2fb1538e26e"},
+ {file = "openai-1.76.2.tar.gz", hash = "sha256:f430c8b848775907405c6eff54621254c96f6444c593c097e0cc3a9f8fdda96f"},
]
[package.dependencies]
@@ -931,30 +931,30 @@ wrapt = ">=1.0.0,<2.0.0"
[[package]]
name = "opentelemetry-instrumentation-anthropic"
-version = "0.39.2"
+version = "0.40.2"
description = "OpenTelemetry Anthropic instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_anthropic-0.39.2-py3-none-any.whl", hash = "sha256:e1bfed6f4e140e0e35d19d44281c968970004467ccc1f40a07233618f798809c"},
- {file = "opentelemetry_instrumentation_anthropic-0.39.2.tar.gz", hash = "sha256:a0dab35b4bc8561623b8f503220846a6b5ad07cd7d3277eeaf5e865d57c6e266"},
+ {file = "opentelemetry_instrumentation_anthropic-0.40.2-py3-none-any.whl", hash = "sha256:94e3474dfcb65ada10a5d83056e9e43dc0afbaae43a55bba6b7712672e28d21a"},
+ {file = "opentelemetry_instrumentation_anthropic-0.40.2.tar.gz", hash = "sha256:949156556ed4d908196984fac1a8ea3d16edcf9d7395d85729a0e7712b2f818f"},
]
[package.dependencies]
opentelemetry-api = ">=1.28.0,<2.0.0"
opentelemetry-instrumentation = ">=0.50b0"
opentelemetry-semantic-conventions = ">=0.50b0"
-opentelemetry-semantic-conventions-ai = "0.4.3"
+opentelemetry-semantic-conventions-ai = "0.4.5"
[[package]]
name = "opentelemetry-instrumentation-bedrock"
-version = "0.39.2"
+version = "0.40.2"
description = "OpenTelemetry Bedrock instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_bedrock-0.39.2-py3-none-any.whl", hash = "sha256:dca1b2c5d0c74f41254c6de39fed51167357469159f9453cd9815143a213a1c8"},
- {file = "opentelemetry_instrumentation_bedrock-0.39.2.tar.gz", hash = "sha256:ffe79fa8302dde69c5df86e602288ab48d31bdf3dffe6846cbe6a75cc0bb6385"},
+ {file = "opentelemetry_instrumentation_bedrock-0.40.2-py3-none-any.whl", hash = "sha256:a12331e2cd77eb61f954acbaa50cdf31954f2b315b52da6354284ce0b83f2773"},
+ {file = "opentelemetry_instrumentation_bedrock-0.40.2.tar.gz", hash = "sha256:a1d49d41d8435ba368698a884ffbd4fbda1f1325d6961b805706ee0bbbc6547f"},
]
[package.dependencies]
@@ -962,77 +962,77 @@ anthropic = ">=0.17.0"
opentelemetry-api = ">=1.28.0,<2.0.0"
opentelemetry-instrumentation = ">=0.50b0"
opentelemetry-semantic-conventions = ">=0.50b0"
-opentelemetry-semantic-conventions-ai = "0.4.3"
+opentelemetry-semantic-conventions-ai = "0.4.5"
tokenizers = ">=0.13.0"
[[package]]
name = "opentelemetry-instrumentation-cohere"
-version = "0.39.2"
+version = "0.40.2"
description = "OpenTelemetry Cohere instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_cohere-0.39.2-py3-none-any.whl", hash = "sha256:a71e289231c3ddbe67dd32c0ed8df8b55367ab594410f2cff82f27784268cba5"},
- {file = "opentelemetry_instrumentation_cohere-0.39.2.tar.gz", hash = "sha256:7a7e441d2c8c862e8ba84170bcaef81c5d5e63b42243b7dcc887541a71c90e15"},
+ {file = "opentelemetry_instrumentation_cohere-0.40.2-py3-none-any.whl", hash = "sha256:96fde68b0d8ce68f272f4c54f30178cb22cbadb196735a3943cc328891a9d508"},
+ {file = "opentelemetry_instrumentation_cohere-0.40.2.tar.gz", hash = "sha256:df3cac041b0769540f2362d8280e7f0179ff1446e47fb2542f22d91822c30fc4"},
]
[package.dependencies]
opentelemetry-api = ">=1.28.0,<2.0.0"
opentelemetry-instrumentation = ">=0.50b0"
opentelemetry-semantic-conventions = ">=0.50b0"
-opentelemetry-semantic-conventions-ai = "0.4.3"
+opentelemetry-semantic-conventions-ai = "0.4.5"
[[package]]
name = "opentelemetry-instrumentation-groq"
-version = "0.39.2"
+version = "0.40.2"
description = "OpenTelemetry Groq instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_groq-0.39.2-py3-none-any.whl", hash = "sha256:0a19571ef86ce46b18e3c5402d321b620c8d5257bc968e8d7073c8937a376970"},
- {file = "opentelemetry_instrumentation_groq-0.39.2.tar.gz", hash = "sha256:b28a2220f24d8fbea12dc4452ef5812e7ba67c6824b4e62278c3b3ada2248acc"},
+ {file = "opentelemetry_instrumentation_groq-0.40.2-py3-none-any.whl", hash = "sha256:32e9220439b8356f33edbafbfd8b7f4ea063c1465ff29389abefcc93eca19530"},
+ {file = "opentelemetry_instrumentation_groq-0.40.2.tar.gz", hash = "sha256:c127d089a5aec9f49ed9ba6bdbd00d67af596040a778eaef3641cd18d114ae93"},
]
[package.dependencies]
opentelemetry-api = ">=1.28.0,<2.0.0"
opentelemetry-instrumentation = ">=0.50b0"
opentelemetry-semantic-conventions = ">=0.50b0"
-opentelemetry-semantic-conventions-ai = "0.4.3"
+opentelemetry-semantic-conventions-ai = "0.4.5"
[[package]]
name = "opentelemetry-instrumentation-openai"
-version = "0.39.2"
+version = "0.40.2"
description = "OpenTelemetry OpenAI instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_openai-0.39.2-py3-none-any.whl", hash = "sha256:a9016e577a8c11cdfc6d79ebb84ed5f6dcacb59d709d250e40b3d08f9d4c25a2"},
- {file = "opentelemetry_instrumentation_openai-0.39.2.tar.gz", hash = "sha256:25cf133fa3b623f123d953c9d637e6529a1790cd2898bf4d6a50c5bffe260821"},
+ {file = "opentelemetry_instrumentation_openai-0.40.2-py3-none-any.whl", hash = "sha256:62fe130f16f2933f1db75f9a14807bb08444534fd8d2e6ad4668ee8b1c3968a5"},
+ {file = "opentelemetry_instrumentation_openai-0.40.2.tar.gz", hash = "sha256:61e46e7a9e3f5d7fb0cef82f1fd7bd6a26848a28ec384249875fe5622ddbf622"},
]
[package.dependencies]
opentelemetry-api = ">=1.28.0,<2.0.0"
opentelemetry-instrumentation = ">=0.50b0"
opentelemetry-semantic-conventions = ">=0.50b0"
-opentelemetry-semantic-conventions-ai = "0.4.3"
+opentelemetry-semantic-conventions-ai = "0.4.5"
tiktoken = ">=0.6.0,<1"
[[package]]
name = "opentelemetry-instrumentation-replicate"
-version = "0.39.2"
+version = "0.40.2"
description = "OpenTelemetry Replicate instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_replicate-0.39.2-py3-none-any.whl", hash = "sha256:778ec5a2bf7767b7377ece0dec66dc2d02f1ea8ca3f8037c96c7b6695c56b8db"},
- {file = "opentelemetry_instrumentation_replicate-0.39.2.tar.gz", hash = "sha256:6b9ddbf89d844ffc3725925af04fbee3a0f7a6d19d6050fb9c72bb8dd2eca7eb"},
+ {file = "opentelemetry_instrumentation_replicate-0.40.2-py3-none-any.whl", hash = "sha256:ab6234081ae9803981e8e6302524bd25fc3d0e38e9a939bee6ad15f85405ccb8"},
+ {file = "opentelemetry_instrumentation_replicate-0.40.2.tar.gz", hash = "sha256:e7edf785c07e94c951f8268ff1204e00b1fcc86059b3475ac04e01b74f9785c6"},
]
[package.dependencies]
opentelemetry-api = ">=1.28.0,<2.0.0"
opentelemetry-instrumentation = ">=0.50b0"
opentelemetry-semantic-conventions = ">=0.50b0"
-opentelemetry-semantic-conventions-ai = "0.4.3"
+opentelemetry-semantic-conventions-ai = "0.4.5"
[[package]]
name = "opentelemetry-proto"
@@ -1081,13 +1081,13 @@ opentelemetry-api = "1.32.1"
[[package]]
name = "opentelemetry-semantic-conventions-ai"
-version = "0.4.3"
+version = "0.4.5"
description = "OpenTelemetry Semantic Conventions Extension for Large Language Models"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_semantic_conventions_ai-0.4.3-py3-none-any.whl", hash = "sha256:9ff60bbf38c8a891c20a355b4ca1948380361e27412c3ead264de0d050fa2570"},
- {file = "opentelemetry_semantic_conventions_ai-0.4.3.tar.gz", hash = "sha256:761a68a7e99436dfc53cfe1f99507316aa0114ac480f0c42743b9320b7c94831"},
+ {file = "opentelemetry_semantic_conventions_ai-0.4.5-py3-none-any.whl", hash = "sha256:91e5c776d45190cebd88ea1cef021e231b5c04c448f5473fdaeb310f14e62b11"},
+ {file = "opentelemetry_semantic_conventions_ai-0.4.5.tar.gz", hash = "sha256:15e2540aa807fb6748f1bdc60da933ee2fb2e40f6dec48fde8facfd9e22550d7"},
]
[[package]]
@@ -1320,18 +1320,18 @@ test = ["cffi", "hypothesis", "pandas", "pytest", "pytz"]
[[package]]
name = "pydantic"
-version = "2.11.3"
+version = "2.11.4"
description = "Data validation using Python type hints"
optional = false
python-versions = ">=3.9"
files = [
- {file = "pydantic-2.11.3-py3-none-any.whl", hash = "sha256:a082753436a07f9ba1289c6ffa01cd93db3548776088aa917cc43b63f68fa60f"},
- {file = "pydantic-2.11.3.tar.gz", hash = "sha256:7471657138c16adad9322fe3070c0116dd6c3ad8d649300e3cbdfe91f4db4ec3"},
+ {file = "pydantic-2.11.4-py3-none-any.whl", hash = "sha256:d9615eaa9ac5a063471da949c8fc16376a84afb5024688b3ff885693506764eb"},
+ {file = "pydantic-2.11.4.tar.gz", hash = "sha256:32738d19d63a226a52eed76645a98ee07c1f410ee41d93b4afbfa85ed8111c2d"},
]
[package.dependencies]
annotated-types = ">=0.6.0"
-pydantic-core = "2.33.1"
+pydantic-core = "2.33.2"
typing-extensions = ">=4.12.2"
typing-inspection = ">=0.4.0"
@@ -1341,110 +1341,110 @@ timezone = ["tzdata"]
[[package]]
name = "pydantic-core"
-version = "2.33.1"
+version = "2.33.2"
description = "Core functionality for Pydantic validation and serialization"
optional = false
python-versions = ">=3.9"
files = [
- {file = "pydantic_core-2.33.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3077cfdb6125cc8dab61b155fdd714663e401f0e6883f9632118ec12cf42df26"},
- {file = "pydantic_core-2.33.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8ffab8b2908d152e74862d276cf5017c81a2f3719f14e8e3e8d6b83fda863927"},
- {file = "pydantic_core-2.33.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5183e4f6a2d468787243ebcd70cf4098c247e60d73fb7d68d5bc1e1beaa0c4db"},
- {file = "pydantic_core-2.33.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:398a38d323f37714023be1e0285765f0a27243a8b1506b7b7de87b647b517e48"},
- {file = "pydantic_core-2.33.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:87d3776f0001b43acebfa86f8c64019c043b55cc5a6a2e313d728b5c95b46969"},
- {file = "pydantic_core-2.33.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c566dd9c5f63d22226409553531f89de0cac55397f2ab8d97d6f06cfce6d947e"},
- {file = "pydantic_core-2.33.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0d5f3acc81452c56895e90643a625302bd6be351e7010664151cc55b7b97f89"},
- {file = "pydantic_core-2.33.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d3a07fadec2a13274a8d861d3d37c61e97a816beae717efccaa4b36dfcaadcde"},
- {file = "pydantic_core-2.33.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:f99aeda58dce827f76963ee87a0ebe75e648c72ff9ba1174a253f6744f518f65"},
- {file = "pydantic_core-2.33.1-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:902dbc832141aa0ec374f4310f1e4e7febeebc3256f00dc359a9ac3f264a45dc"},
- {file = "pydantic_core-2.33.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fe44d56aa0b00d66640aa84a3cbe80b7a3ccdc6f0b1ca71090696a6d4777c091"},
- {file = "pydantic_core-2.33.1-cp310-cp310-win32.whl", hash = "sha256:ed3eb16d51257c763539bde21e011092f127a2202692afaeaccb50db55a31383"},
- {file = "pydantic_core-2.33.1-cp310-cp310-win_amd64.whl", hash = "sha256:694ad99a7f6718c1a498dc170ca430687a39894a60327f548e02a9c7ee4b6504"},
- {file = "pydantic_core-2.33.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:6e966fc3caaf9f1d96b349b0341c70c8d6573bf1bac7261f7b0ba88f96c56c24"},
- {file = "pydantic_core-2.33.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bfd0adeee563d59c598ceabddf2c92eec77abcb3f4a391b19aa7366170bd9e30"},
- {file = "pydantic_core-2.33.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:91815221101ad3c6b507804178a7bb5cb7b2ead9ecd600041669c8d805ebd595"},
- {file = "pydantic_core-2.33.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9fea9c1869bb4742d174a57b4700c6dadea951df8b06de40c2fedb4f02931c2e"},
- {file = "pydantic_core-2.33.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d20eb4861329bb2484c021b9d9a977566ab16d84000a57e28061151c62b349a"},
- {file = "pydantic_core-2.33.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fb935c5591573ae3201640579f30128ccc10739b45663f93c06796854405505"},
- {file = "pydantic_core-2.33.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c964fd24e6166420d18fb53996d8c9fd6eac9bf5ae3ec3d03015be4414ce497f"},
- {file = "pydantic_core-2.33.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:681d65e9011f7392db5aa002b7423cc442d6a673c635668c227c6c8d0e5a4f77"},
- {file = "pydantic_core-2.33.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e100c52f7355a48413e2999bfb4e139d2977a904495441b374f3d4fb4a170961"},
- {file = "pydantic_core-2.33.1-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:048831bd363490be79acdd3232f74a0e9951b11b2b4cc058aeb72b22fdc3abe1"},
- {file = "pydantic_core-2.33.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:bdc84017d28459c00db6f918a7272a5190bec3090058334e43a76afb279eac7c"},
- {file = "pydantic_core-2.33.1-cp311-cp311-win32.whl", hash = "sha256:32cd11c5914d1179df70406427097c7dcde19fddf1418c787540f4b730289896"},
- {file = "pydantic_core-2.33.1-cp311-cp311-win_amd64.whl", hash = "sha256:2ea62419ba8c397e7da28a9170a16219d310d2cf4970dbc65c32faf20d828c83"},
- {file = "pydantic_core-2.33.1-cp311-cp311-win_arm64.whl", hash = "sha256:fc903512177361e868bc1f5b80ac8c8a6e05fcdd574a5fb5ffeac5a9982b9e89"},
- {file = "pydantic_core-2.33.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:1293d7febb995e9d3ec3ea09caf1a26214eec45b0f29f6074abb004723fc1de8"},
- {file = "pydantic_core-2.33.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:99b56acd433386c8f20be5c4000786d1e7ca0523c8eefc995d14d79c7a081498"},
- {file = "pydantic_core-2.33.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35a5ec3fa8c2fe6c53e1b2ccc2454398f95d5393ab398478f53e1afbbeb4d939"},
- {file = "pydantic_core-2.33.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b172f7b9d2f3abc0efd12e3386f7e48b576ef309544ac3a63e5e9cdd2e24585d"},
- {file = "pydantic_core-2.33.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9097b9f17f91eea659b9ec58148c0747ec354a42f7389b9d50701610d86f812e"},
- {file = "pydantic_core-2.33.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cc77ec5b7e2118b152b0d886c7514a4653bcb58c6b1d760134a9fab915f777b3"},
- {file = "pydantic_core-2.33.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5e3d15245b08fa4a84cefc6c9222e6f37c98111c8679fbd94aa145f9a0ae23d"},
- {file = "pydantic_core-2.33.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ef99779001d7ac2e2461d8ab55d3373fe7315caefdbecd8ced75304ae5a6fc6b"},
- {file = "pydantic_core-2.33.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:fc6bf8869e193855e8d91d91f6bf59699a5cdfaa47a404e278e776dd7f168b39"},
- {file = "pydantic_core-2.33.1-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:b1caa0bc2741b043db7823843e1bde8aaa58a55a58fda06083b0569f8b45693a"},
- {file = "pydantic_core-2.33.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ec259f62538e8bf364903a7d0d0239447059f9434b284f5536e8402b7dd198db"},
- {file = "pydantic_core-2.33.1-cp312-cp312-win32.whl", hash = "sha256:e14f369c98a7c15772b9da98987f58e2b509a93235582838bd0d1d8c08b68fda"},
- {file = "pydantic_core-2.33.1-cp312-cp312-win_amd64.whl", hash = "sha256:1c607801d85e2e123357b3893f82c97a42856192997b95b4d8325deb1cd0c5f4"},
- {file = "pydantic_core-2.33.1-cp312-cp312-win_arm64.whl", hash = "sha256:8d13f0276806ee722e70a1c93da19748594f19ac4299c7e41237fc791d1861ea"},
- {file = "pydantic_core-2.33.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:70af6a21237b53d1fe7b9325b20e65cbf2f0a848cf77bed492b029139701e66a"},
- {file = "pydantic_core-2.33.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:282b3fe1bbbe5ae35224a0dbd05aed9ccabccd241e8e6b60370484234b456266"},
- {file = "pydantic_core-2.33.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b315e596282bbb5822d0c7ee9d255595bd7506d1cb20c2911a4da0b970187d3"},
- {file = "pydantic_core-2.33.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1dfae24cf9921875ca0ca6a8ecb4bb2f13c855794ed0d468d6abbec6e6dcd44a"},
- {file = "pydantic_core-2.33.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6dd8ecfde08d8bfadaea669e83c63939af76f4cf5538a72597016edfa3fad516"},
- {file = "pydantic_core-2.33.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2f593494876eae852dc98c43c6f260f45abdbfeec9e4324e31a481d948214764"},
- {file = "pydantic_core-2.33.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:948b73114f47fd7016088e5186d13faf5e1b2fe83f5e320e371f035557fd264d"},
- {file = "pydantic_core-2.33.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e11f3864eb516af21b01e25fac915a82e9ddad3bb0fb9e95a246067398b435a4"},
- {file = "pydantic_core-2.33.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:549150be302428b56fdad0c23c2741dcdb5572413776826c965619a25d9c6bde"},
- {file = "pydantic_core-2.33.1-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:495bc156026efafd9ef2d82372bd38afce78ddd82bf28ef5276c469e57c0c83e"},
- {file = "pydantic_core-2.33.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ec79de2a8680b1a67a07490bddf9636d5c2fab609ba8c57597e855fa5fa4dacd"},
- {file = "pydantic_core-2.33.1-cp313-cp313-win32.whl", hash = "sha256:ee12a7be1742f81b8a65b36c6921022301d466b82d80315d215c4c691724986f"},
- {file = "pydantic_core-2.33.1-cp313-cp313-win_amd64.whl", hash = "sha256:ede9b407e39949d2afc46385ce6bd6e11588660c26f80576c11c958e6647bc40"},
- {file = "pydantic_core-2.33.1-cp313-cp313-win_arm64.whl", hash = "sha256:aa687a23d4b7871a00e03ca96a09cad0f28f443690d300500603bd0adba4b523"},
- {file = "pydantic_core-2.33.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:401d7b76e1000d0dd5538e6381d28febdcacb097c8d340dde7d7fc6e13e9f95d"},
- {file = "pydantic_core-2.33.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7aeb055a42d734c0255c9e489ac67e75397d59c6fbe60d155851e9782f276a9c"},
- {file = "pydantic_core-2.33.1-cp313-cp313t-win_amd64.whl", hash = "sha256:338ea9b73e6e109f15ab439e62cb3b78aa752c7fd9536794112e14bee02c8d18"},
- {file = "pydantic_core-2.33.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:5ab77f45d33d264de66e1884fca158bc920cb5e27fd0764a72f72f5756ae8bdb"},
- {file = "pydantic_core-2.33.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e7aaba1b4b03aaea7bb59e1b5856d734be011d3e6d98f5bcaa98cb30f375f2ad"},
- {file = "pydantic_core-2.33.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7fb66263e9ba8fea2aa85e1e5578980d127fb37d7f2e292773e7bc3a38fb0c7b"},
- {file = "pydantic_core-2.33.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3f2648b9262607a7fb41d782cc263b48032ff7a03a835581abbf7a3bec62bcf5"},
- {file = "pydantic_core-2.33.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:723c5630c4259400818b4ad096735a829074601805d07f8cafc366d95786d331"},
- {file = "pydantic_core-2.33.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d100e3ae783d2167782391e0c1c7a20a31f55f8015f3293647544df3f9c67824"},
- {file = "pydantic_core-2.33.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:177d50460bc976a0369920b6c744d927b0ecb8606fb56858ff542560251b19e5"},
- {file = "pydantic_core-2.33.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a3edde68d1a1f9af1273b2fe798997b33f90308fb6d44d8550c89fc6a3647cf6"},
- {file = "pydantic_core-2.33.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a62c3c3ef6a7e2c45f7853b10b5bc4ddefd6ee3cd31024754a1a5842da7d598d"},
- {file = "pydantic_core-2.33.1-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:c91dbb0ab683fa0cd64a6e81907c8ff41d6497c346890e26b23de7ee55353f96"},
- {file = "pydantic_core-2.33.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9f466e8bf0a62dc43e068c12166281c2eca72121dd2adc1040f3aa1e21ef8599"},
- {file = "pydantic_core-2.33.1-cp39-cp39-win32.whl", hash = "sha256:ab0277cedb698749caada82e5d099dc9fed3f906a30d4c382d1a21725777a1e5"},
- {file = "pydantic_core-2.33.1-cp39-cp39-win_amd64.whl", hash = "sha256:5773da0ee2d17136b1f1c6fbde543398d452a6ad2a7b54ea1033e2daa739b8d2"},
- {file = "pydantic_core-2.33.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5c834f54f8f4640fd7e4b193f80eb25a0602bba9e19b3cd2fc7ffe8199f5ae02"},
- {file = "pydantic_core-2.33.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:049e0de24cf23766f12cc5cc71d8abc07d4a9deb9061b334b62093dedc7cb068"},
- {file = "pydantic_core-2.33.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a28239037b3d6f16916a4c831a5a0eadf856bdd6d2e92c10a0da3a59eadcf3e"},
- {file = "pydantic_core-2.33.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d3da303ab5f378a268fa7d45f37d7d85c3ec19769f28d2cc0c61826a8de21fe"},
- {file = "pydantic_core-2.33.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:25626fb37b3c543818c14821afe0fd3830bc327a43953bc88db924b68c5723f1"},
- {file = "pydantic_core-2.33.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:3ab2d36e20fbfcce8f02d73c33a8a7362980cff717926bbae030b93ae46b56c7"},
- {file = "pydantic_core-2.33.1-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:2f9284e11c751b003fd4215ad92d325d92c9cb19ee6729ebd87e3250072cdcde"},
- {file = "pydantic_core-2.33.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:048c01eee07d37cbd066fc512b9d8b5ea88ceeb4e629ab94b3e56965ad655add"},
- {file = "pydantic_core-2.33.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:5ccd429694cf26af7997595d627dd2637e7932214486f55b8a357edaac9dae8c"},
- {file = "pydantic_core-2.33.1-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3a371dc00282c4b84246509a5ddc808e61b9864aa1eae9ecc92bb1268b82db4a"},
- {file = "pydantic_core-2.33.1-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:f59295ecc75a1788af8ba92f2e8c6eeaa5a94c22fc4d151e8d9638814f85c8fc"},
- {file = "pydantic_core-2.33.1-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:08530b8ac922003033f399128505f513e30ca770527cc8bbacf75a84fcc2c74b"},
- {file = "pydantic_core-2.33.1-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bae370459da6a5466978c0eacf90690cb57ec9d533f8e63e564ef3822bfa04fe"},
- {file = "pydantic_core-2.33.1-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e3de2777e3b9f4d603112f78006f4ae0acb936e95f06da6cb1a45fbad6bdb4b5"},
- {file = "pydantic_core-2.33.1-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:3a64e81e8cba118e108d7126362ea30e021291b7805d47e4896e52c791be2761"},
- {file = "pydantic_core-2.33.1-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:52928d8c1b6bda03cc6d811e8923dffc87a2d3c8b3bfd2ce16471c7147a24850"},
- {file = "pydantic_core-2.33.1-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:1b30d92c9412beb5ac6b10a3eb7ef92ccb14e3f2a8d7732e2d739f58b3aa7544"},
- {file = "pydantic_core-2.33.1-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:f995719707e0e29f0f41a8aa3bcea6e761a36c9136104d3189eafb83f5cec5e5"},
- {file = "pydantic_core-2.33.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:7edbc454a29fc6aeae1e1eecba4f07b63b8d76e76a748532233c4c167b4cb9ea"},
- {file = "pydantic_core-2.33.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:ad05b683963f69a1d5d2c2bdab1274a31221ca737dbbceaa32bcb67359453cdd"},
- {file = "pydantic_core-2.33.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df6a94bf9452c6da9b5d76ed229a5683d0306ccb91cca8e1eea883189780d568"},
- {file = "pydantic_core-2.33.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7965c13b3967909a09ecc91f21d09cfc4576bf78140b988904e94f130f188396"},
- {file = "pydantic_core-2.33.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3f1fdb790440a34f6ecf7679e1863b825cb5ffde858a9197f851168ed08371e5"},
- {file = "pydantic_core-2.33.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:5277aec8d879f8d05168fdd17ae811dd313b8ff894aeeaf7cd34ad28b4d77e33"},
- {file = "pydantic_core-2.33.1-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:8ab581d3530611897d863d1a649fb0644b860286b4718db919bfd51ece41f10b"},
- {file = "pydantic_core-2.33.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0483847fa9ad5e3412265c1bd72aad35235512d9ce9d27d81a56d935ef489672"},
- {file = "pydantic_core-2.33.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:de9e06abe3cc5ec6a2d5f75bc99b0bdca4f5c719a5b34026f8c57efbdecd2ee3"},
- {file = "pydantic_core-2.33.1.tar.gz", hash = "sha256:bcc9c6fdb0ced789245b02b7d6603e17d1563064ddcfc36f046b61c0c05dd9df"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2b3d326aaef0c0399d9afffeb6367d5e26ddc24d351dbc9c636840ac355dc5d8"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e5b2671f05ba48b94cb90ce55d8bdcaaedb8ba00cc5359f6810fc918713983d"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0069c9acc3f3981b9ff4cdfaf088e98d83440a4c7ea1bc07460af3d4dc22e72d"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d53b22f2032c42eaaf025f7c40c2e3b94568ae077a606f006d206a463bc69572"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0405262705a123b7ce9f0b92f123334d67b70fd1f20a9372b907ce1080c7ba02"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4b25d91e288e2c4e0662b8038a28c6a07eaac3e196cfc4ff69de4ea3db992a1b"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bdfe4b3789761f3bcb4b1ddf33355a71079858958e3a552f16d5af19768fef2"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:efec8db3266b76ef9607c2c4c419bdb06bf335ae433b80816089ea7585816f6a"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:031c57d67ca86902726e0fae2214ce6770bbe2f710dc33063187a68744a5ecac"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:f8de619080e944347f5f20de29a975c2d815d9ddd8be9b9b7268e2e3ef68605a"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:73662edf539e72a9440129f231ed3757faab89630d291b784ca99237fb94db2b"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-win32.whl", hash = "sha256:0a39979dcbb70998b0e505fb1556a1d550a0781463ce84ebf915ba293ccb7e22"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-win_amd64.whl", hash = "sha256:b0379a2b24882fef529ec3b4987cb5d003b9cda32256024e6fe1586ac45fc640"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:4c5b0a576fb381edd6d27f0a85915c6daf2f8138dc5c267a57c08a62900758c7"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e799c050df38a639db758c617ec771fd8fb7a5f8eaaa4b27b101f266b216a246"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dc46a01bf8d62f227d5ecee74178ffc448ff4e5197c756331f71efcc66dc980f"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a144d4f717285c6d9234a66778059f33a89096dfb9b39117663fd8413d582dcc"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:73cf6373c21bc80b2e0dc88444f41ae60b2f070ed02095754eb5a01df12256de"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3dc625f4aa79713512d1976fe9f0bc99f706a9dee21dfd1810b4bbbf228d0e8a"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:881b21b5549499972441da4758d662aeea93f1923f953e9cbaff14b8b9565aef"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bdc25f3681f7b78572699569514036afe3c243bc3059d3942624e936ec93450e"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:fe5b32187cbc0c862ee201ad66c30cf218e5ed468ec8dc1cf49dec66e160cc4d"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:bc7aee6f634a6f4a95676fcb5d6559a2c2a390330098dba5e5a5f28a2e4ada30"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:235f45e5dbcccf6bd99f9f472858849f73d11120d76ea8707115415f8e5ebebf"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-win32.whl", hash = "sha256:6368900c2d3ef09b69cb0b913f9f8263b03786e5b2a387706c5afb66800efd51"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-win_amd64.whl", hash = "sha256:1e063337ef9e9820c77acc768546325ebe04ee38b08703244c1309cccc4f1bab"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-win_arm64.whl", hash = "sha256:6b99022f1d19bc32a4c2a0d544fc9a76e3be90f0b3f4af413f87d38749300e65"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a7ec89dc587667f22b6a0b6579c249fca9026ce7c333fc142ba42411fa243cdc"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3c6db6e52c6d70aa0d00d45cdb9b40f0433b96380071ea80b09277dba021ddf7"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e61206137cbc65e6d5256e1166f88331d3b6238e082d9f74613b9b765fb9025"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb8c529b2819c37140eb51b914153063d27ed88e3bdc31b71198a198e921e011"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c52b02ad8b4e2cf14ca7b3d918f3eb0ee91e63b3167c32591e57c4317e134f8f"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96081f1605125ba0855dfda83f6f3df5ec90c61195421ba72223de35ccfb2f88"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f57a69461af2a5fa6e6bbd7a5f60d3b7e6cebb687f55106933188e79ad155c1"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:572c7e6c8bb4774d2ac88929e3d1f12bc45714ae5ee6d9a788a9fb35e60bb04b"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:db4b41f9bd95fbe5acd76d89920336ba96f03e149097365afe1cb092fceb89a1"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:fa854f5cf7e33842a892e5c73f45327760bc7bc516339fda888c75ae60edaeb6"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5f483cfb75ff703095c59e365360cb73e00185e01aaea067cd19acffd2ab20ea"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-win32.whl", hash = "sha256:9cb1da0f5a471435a7bc7e439b8a728e8b61e59784b2af70d7c169f8dd8ae290"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-win_amd64.whl", hash = "sha256:f941635f2a3d96b2973e867144fde513665c87f13fe0e193c158ac51bfaaa7b2"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-win_arm64.whl", hash = "sha256:cca3868ddfaccfbc4bfb1d608e2ccaaebe0ae628e1416aeb9c4d88c001bb45ab"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1082dd3e2d7109ad8b7da48e1d4710c8d06c253cbc4a27c1cff4fbcaa97a9e3f"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f517ca031dfc037a9c07e748cefd8d96235088b83b4f4ba8939105d20fa1dcd6"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a9f2c9dd19656823cb8250b0724ee9c60a82f3cdf68a080979d13092a3b0fef"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b0a451c263b01acebe51895bfb0e1cc842a5c666efe06cdf13846c7418caa9a"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ea40a64d23faa25e62a70ad163571c0b342b8bf66d5fa612ac0dec4f069d916"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fb2d542b4d66f9470e8065c5469ec676978d625a8b7a363f07d9a501a9cb36a"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdac5d6ffa1b5a83bca06ffe7583f5576555e6c8b3a91fbd25ea7780f825f7d"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04a1a413977ab517154eebb2d326da71638271477d6ad87a769102f7c2488c56"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c8e7af2f4e0194c22b5b37205bfb293d166a7344a5b0d0eaccebc376546d77d5"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:5c92edd15cd58b3c2d34873597a1e20f13094f59cf88068adb18947df5455b4e"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:65132b7b4a1c0beded5e057324b7e16e10910c106d43675d9bd87d4f38dde162"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-win32.whl", hash = "sha256:52fb90784e0a242bb96ec53f42196a17278855b0f31ac7c3cc6f5c1ec4811849"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-win_amd64.whl", hash = "sha256:c083a3bdd5a93dfe480f1125926afcdbf2917ae714bdb80b36d34318b2bec5d9"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-win_arm64.whl", hash = "sha256:e80b087132752f6b3d714f041ccf74403799d3b23a72722ea2e6ba2e892555b9"},
+ {file = "pydantic_core-2.33.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:61c18fba8e5e9db3ab908620af374db0ac1baa69f0f32df4f61ae23f15e586ac"},
+ {file = "pydantic_core-2.33.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95237e53bb015f67b63c91af7518a62a8660376a6a0db19b89acc77a4d6199f5"},
+ {file = "pydantic_core-2.33.2-cp313-cp313t-win_amd64.whl", hash = "sha256:c2fc0a768ef76c15ab9238afa6da7f69895bb5d1ee83aeea2e3509af4472d0b9"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a2b911a5b90e0374d03813674bf0a5fbbb7741570dcd4b4e85a2e48d17def29d"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6fa6dfc3e4d1f734a34710f391ae822e0a8eb8559a85c6979e14e65ee6ba2954"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c54c939ee22dc8e2d545da79fc5381f1c020d6d3141d3bd747eab59164dc89fb"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53a57d2ed685940a504248187d5685e49eb5eef0f696853647bf37c418c538f7"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:09fb9dd6571aacd023fe6aaca316bd01cf60ab27240d7eb39ebd66a3a15293b4"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0e6116757f7959a712db11f3e9c0a99ade00a5bbedae83cb801985aa154f071b"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d55ab81c57b8ff8548c3e4947f119551253f4e3787a7bbc0b6b3ca47498a9d3"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c20c462aa4434b33a2661701b861604913f912254e441ab8d78d30485736115a"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:44857c3227d3fb5e753d5fe4a3420d6376fa594b07b621e220cd93703fe21782"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:eb9b459ca4df0e5c87deb59d37377461a538852765293f9e6ee834f0435a93b9"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9fcd347d2cc5c23b06de6d3b7b8275be558a0c90549495c699e379a80bf8379e"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-win32.whl", hash = "sha256:83aa99b1285bc8f038941ddf598501a86f1536789740991d7d8756e34f1e74d9"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-win_amd64.whl", hash = "sha256:f481959862f57f29601ccced557cc2e817bce7533ab8e01a797a48b49c9692b3"},
+ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5c4aa4e82353f65e548c476b37e64189783aa5384903bfea4f41580f255fddfa"},
+ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d946c8bf0d5c24bf4fe333af284c59a19358aa3ec18cb3dc4370080da1e8ad29"},
+ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87b31b6846e361ef83fedb187bb5b4372d0da3f7e28d85415efa92d6125d6e6d"},
+ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa9d91b338f2df0508606f7009fde642391425189bba6d8c653afd80fd6bb64e"},
+ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2058a32994f1fde4ca0480ab9d1e75a0e8c87c22b53a3ae66554f9af78f2fe8c"},
+ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:0e03262ab796d986f978f79c943fc5f620381be7287148b8010b4097f79a39ec"},
+ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:1a8695a8d00c73e50bff9dfda4d540b7dee29ff9b8053e38380426a85ef10052"},
+ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:fa754d1850735a0b0e03bcffd9d4b4343eb417e47196e4485d9cca326073a42c"},
+ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a11c8d26a50bfab49002947d3d237abe4d9e4b5bdc8846a63537b6488e197808"},
+ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:dd14041875d09cc0f9308e37a6f8b65f5585cf2598a53aa0123df8b129d481f8"},
+ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:d87c561733f66531dced0da6e864f44ebf89a8fba55f31407b00c2f7f9449593"},
+ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f82865531efd18d6e07a04a17331af02cb7a651583c418df8266f17a63c6612"},
+ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bfb5112df54209d820d7bf9317c7a6c9025ea52e49f46b6a2060104bba37de7"},
+ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:64632ff9d614e5eecfb495796ad51b0ed98c453e447a76bcbeeb69615079fc7e"},
+ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:f889f7a40498cc077332c7ab6b4608d296d852182211787d4f3ee377aaae66e8"},
+ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:de4b83bb311557e439b9e186f733f6c645b9417c84e2eb8203f3f820a4b988bf"},
+ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:82f68293f055f51b51ea42fafc74b6aad03e70e191799430b90c13d643059ebb"},
+ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:329467cecfb529c925cf2bbd4d60d2c509bc2fb52a20c1045bf09bb70971a9c1"},
+ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:87acbfcf8e90ca885206e98359d7dca4bcbb35abdc0ff66672a293e1d7a19101"},
+ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:7f92c15cd1e97d4b12acd1cc9004fa092578acfa57b67ad5e43a197175d01a64"},
+ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3f26877a748dc4251cfcfda9dfb5f13fcb034f5308388066bcfe9031b63ae7d"},
+ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dac89aea9af8cd672fa7b510e7b8c33b0bba9a43186680550ccf23020f32d535"},
+ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:970919794d126ba8645f3837ab6046fb4e72bbc057b3709144066204c19a455d"},
+ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:3eb3fe62804e8f859c49ed20a8451342de53ed764150cb14ca71357c765dc2a6"},
+ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:3abcd9392a36025e3bd55f9bd38d908bd17962cc49bc6da8e7e96285336e2bca"},
+ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:3a1c81334778f9e3af2f8aeb7a960736e5cab1dfebfb26aabca09afd2906c039"},
+ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2807668ba86cb38c6817ad9bc66215ab8584d1d304030ce4f0887336f28a5e27"},
+ {file = "pydantic_core-2.33.2.tar.gz", hash = "sha256:7cb8bc3605c29176e1b105350d2e6474142d7c1bd1d9327c4a9bdb46bf827acc"},
]
[package.dependencies]
@@ -1729,13 +1729,13 @@ files = [
[[package]]
name = "replicate"
-version = "1.0.4"
+version = "1.0.6"
description = "Python client for Replicate"
optional = false
python-versions = ">=3.8"
files = [
- {file = "replicate-1.0.4-py3-none-any.whl", hash = "sha256:f568f6271ff715067901b6094c23c37373bbcfd7de0ff9b85e9c9ead567e09e7"},
- {file = "replicate-1.0.4.tar.gz", hash = "sha256:f718601863ef1f419aa7dcdab1ea8770ba5489b571b86edf840cd506d68758ef"},
+ {file = "replicate-1.0.6-py3-none-any.whl", hash = "sha256:d544f837dc7e9dc3b3c1df60a145c7d6f362d6719b719793a44a4be28837103d"},
+ {file = "replicate-1.0.6.tar.gz", hash = "sha256:b8a0f1649ed4146c3d624e22a418b8c6decce9346cffc110c90fde5995c46e60"},
]
[package.dependencies]
diff --git a/pyproject.toml b/pyproject.toml
index ad96beec..73f2c3d4 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -3,7 +3,7 @@ name = "humanloop"
[tool.poetry]
name = "humanloop"
-version = "0.8.35"
+version = "0.8.36"
description = ""
readme = "README.md"
authors = []
diff --git a/reference.md b/reference.md
index 27a32c92..8d531f66 100644
--- a/reference.md
+++ b/reference.md
@@ -56,7 +56,7 @@ client.prompts.log(
messages=[{"role": "user", "content": "What really happened at Roswell?"}],
inputs={"person": "Trump"},
created_at=datetime.datetime.fromisoformat(
- "2024-07-18 21:29:35.178000+00:00",
+ "2024-07-18 23:29:35.178000+00:00",
),
provider_latency=6.5931549072265625,
output_message={
@@ -202,7 +202,12 @@ Controls how the model uses tools. The following options are supported:
-
-**prompt:** `typing.Optional[PromptKernelRequestParams]` — Details of your Prompt. A new Prompt version will be created if the provided details are new.
+**prompt:** `typing.Optional[PromptLogRequestPromptParams]`
+
+The prompt configuration to use. Two formats are supported:
+- A `'PromptKernelRequest'` object containing the prompt configuration
+- A string containing a serialized .prompt file
+A new Prompt version will be created if the provided details are new.
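+
+As a minimal sketch of the two accepted formats (the path, model, and template
+values below are illustrative, not taken from this patch):
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+    api_key="YOUR_API_KEY",
+)
+
+# Format 1: a PromptKernelRequest-style object with the prompt configuration.
+client.prompts.log(
+    path="qa/answer-question",
+    prompt={
+        "model": "gpt-4o",
+        "template": [
+            {"role": "system", "content": "You answer {{topic}} questions."}
+        ],
+        "temperature": 0.7,
+    },
+    messages=[{"role": "user", "content": "What is a monad?"}],
+)
+
+# Format 2: a string holding the contents of a serialized .prompt file,
+# e.g. one previously written out by client.prompts.serialize.
+serialized = open("answer-question.prompt").read()
+client.prompts.log(
+    path="qa/answer-question",
+    prompt=serialized,
+)
+```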
@@ -752,7 +757,12 @@ Controls how the model uses tools. The following options are supported:
-
-**prompt:** `typing.Optional[PromptKernelRequestParams]` — Details of your Prompt. A new Prompt version will be created if the provided details are new.
+**prompt:** `typing.Optional[PromptsCallStreamRequestPromptParams]`
+
+The prompt configuration to use. Two formats are supported:
+- A `'PromptKernelRequest'` object containing the prompt configuration
+- A string containing a serialized .prompt file
+A new Prompt version will be created if the provided details are new.
@@ -1026,7 +1036,12 @@ Controls how the model uses tools. The following options are supported:
-
-**prompt:** `typing.Optional[PromptKernelRequestParams]` — Details of your Prompt. A new Prompt version will be created if the provided details are new.
+**prompt:** `typing.Optional[PromptsCallRequestPromptParams]`
+
+The prompt configuration to use. Two formats are supported:
+- A `'PromptKernelRequest'` object containing the prompt configuration
+- A string containing a serialized .prompt file
+A new Prompt version will be created if the provided details are new.
@@ -1501,7 +1516,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
-
-**reasoning_effort:** `typing.Optional[ReasoningEffort]` — Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+**reasoning_effort:** `typing.Optional[PromptRequestReasoningEffortParams]` — Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an `OpenAIReasoningEffort` enum. Anthropic reasoning models expect an integer, which specifies the maximum token budget.
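+
+A hedged sketch of the two value shapes, assuming `client.prompts.upsert` as the
+call site and placeholder model names (neither is pinned down by this patch):
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+    api_key="YOUR_API_KEY",
+)
+
+# OpenAI reasoning models take an OpenAIReasoningEffort enum value
+# (assumed here to be one of "low" | "medium" | "high").
+client.prompts.upsert(
+    path="reasoning/solver",
+    model="o3-mini",
+    reasoning_effort="medium",
+)
+
+# Anthropic reasoning models instead take an integer: the maximum
+# token budget the model may spend on reasoning.
+client.prompts.upsert(
+    path="reasoning/solver",
+    model="claude-3-7-sonnet-latest",
+    reasoning_effort=4096,
+)
+```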
@@ -2518,8 +2533,7 @@ client.prompts.update_monitoring(
-## Tools
-client.tools.log(...)
+client.prompts.serialize(...)
-
@@ -2531,15 +2545,13 @@ client.prompts.update_monitoring(
-
-Log to a Tool.
+Serialize a Prompt to the .prompt file format.
-You can use query parameters `version_id`, or `environment`, to target
-an existing version of the Tool. Otherwise the default deployed version will be chosen.
+Useful for storing the Prompt with your code in a version control system,
+or for editing with an AI tool.
-Instead of targeting an existing version explicitly, you can instead pass in
-Tool details in the request body. In this case, we will check if the details correspond
-to an existing version of the Tool, if not we will create a new version. This is helpful
-in the case where you are storing or deriving your Tool details in code.
+By default, the deployed version of the Prompt is returned. Use the query parameters
+`version_id` or `environment` to target a specific version of the Prompt.
@@ -2559,24 +2571,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.tools.log(
- path="math-tool",
- tool={
- "function": {
- "name": "multiply",
- "description": "Multiply two numbers",
- "parameters": {
- "type": "object",
- "properties": {
- "a": {"type": "number"},
- "b": {"type": "number"},
- },
- "required": ["a", "b"],
- },
- }
- },
- inputs={"a": 5, "b": 7},
- output="35",
+client.prompts.serialize(
+ id="id",
)
```
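+
+The serialized Prompt can then be committed alongside your code (a sketch; the ID is a placeholder and the call is assumed to return the serialized string):
+
+```python
+serialized = client.prompts.serialize(id="pr_1234abc")
+with open("my-prompt.prompt", "w") as f:
+    f.write(serialized)
+```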
@@ -2593,7 +2589,7 @@ client.tools.log(
-
-**version_id:** `typing.Optional[str]` — A specific Version ID of the Tool to log to.
+**id:** `str` — Unique identifier for Prompt.
@@ -2601,7 +2597,7 @@ client.tools.log(
-
-**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Prompt to retrieve.
@@ -2609,7 +2605,7 @@ client.tools.log(
-
-**path:** `typing.Optional[str]` — Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
@@ -2617,31 +2613,72 @@ client.tools.log(
-
-**id:** `typing.Optional[str]` — ID for an existing Tool.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+client.prompts.deserialize(...)
-
-**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
-
+#### 📝 Description
+
+
+-
+
+
+-
+
+Deserialize a Prompt from the .prompt file format.
+
+This returns a subset of the attributes required by a Prompt.
+This subset defines the Prompt version (e.g. `model`, `temperature`, etc.).
+
+
+#### 🔌 Usage
+
-
-**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
-
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.prompts.deserialize(
+ prompt="prompt",
+)
+
+```
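+
+Together with `serialize`, this enables a round trip through version control (a sketch; the file name is illustrative):
+
+```python
+with open("my-prompt.prompt") as f:
+    kernel = client.prompts.deserialize(prompt=f.read())
+```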
+
+
+#### ⚙️ Parameters
+
-
-**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+-
+
+**prompt:** `str` — The serialized Prompt, in the .prompt file format.
@@ -2649,15 +2686,78 @@ client.tools.log(
-
-**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+## Tools
+client.tools.call(...)
-
-**error:** `typing.Optional[str]` — Error message if the log is an error.
+#### 📝 Description
+
+
+-
+
+
+-
+
+Call a Tool.
+
+Calling a Tool with inputs runs the Tool's source code and logs the result and metadata to Humanloop.
+
+You can use query parameters `version_id` or `environment` to target
+an existing version of the Tool. Otherwise, the default deployed version will be chosen.
+
+Instead of targeting an existing version explicitly, you can pass in
+Tool details in the request body. In this case, we will check if the details correspond
+to an existing version of the Tool. If they do not, we will create a new version. This is helpful
+in the case where you are storing or deriving your Tool details in code.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.tools.call()
+
+```
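+
+A fuller sketch targeting a specific Tool (assuming the Tool accepts `inputs`, as with `tools.log`; the path and values are placeholders):
+
+```python
+response = client.tools.call(
+    path="math-tool",
+    inputs={"a": 5, "b": 7},
+)
+```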
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Tool to call.
@@ -2665,7 +2765,7 @@ client.tools.log(
-
-**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
+**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to call.
@@ -2673,7 +2773,7 @@ client.tools.log(
-
-**stdout:** `typing.Optional[str]` — Captured log and debug statements.
+**path:** `typing.Optional[str]` — Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
@@ -2681,7 +2781,7 @@ client.tools.log(
-
-**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider.
+**id:** `typing.Optional[str]` — ID for an existing Tool.
@@ -2689,7 +2789,7 @@ client.tools.log(
-
-**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received the provider.
+**tool:** `typing.Optional[ToolKernelRequestParams]` — Details of your Tool. A new Tool version will be created if the provided details are new.
@@ -2721,7 +2821,7 @@ client.tools.log(
-
-**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
@@ -2729,7 +2829,7 @@ client.tools.log(
-
-**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
@@ -2737,7 +2837,7 @@ client.tools.log(
-
-**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
+**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
@@ -2745,7 +2845,7 @@ client.tools.log(
-
-**user:** `typing.Optional[str]` — End-user ID related to the Log.
+**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -2753,7 +2853,7 @@ client.tools.log(
-
-**tool_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to.
+**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
@@ -2761,7 +2861,7 @@ client.tools.log(
-
-**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
+**user:** `typing.Optional[str]` — End-user ID related to the Log.
@@ -2769,7 +2869,7 @@ client.tools.log(
-
-**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+**tool_call_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to.
@@ -2777,7 +2877,15 @@ client.tools.log(
-
-**tool:** `typing.Optional[ToolKernelRequestParams]` — Details of your Tool. A new Tool version will be created if the provided details are new.
+**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
+
+
+
+
+
+-
+
+**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
@@ -2797,7 +2905,7 @@ client.tools.log(
-client.tools.update(...)
+client.tools.log(...)
-
@@ -2809,9 +2917,15 @@ client.tools.log(
-
-Update a Log.
+Log to a Tool.
-Update the details of a Log with the given ID.
+You can use query parameters `version_id` or `environment` to target
+an existing version of the Tool. Otherwise, the default deployed version will be chosen.
+
+Instead of targeting an existing version explicitly, you can pass in
+Tool details in the request body. In this case, we will check if the details correspond
+to an existing version of the Tool. If not, we will create a new version. This is helpful
+in the case where you are storing or deriving your Tool details in code.
@@ -2831,9 +2945,24 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.tools.update(
- id="id",
- log_id="log_id",
+client.tools.log(
+ path="math-tool",
+ tool={
+ "function": {
+ "name": "multiply",
+ "description": "Multiply two numbers",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "a": {"type": "number"},
+ "b": {"type": "number"},
+ },
+ "required": ["a", "b"],
+ },
+ }
+ },
+ inputs={"a": 5, "b": 7},
+ output="35",
)
```
@@ -2850,7 +2979,7 @@ client.tools.update(
-
-**id:** `str` — Unique identifier for Prompt.
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Tool to log to.
@@ -2858,7 +2987,7 @@ client.tools.update(
-
-**log_id:** `str` — Unique identifier for the Log.
+**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
@@ -2866,7 +2995,7 @@ client.tools.update(
-
-**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+**path:** `typing.Optional[str]` — Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
@@ -2874,7 +3003,7 @@ client.tools.update(
-
-**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
+**id:** `typing.Optional[str]` — ID for an existing Tool.
@@ -2882,7 +3011,7 @@ client.tools.update(
-
-**error:** `typing.Optional[str]` — Error message if the log is an error.
+**tool:** `typing.Optional[ToolKernelRequestParams]` — Details of your Tool. A new Tool version will be created if the provided details are new.
@@ -2890,7 +3019,7 @@ client.tools.update(
-
-**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
+**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
@@ -2898,7 +3027,7 @@ client.tools.update(
-
-**stdout:** `typing.Optional[str]` — Captured log and debug statements.
+**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
@@ -2906,7 +3035,7 @@ client.tools.update(
-
-**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider.
+**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
@@ -2914,7 +3043,7 @@ client.tools.update(
-
-**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received the provider.
+**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
@@ -2922,7 +3051,7 @@ client.tools.update(
-
-**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
+**error:** `typing.Optional[str]` — Error message if the log is an error.
@@ -2930,7 +3059,7 @@ client.tools.update(
-
-**source:** `typing.Optional[str]` — Identifies where the model was called from.
+**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
@@ -2938,7 +3067,7 @@ client.tools.update(
-
-**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
+**stdout:** `typing.Optional[str]` — Captured log and debug statements.
@@ -2946,7 +3075,7 @@ client.tools.update(
-
-**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
+**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider.
@@ -2954,7 +3083,7 @@ client.tools.update(
-
-**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
+**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received from the provider.
@@ -2962,7 +3091,7 @@ client.tools.update(
-
-**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
@@ -2970,74 +3099,31 @@ client.tools.update(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**source:** `typing.Optional[str]` — Identifies where the model was called from.
-
-
+
+-
+**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
+
-
-client.tools.list(...)
-
-#### 📝 Description
-
-
--
+**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+
+
-
-Get a list of all Tools.
-
-
-
-
-
-#### 🔌 Usage
-
-
--
-
-
--
-
-```python
-from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-response = client.tools.list(
- size=1,
-)
-for item in response:
- yield item
-# alternatively, you can paginate page-by-page
-for page in response.iter_pages():
- yield page
-
-```
-
-
-
-
-
-#### ⚙️ Parameters
-
-
--
-
-
--
-
-**page:** `typing.Optional[int]` — Page offset for pagination.
+**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -3045,7 +3131,7 @@ for page in response.iter_pages():
-
-**size:** `typing.Optional[int]` — Page size for pagination. Number of Tools to fetch.
+**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
@@ -3053,7 +3139,7 @@ for page in response.iter_pages():
-
-**name:** `typing.Optional[str]` — Case-insensitive filter for Tool name.
+**user:** `typing.Optional[str]` — End-user ID related to the Log.
@@ -3061,7 +3147,7 @@ for page in response.iter_pages():
-
-**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Tool. This filter matches against both email address and name of users.
+**tool_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to.
@@ -3069,7 +3155,7 @@ for page in response.iter_pages():
-
-**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Tools by
+**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
@@ -3077,7 +3163,7 @@ for page in response.iter_pages():
-
-**order:** `typing.Optional[SortOrder]` — Direction to sort by.
+**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
@@ -3097,7 +3183,7 @@ for page in response.iter_pages():
-client.tools.upsert(...)
+client.tools.update(...)
-
@@ -3109,13 +3195,9 @@ for page in response.iter_pages():
-
-Create a Tool or update it with a new version if it already exists.
-
-Tools are identified by the `ID` or their `path`. The name, description and parameters determine the versions of the Tool.
+Update a Log.
-You can provide `version_name` and `version_description` to identify and describe your versions.
-Version names must be unique within a Tool - attempting to create a version with a name
-that already exists will result in a 409 Conflict error.
+Update the details of a Log with the given ID.
@@ -3135,19 +3217,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.tools.upsert(
- path="math-tool",
- function={
- "name": "multiply",
- "description": "Multiply two numbers",
- "parameters": {
- "type": "object",
- "properties": {"a": {"type": "number"}, "b": {"type": "number"}},
- "required": ["a", "b"],
- },
- },
- version_name="math-tool-v1",
- version_description="Simple math tool that multiplies two numbers",
+client.tools.update(
+ id="id",
+ log_id="log_id",
)
```
@@ -3164,7 +3236,7 @@ client.tools.upsert(
-
-**path:** `typing.Optional[str]` — Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+**id:** `str` — Unique identifier for Tool.
@@ -3172,7 +3244,7 @@ client.tools.upsert(
-
-**id:** `typing.Optional[str]` — ID for an existing Tool.
+**log_id:** `str` — Unique identifier for the Log.
@@ -3180,7 +3252,7 @@ client.tools.upsert(
-
-**function:** `typing.Optional[ToolFunctionParams]` — Callable function specification of the Tool shown to the model for tool calling.
+**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
@@ -3188,7 +3260,7 @@ client.tools.upsert(
-
-**source_code:** `typing.Optional[str]` — Code source of the Tool.
+**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
@@ -3196,7 +3268,7 @@ client.tools.upsert(
-
-**setup_values:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Values needed to setup the Tool, defined in JSON Schema format: https://json-schema.org/
+**error:** `typing.Optional[str]` — Error message if the log is an error.
@@ -3204,7 +3276,7 @@ client.tools.upsert(
-
-**attributes:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Additional fields to describe the Tool. Helpful to separate Tool versions from each other with details on how they were created or used.
+**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
@@ -3212,7 +3284,7 @@ client.tools.upsert(
-
-**tool_type:** `typing.Optional[FilesToolType]` — Type of Tool.
+**stdout:** `typing.Optional[str]` — Captured log and debug statements.
@@ -3220,7 +3292,7 @@ client.tools.upsert(
-
-**version_name:** `typing.Optional[str]` — Unique identifier for this Tool version. Each Tool can only have one version with a given name.
+**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider.
@@ -3228,7 +3300,7 @@ client.tools.upsert(
-
-**version_description:** `typing.Optional[str]` — Description of the Version.
+**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received from the provider.
@@ -3236,72 +3308,31 @@ client.tools.upsert(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
-
-
-
-
-
-
-
-
-client.tools.get(...)
-
--
-
-#### 📝 Description
-
-
--
-
-Retrieve the Tool with the given ID.
-
-By default, the deployed version of the Tool is returned. Use the query parameters
-`version_id` or `environment` to target a specific version of the Tool.
-
-
+**source:** `typing.Optional[str]` — Identifies where the model was called from.
+
-#### 🔌 Usage
-
-
--
-
-
-```python
-from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.tools.get(
- id="tl_789ghi",
-)
-
-```
-
-
+**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
+
-#### ⚙️ Parameters
-
-
--
-
-
-**id:** `str` — Unique identifier for Tool.
+**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
@@ -3309,7 +3340,7 @@ client.tools.get(
-
-**version_id:** `typing.Optional[str]` — A specific Version ID of the Tool to retrieve.
+**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
@@ -3317,7 +3348,7 @@ client.tools.get(
-
-**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
+**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
@@ -3337,7 +3368,7 @@ client.tools.get(
-client.tools.delete(...)
+client.tools.list(...)
-
@@ -3349,7 +3380,7 @@ client.tools.get(
-
-Delete the Tool with the given ID.
+Get a list of all Tools.
@@ -3369,9 +3400,14 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.tools.delete(
- id="tl_789ghi",
+response = client.tools.list(
+ size=1,
)
+for item in response:
+    print(item)
+# alternatively, you can paginate page-by-page
+for page in response.iter_pages():
+    print(page)
```
@@ -3387,7 +3423,7 @@ client.tools.delete(
-
-**id:** `str` — Unique identifier for Tool.
+**page:** `typing.Optional[int]` — Page offset for pagination.
@@ -3395,70 +3431,23 @@ client.tools.delete(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**size:** `typing.Optional[int]` — Page size for pagination. Number of Tools to fetch.
-
-
-
-
-
-
-
-
-client.tools.move(...)
-
--
-
-#### 📝 Description
-
-
--
-
-
--
-
-Move the Tool to a different path or change the name.
-
-
-
-
-
-#### 🔌 Usage
-
-
--
-
-```python
-from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.tools.move(
- id="tl_789ghi",
- path="new directory/new name",
-)
-
-```
-
-
+**name:** `typing.Optional[str]` — Case-insensitive filter for Tool name.
+
-#### ⚙️ Parameters
-
-
--
-
-
-**id:** `str` — Unique identifier for Tool.
+**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Tool. This filter matches against both email address and name of users.
@@ -3466,7 +3455,7 @@ client.tools.move(
-
-**path:** `typing.Optional[str]` — Path of the Tool including the Tool name, which is used as a unique identifier.
+**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Tools by.
@@ -3474,7 +3463,7 @@ client.tools.move(
-
-**name:** `typing.Optional[str]` — Name of the Tool, which is used as a unique identifier.
+**order:** `typing.Optional[SortOrder]` — Direction to sort by.
@@ -3494,7 +3483,7 @@ client.tools.move(
-client.tools.list_versions(...)
+client.tools.upsert(...)
-
@@ -3506,7 +3495,13 @@ client.tools.move(
-
-Get a list of all the versions of a Tool.
+Create a Tool or update it with a new version if it already exists.
+
+Tools are identified by the `ID` or their `path`. The name, description and parameters determine the versions of the Tool.
+
+You can provide `version_name` and `version_description` to identify and describe your versions.
+Version names must be unique within a Tool - attempting to create a version with a name
+that already exists will result in a 409 Conflict error.
@@ -3526,8 +3521,19 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.tools.list_versions(
- id="tl_789ghi",
+client.tools.upsert(
+ path="math-tool",
+ function={
+ "name": "multiply",
+ "description": "Multiply two numbers",
+ "parameters": {
+ "type": "object",
+ "properties": {"a": {"type": "number"}, "b": {"type": "number"}},
+ "required": ["a", "b"],
+ },
+ },
+ version_name="math-tool-v1",
+ version_description="Simple math tool that multiplies two numbers",
)
```
@@ -3544,7 +3550,7 @@ client.tools.list_versions(
-
-**id:** `str` — Unique identifier for the Tool.
+**path:** `typing.Optional[str]` — Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
@@ -3552,7 +3558,63 @@ client.tools.list_versions(
-
-**evaluator_aggregates:** `typing.Optional[bool]` — Whether to include Evaluator aggregate results for the versions in the response
+**id:** `typing.Optional[str]` — ID for an existing Tool.
+
+
+
+
+
+-
+
+**function:** `typing.Optional[ToolFunctionParams]` — Callable function specification of the Tool shown to the model for tool calling.
+
+
+
+
+
+-
+
+**source_code:** `typing.Optional[str]` — Code source of the Tool.
+
+
+
+
+
+-
+
+**setup_values:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Values needed to set up the Tool, defined in JSON Schema format: https://json-schema.org/
+
+
+
+
+
+-
+
+**attributes:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Additional fields to describe the Tool. Helpful to separate Tool versions from each other with details on how they were created or used.
+
+
+
+
+
+-
+
+**tool_type:** `typing.Optional[FilesToolType]` — Type of Tool.
+
+
+
+
+
+-
+
+**version_name:** `typing.Optional[str]` — Unique identifier for this Tool version. Each Tool can only have one version with a given name.
+
+
+
+
+
+-
+
+**version_description:** `typing.Optional[str]` — Description of the Version.
@@ -3572,7 +3634,7 @@ client.tools.list_versions(
-client.tools.delete_tool_version(...)
+client.tools.get(...)
-
@@ -3584,7 +3646,10 @@ client.tools.list_versions(
-
-Delete a version of the Tool.
+Retrieve the Tool with the given ID.
+
+By default, the deployed version of the Tool is returned. Use the query parameters
+`version_id` or `environment` to target a specific version of the Tool.
@@ -3604,9 +3669,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.tools.delete_tool_version(
- id="id",
- version_id="version_id",
+client.tools.get(
+ id="tl_789ghi",
)
```
@@ -3631,7 +3695,15 @@ client.tools.delete_tool_version(
-
-**version_id:** `str` — Unique identifier for the specific version of the Tool.
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Tool to retrieve.
+
+
+
+
+
+-
+
+**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
@@ -3651,7 +3723,7 @@ client.tools.delete_tool_version(
-client.tools.update_tool_version(...)
+client.tools.delete(...)
-
@@ -3663,7 +3735,7 @@ client.tools.delete_tool_version(
-
-Update the name or description of the Tool version.
+Delete the Tool with the given ID.
@@ -3683,9 +3755,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.tools.update_tool_version(
- id="id",
- version_id="version_id",
+client.tools.delete(
+ id="tl_789ghi",
)
```
@@ -3710,30 +3781,6 @@ client.tools.update_tool_version(
-
-**version_id:** `str` — Unique identifier for the specific version of the Tool.
-
-
-
-
-
--
-
-**name:** `typing.Optional[str]` — Name of the version.
-
-
-
-
-
--
-
-**description:** `typing.Optional[str]` — Description of the version.
-
-
-
-
-
--
-
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -3746,7 +3793,7 @@ client.tools.update_tool_version(
-client.tools.set_deployment(...)
+client.tools.move(...)
-
@@ -3758,10 +3805,7 @@ client.tools.update_tool_version(
-
-Deploy Tool to an Environment.
-
-Set the deployed version for the specified Environment. This Prompt
-will be used for calls made to the Tool in this Environment.
+Move the Tool to a different path or change the name.
@@ -3781,10 +3825,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.tools.set_deployment(
+client.tools.move(
id="tl_789ghi",
- environment_id="staging",
- version_id="tv_012jkl",
+ path="new directory/new name",
)
```
@@ -3809,7 +3852,7 @@ client.tools.set_deployment(
-
-**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
+**path:** `typing.Optional[str]` — Path of the Tool including the Tool name, which is used as a unique identifier.
@@ -3817,7 +3860,7 @@ client.tools.set_deployment(
-
-**version_id:** `str` — Unique identifier for the specific version of the Tool.
+**name:** `typing.Optional[str]` — Name of the Tool, which is used as a unique identifier.
@@ -3837,7 +3880,7 @@ client.tools.set_deployment(
-client.tools.remove_deployment(...)
+client.tools.list_versions(...)
-
@@ -3849,10 +3892,7 @@ client.tools.set_deployment(
-
-Remove deployed Tool from the Environment.
-
-Remove the deployed version for the specified Environment. This Tool
-will no longer be used for calls made to the Tool in this Environment.
+Get a list of all the versions of a Tool.
@@ -3872,9 +3912,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.tools.remove_deployment(
+client.tools.list_versions(
id="tl_789ghi",
- environment_id="staging",
)
```
@@ -3891,7 +3930,7 @@ client.tools.remove_deployment(
-
-**id:** `str` — Unique identifier for Tool.
+**id:** `str` — Unique identifier for the Tool.
@@ -3899,7 +3938,7 @@ client.tools.remove_deployment(
-
-**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
+**evaluator_aggregates:** `typing.Optional[bool]` — Whether to include Evaluator aggregate results for the versions in the response.
@@ -3919,7 +3958,7 @@ client.tools.remove_deployment(
-client.tools.list_environments(...)
+client.tools.delete_tool_version(...)
-
@@ -3931,7 +3970,7 @@ client.tools.remove_deployment(
-
-List all Environments and their deployed versions for the Tool.
+Delete a version of the Tool.
@@ -3951,8 +3990,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.tools.list_environments(
- id="tl_789ghi",
+client.tools.delete_tool_version(
+ id="id",
+ version_id="version_id",
)
```
@@ -3977,6 +4017,14 @@ client.tools.list_environments(
-
+**version_id:** `str` — Unique identifier for the specific version of the Tool.
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -3989,7 +4037,7 @@ client.tools.list_environments(
-client.tools.update_monitoring(...)
+client.tools.update_tool_version(...)
-
@@ -4001,10 +4049,7 @@ client.tools.list_environments(
-
-Activate and deactivate Evaluators for monitoring the Tool.
-
-An activated Evaluator will automatically be run on all new Logs
-within the Tool for monitoring purposes.
+Update the name or description of the Tool version.
@@ -4024,9 +4069,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.tools.update_monitoring(
- id="tl_789ghi",
- activate=[{"evaluator_version_id": "evv_1abc4308abd"}],
+client.tools.update_tool_version(
+ id="id",
+ version_id="version_id",
)
```
@@ -4043,7 +4088,7 @@ client.tools.update_monitoring(
-
-**id:** `str`
+**id:** `str` — Unique identifier for Tool.
@@ -4051,9 +4096,7 @@ client.tools.update_monitoring(
-
-**activate:** `typing.Optional[
- typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]
-]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+**version_id:** `str` — Unique identifier for the specific version of the Tool.
@@ -4061,9 +4104,15 @@ client.tools.update_monitoring(
-
-**deactivate:** `typing.Optional[
- typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]
-]` — Evaluators to deactivate. These will not be run on new Logs.
+**name:** `typing.Optional[str]` — Name of the version.
+
+
+
+
+
+-
+
+**description:** `typing.Optional[str]` — Description of the version.
@@ -4083,8 +4132,7 @@ client.tools.update_monitoring(
-## Datasets
-client.datasets.list(...)
+client.tools.set_deployment(...)
-
@@ -4096,7 +4144,10 @@ client.tools.update_monitoring(
-
-List all Datasets.
+Deploy Tool to an Environment.
+
+Set the deployed version for the specified Environment. This Tool
+will be used for calls made to the Tool in this Environment.
@@ -4116,14 +4167,11 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-response = client.datasets.list(
- size=1,
+client.tools.set_deployment(
+ id="tl_789ghi",
+ environment_id="staging",
+ version_id="tv_012jkl",
)
-for item in response:
- yield item
-# alternatively, you can paginate page-by-page
-for page in response.iter_pages():
- yield page
```
@@ -4139,7 +4187,7 @@ for page in response.iter_pages():
-
-**page:** `typing.Optional[int]` — Page offset for pagination.
+**id:** `str` — Unique identifier for Tool.
@@ -4147,7 +4195,7 @@ for page in response.iter_pages():
-
-**size:** `typing.Optional[int]` — Page size for pagination. Number of Datasets to fetch.
+**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
@@ -4155,7 +4203,7 @@ for page in response.iter_pages():
-
-**name:** `typing.Optional[str]` — Case-insensitive filter for Dataset name.
+**version_id:** `str` — Unique identifier for the specific version of the Tool.
@@ -4163,47 +4211,40 @@ for page in response.iter_pages():
-
-**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Dataset. This filter matches against both email address and name of users.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Datasets by
-
+
+client.tools.remove_deployment(...)
-
-**order:** `typing.Optional[SortOrder]` — Direction to sort by.
-
-
-
+#### 📝 Description
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
-
-
-
-
+
+-
+Remove deployed Tool from the Environment.
+Remove the deployed version for the specified Environment. This Tool
+will no longer be used for calls made to the Tool in this Environment.
+
+
-
-
-client.datasets.upsert(...)
-
--
-#### 📝 Description
+#### 🔌 Usage
-
@@ -4211,70 +4252,15 @@ for page in response.iter_pages():
-
-Create a Dataset or update it with a new version if it already exists.
-
-Datasets are identified by the `ID` or their `path`. The datapoints determine the versions of the Dataset.
-
-By default, the new Dataset version will be set to the list of Datapoints provided in
-the request. You can also create a new version by adding or removing Datapoints from an existing version
-by specifying `action` as `add` or `remove` respectively. In this case, you may specify
-the `version_id` or `environment` query parameters to identify the existing version to base
-the new version on. If neither is provided, the latest created version will be used.
-
-You can provide `version_name` and `version_description` to identify and describe your versions.
-Version names must be unique within a Dataset - attempting to create a version with a name
-that already exists will result in a 409 Conflict error.
-
-Humanloop also deduplicates Datapoints. If you try to add a Datapoint that already
-exists, it will be ignored. If you intentionally want to add a duplicate Datapoint,
-you can add a unique identifier to the Datapoint's inputs such as `{_dedupe_id: }`.
-
-
-
-
-
-#### 🔌 Usage
-
-
--
-
-
--
-
-```python
-from humanloop import Humanloop
+```python
+from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.datasets.upsert(
- path="datasets/support-queries",
- datapoints=[
- {
- "messages": [
- {
- "role": "user",
- "content": "How do i manage my organizations API keys?\n",
- }
- ],
- "target": {
- "response": 'Hey, thanks for your questions. Here are steps for how to achieve: 1. Log in to the Humanloop Dashboard \n\n2. Click on "Organization Settings."\n If you do not see this option, you might need to contact your organization admin to gain the necessary permissions.\n\n3. Within the settings or organization settings, select the option labeled "API Keys" on the left. Here you will be able to view and manage your API keys.\n\n4. You will see a list of existing API keys. You can perform various actions, such as:\n - **Generate New API Key:** Click on the "Generate New Key" button if you need a new API key.\n - **Revoke an API Key:** If you need to disable an existing key, find the key in the list and click the "Revoke" or "Delete" button.\n - **Copy an API Key:** If you need to use an existing key, you can copy it to your clipboard by clicking the "Copy" button next to the key.\n\n5. **Save and Secure API Keys:** Make sure to securely store any new or existing API keys you are using. Treat them like passwords and do not share them publicly.\n\nIf you encounter any issues or need further assistance, it might be helpful to engage with an engineer or your IT department to ensure you have the necessary permissions and support.\n\nWould you need help with anything else?'
- },
- },
- {
- "messages": [
- {
- "role": "user",
- "content": "Hey, can do I use my code evaluator for monitoring my legal-copilot prompt?",
- }
- ],
- "target": {
- "response": "Hey, thanks for your questions. Here are steps for how to achieve: 1. Navigate to your Prompt dashboard. \n 2. Select the `Monitoring` button on the top right of the Prompt dashboard \n 3. Within the model select the Version of the Evaluator you want to turn on for monitoring. \n\nWould you need help with anything else?"
- },
- },
- ],
- version_name="Initial version",
- version_description="Add two new questions and answers",
+client.tools.remove_deployment(
+ id="tl_789ghi",
+ environment_id="staging",
)
```
@@ -4291,7 +4277,7 @@ client.datasets.upsert(
-
-**datapoints:** `typing.Sequence[CreateDatapointRequestParams]` — The Datapoints to create this Dataset version with. Modify the `action` field to determine how these Datapoints are used.
+**id:** `str` — Unique identifier for Tool.
@@ -4299,7 +4285,7 @@ client.datasets.upsert(
-
-**version_id:** `typing.Optional[str]` — ID of the specific Dataset version to base the created Version on. Only used when `action` is `"add"` or `"remove"`.
+**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
@@ -4307,71 +4293,69 @@ client.datasets.upsert(
-
-**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed Version to base the created Version on. Only used when `action` is `"add"` or `"remove"`.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**include_datapoints:** `typing.Optional[bool]` — If set to `true`, include all Datapoints in the response. Defaults to `false`. Consider using the paginated List Datapoints endpoint instead.
-
+
+client.tools.list_environments(...)
-
-**path:** `typing.Optional[str]` — Path of the Dataset, including the name. This locates the Dataset in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
-
-
-
+#### 📝 Description
-
-**id:** `typing.Optional[str]` — ID for an existing Dataset.
-
+
+-
+
+List all Environments and their deployed versions for the Tool.
+
+
+#### 🔌 Usage
+
-
-**action:** `typing.Optional[UpdateDatesetAction]`
+
+-
-The action to take with the provided Datapoints.
+```python
+from humanloop import Humanloop
- - If `"set"`, the created version will only contain the Datapoints provided in this request.
- - If `"add"`, the created version will contain the Datapoints provided in this request in addition to the Datapoints in the target version.
- - If `"remove"`, the created version will contain the Datapoints in the target version except for the Datapoints provided in this request.
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.tools.list_environments(
+ id="tl_789ghi",
+)
-If `"add"` or `"remove"`, one of the `version_id` or `environment` query parameters may be provided.
-
+```
-
-
--
-
-**attributes:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Additional fields to describe the Dataset. Helpful to separate Dataset versions from each other with details on how they were created or used.
-
+#### ⚙️ Parameters
+
-
-**version_name:** `typing.Optional[str]` — Unique name for the Dataset version. Version names must be unique for a given Dataset.
-
-
-
-
-
-**version_description:** `typing.Optional[str]` — Description of the version, e.g., the changes made in this version.
+**id:** `str` — Unique identifier for Tool.
@@ -4391,7 +4375,7 @@ If `"add"` or `"remove"`, one of the `version_id` or `environment` query paramet
-client.datasets.get(...)
+client.tools.update_monitoring(...)
-
@@ -4403,15 +4387,10 @@ If `"add"` or `"remove"`, one of the `version_id` or `environment` query paramet
-
-Retrieve the Dataset with the given ID.
-
-Unless `include_datapoints` is set to `true`, the response will not include
-the Datapoints.
-Use the List Datapoints endpoint (`GET /{id}/datapoints`) to efficiently
-retrieve Datapoints for a large Dataset.
+Activate and deactivate Evaluators for monitoring the Tool.
-By default, the deployed version of the Dataset is returned. Use the query parameters
-`version_id` or `environment` to target a specific version of the Dataset.
+An activated Evaluator will automatically be run on all new Logs
+within the Tool for monitoring purposes.
@@ -4431,10 +4410,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.datasets.get(
- id="ds_b0baF1ca7652",
- version_id="dsv_6L78pqrdFi2xa",
- include_datapoints=True,
+client.tools.update_monitoring(
+ id="tl_789ghi",
+ activate=[{"evaluator_version_id": "evv_1abc4308abd"}],
)
```
@@ -4451,15 +4429,7 @@ client.datasets.get(
-
-**id:** `str` — Unique identifier for Dataset.
-
-
-
-
-
--
-
-**version_id:** `typing.Optional[str]` — A specific Version ID of the Dataset to retrieve.
+**id:** `str` — Unique identifier for Tool.
@@ -4467,7 +4437,9 @@ client.datasets.get(
-
-**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
+**activate:** `typing.Optional[
+ typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]
+]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
@@ -4475,7 +4447,9 @@ client.datasets.get(
-
-**include_datapoints:** `typing.Optional[bool]` — If set to `true`, include all Datapoints in the response. Defaults to `false`. Consider using the paginated List Datapoints endpoint instead.
+**deactivate:** `typing.Optional[
+ typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]
+]` — Evaluators to deactivate. These will not be run on new Logs.
@@ -4495,24 +4469,10 @@ client.datasets.get(
-client.datasets.delete(...)
-
--
-
-#### 📝 Description
-
-
--
-
+
client.tools.get_environment_variables(...)
-
-Delete the Dataset with the given ID.
-
-
-
-
-
#### 🔌 Usage
@@ -4527,7 +4487,7 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.datasets.delete(
+client.tools.get_environment_variables(
id="id",
)
@@ -4545,7 +4505,7 @@ client.datasets.delete(
-
-**id:** `str` — Unique identifier for Dataset.
+**id:** `str` — Unique identifier for File.
@@ -4565,7 +4525,7 @@ client.datasets.delete(
-client.datasets.move(...)
+client.tools.add_environment_variable(...)
-
@@ -4577,7 +4537,7 @@ client.datasets.delete(
-
-Move the Dataset to a different path or change the name.
+Add an environment variable to a Tool.
@@ -4597,8 +4557,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.datasets.move(
+client.tools.add_environment_variable(
id="id",
+ request=[{"name": "name", "value": "value"}],
)
```
@@ -4615,15 +4576,7 @@ client.datasets.move(
-
-**id:** `str` — Unique identifier for Dataset.
-
-
-
-
-
--
-
-**path:** `typing.Optional[str]` — Path of the Dataset including the Dataset name, which is used as a unique identifier.
+**id:** `str` — Unique identifier for Tool.
@@ -4631,7 +4584,7 @@ client.datasets.move(
-
-**name:** `typing.Optional[str]` — Name of the Dataset, which is used as a unique identifier.
+**request:** `typing.Sequence[FileEnvironmentVariableRequestParams]` — The Environment Variables to add to the Tool.
@@ -4651,24 +4604,10 @@ client.datasets.move(
-client.datasets.list_datapoints(...)
-
--
-
-#### 📝 Description
-
-
--
-
+
client.tools.delete_environment_variable(...)
-
-List all Datapoints for the Dataset with the given ID.
-
-
-
-
-
#### 🔌 Usage
@@ -4683,15 +4622,10 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-response = client.datasets.list_datapoints(
- id="ds_b0baF1ca7652",
- size=1,
+client.tools.delete_environment_variable(
+ id="id",
+ name="name",
)
-for item in response:
- yield item
-# alternatively, you can paginate page-by-page
-for page in response.iter_pages():
- yield page
```
@@ -4707,31 +4641,7 @@ for page in response.iter_pages():
-
-**id:** `str` — Unique identifier for Dataset.
-
-
-
-
-
--
-
-**version_id:** `typing.Optional[str]` — A specific Version ID of the Dataset to retrieve.
-
-
-
-
-
--
-
-**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
-
-
-
-
-
--
-
-**page:** `typing.Optional[int]` — Page number for pagination.
+**id:** `str` — Unique identifier for File.
@@ -4739,7 +4649,7 @@ for page in response.iter_pages():
-
-**size:** `typing.Optional[int]` — Page size for pagination. Number of Datapoints to fetch.
+**name:** `str` — Name of the Environment Variable to delete.
@@ -4759,7 +4669,8 @@ for page in response.iter_pages():
-client.datasets.list_versions(...)
+## Datasets
+client.datasets.list(...)
-
@@ -4771,7 +4682,7 @@ for page in response.iter_pages():
-
-Get a list of the versions for a Dataset.
+List all Datasets.
@@ -4791,9 +4702,14 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.datasets.list_versions(
- id="ds_b0baF1ca7652",
+response = client.datasets.list(
+ size=1,
)
+for item in response:
+    print(item)
+# alternatively, you can paginate page-by-page
+for page in response.iter_pages():
+    print(page)
```
@@ -4809,7 +4725,7 @@ client.datasets.list_versions(
-
-**id:** `str` — Unique identifier for Dataset.
+**page:** `typing.Optional[int]` — Page offset for pagination.
@@ -4817,7 +4733,39 @@ client.datasets.list_versions(
-
-**include_datapoints:** `typing.Optional[ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints]` — If set to 'latest_saved', include datapoints for the latest saved version. Alternatively, 'latest_committed' (deprecated) includes datapoints for the latest committed version only.
+**size:** `typing.Optional[int]` — Page size for pagination. Number of Datasets to fetch.
+
+
+
+
+
+-
+
+**name:** `typing.Optional[str]` — Case-insensitive filter for Dataset name.
+
+
+
+
+
+-
+
+**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Dataset. This filter matches against both email address and name of users.
+
+
+
+
+
+-
+
+**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Datasets by.
+
+
+
+
+
+-
+
+**order:** `typing.Optional[SortOrder]` — Direction to sort by.
@@ -4837,7 +4785,7 @@ client.datasets.list_versions(
-client.datasets.delete_dataset_version(...)
+client.datasets.upsert(...)
-
@@ -4849,7 +4797,23 @@ client.datasets.list_versions(
-
-Delete a version of the Dataset.
+Create a Dataset or update it with a new version if it already exists.
+
+Datasets are identified by the `ID` or their `path`. The datapoints determine the versions of the Dataset.
+
+By default, the new Dataset version will be set to the list of Datapoints provided in
+the request. You can also create a new version by adding or removing Datapoints from an existing version
+by specifying `action` as `add` or `remove` respectively. In this case, you may specify
+the `version_id` or `environment` query parameters to identify the existing version to base
+the new version on. If neither is provided, the latest created version will be used.
+
+You can provide `version_name` and `version_description` to identify and describe your versions.
+Version names must be unique within a Dataset - attempting to create a version with a name
+that already exists will result in a 409 Conflict error.
+
+Humanloop also deduplicates Datapoints. If you try to add a Datapoint that already
+exists, it will be ignored. If you intentionally want to add a duplicate Datapoint,
+you can add a unique identifier to the Datapoint's inputs such as `{_dedupe_id: }`.
@@ -4869,9 +4833,34 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.datasets.delete_dataset_version(
- id="id",
- version_id="version_id",
+client.datasets.upsert(
+ path="datasets/support-queries",
+ datapoints=[
+ {
+ "messages": [
+ {
+ "role": "user",
+ "content": "How do i manage my organizations API keys?\n",
+ }
+ ],
+ "target": {
+ "response": 'Hey, thanks for your questions. Here are steps for how to achieve: 1. Log in to the Humanloop Dashboard \n\n2. Click on "Organization Settings."\n If you do not see this option, you might need to contact your organization admin to gain the necessary permissions.\n\n3. Within the settings or organization settings, select the option labeled "API Keys" on the left. Here you will be able to view and manage your API keys.\n\n4. You will see a list of existing API keys. You can perform various actions, such as:\n - **Generate New API Key:** Click on the "Generate New Key" button if you need a new API key.\n - **Revoke an API Key:** If you need to disable an existing key, find the key in the list and click the "Revoke" or "Delete" button.\n - **Copy an API Key:** If you need to use an existing key, you can copy it to your clipboard by clicking the "Copy" button next to the key.\n\n5. **Save and Secure API Keys:** Make sure to securely store any new or existing API keys you are using. Treat them like passwords and do not share them publicly.\n\nIf you encounter any issues or need further assistance, it might be helpful to engage with an engineer or your IT department to ensure you have the necessary permissions and support.\n\nWould you need help with anything else?'
+ },
+ },
+ {
+ "messages": [
+ {
+ "role": "user",
+ "content": "Hey, can do I use my code evaluator for monitoring my legal-copilot prompt?",
+ }
+ ],
+ "target": {
+ "response": "Hey, thanks for your questions. Here are steps for how to achieve: 1. Navigate to your Prompt dashboard. \n 2. Select the `Monitoring` button on the top right of the Prompt dashboard \n 3. Within the model select the Version of the Evaluator you want to turn on for monitoring. \n\nWould you need help with anything else?"
+ },
+ },
+ ],
+ version_name="Initial version",
+ version_description="Add two new questions and answers",
)
```
@@ -4888,7 +4877,7 @@ client.datasets.delete_dataset_version(
-
-**id:** `str` — Unique identifier for Dataset.
+**datapoints:** `typing.Sequence[CreateDatapointRequestParams]` — The Datapoints to create this Dataset version with. Modify the `action` field to determine how these Datapoints are used.
@@ -4896,7 +4885,7 @@ client.datasets.delete_dataset_version(
-
-**version_id:** `str` — Unique identifier for the specific version of the Dataset.
+**version_id:** `typing.Optional[str]` — ID of the specific Dataset version to base the created Version on. Only used when `action` is `"add"` or `"remove"`.
@@ -4904,70 +4893,47 @@ client.datasets.delete_dataset_version(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed Version to base the created Version on. Only used when `action` is `"add"` or `"remove"`.
-
-
-
-
-
-
-
-client.datasets.update_dataset_version(...)
-
-#### 📝 Description
-
-
--
+**include_datapoints:** `typing.Optional[bool]` — If set to `true`, include all Datapoints in the response. Defaults to `false`. Consider using the paginated List Datapoints endpoint instead.
+
+
+
-
-Update the name or description of the Dataset version.
-
-
+**path:** `typing.Optional[str]` — Path of the Dataset, including the name. This locates the Dataset in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
-#### 🔌 Usage
-
-
-
--
-
-```python
-from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.datasets.update_dataset_version(
- id="id",
- version_id="version_id",
-)
-
-```
-
-
+**id:** `typing.Optional[str]` — ID for an existing Dataset.
+
-#### ⚙️ Parameters
-
-
-
--
+**action:** `typing.Optional[UpdateDatesetAction]`
-**id:** `str` — Unique identifier for Dataset.
+The action to take with the provided Datapoints.
+
+ - If `"set"`, the created version will only contain the Datapoints provided in this request.
+ - If `"add"`, the created version will contain the Datapoints provided in this request in addition to the Datapoints in the target version.
+ - If `"remove"`, the created version will contain the Datapoints in the target version except for the Datapoints provided in this request.
+
+If `"add"` or `"remove"`, one of the `version_id` or `environment` query parameters may be provided.
@@ -4975,7 +4941,7 @@ client.datasets.update_dataset_version(
-
-**version_id:** `str` — Unique identifier for the specific version of the Dataset.
+**attributes:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Additional fields to describe the Dataset. Helpful to separate Dataset versions from each other with details on how they were created or used.
@@ -4983,7 +4949,7 @@ client.datasets.update_dataset_version(
-
-**name:** `typing.Optional[str]` — Name of the version.
+**version_name:** `typing.Optional[str]` — Unique name for the Dataset version. Version names must be unique for a given Dataset.
@@ -4991,7 +4957,7 @@ client.datasets.update_dataset_version(
-
-**description:** `typing.Optional[str]` — Description of the version.
+**version_description:** `typing.Optional[str]` — Description of the version, e.g., the changes made in this version.
@@ -5011,7 +4977,7 @@ client.datasets.update_dataset_version(
-client.datasets.upload_csv(...)
+client.datasets.get(...)
-
@@ -5023,17 +4989,15 @@ client.datasets.update_dataset_version(
-
-Add Datapoints from a CSV file to a Dataset.
-
-This will create a new version of the Dataset with the Datapoints from the CSV file.
+Retrieve the Dataset with the given ID.
-If either `version_id` or `environment` is provided, the new version will be based on the specified version,
-with the Datapoints from the CSV file added to the existing Datapoints in the version.
-If neither `version_id` nor `environment` is provided, the new version will be based on the version
-of the Dataset that is deployed to the default Environment.
+Unless `include_datapoints` is set to `true`, the response will not include
+the Datapoints.
+Use the List Datapoints endpoint (`GET /{id}/datapoints`) to efficiently
+retrieve Datapoints for a large Dataset.
-You can optionally provide a name and description for the new version using `version_name`
-and `version_description` parameters.
+By default, the deployed version of the Dataset is returned. Use the query parameters
+`version_id` or `environment` to target a specific version of the Dataset.
@@ -5053,8 +5017,10 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.datasets.upload_csv(
- id="id",
+client.datasets.get(
+ id="ds_b0baF1ca7652",
+ version_id="dsv_6L78pqrdFi2xa",
+ include_datapoints=True,
)
```
@@ -5071,25 +5037,7 @@ client.datasets.upload_csv(
-
-**id:** `str` — Unique identifier for the Dataset
-
-
-
-
-
--
-
-**file:** `from __future__ import annotations
-
-core.File` — See core.File for more documentation
-
-
-
-
-
--
-
-**version_id:** `typing.Optional[str]` — ID of the specific Dataset version to base the created Version on.
+**id:** `str` — Unique identifier for Dataset.
@@ -5097,7 +5045,7 @@ core.File` — See core.File for more documentation
-
-**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed Version to base the created Version on.
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Dataset to retrieve.
@@ -5105,7 +5053,7 @@ core.File` — See core.File for more documentation
-
-**version_name:** `typing.Optional[str]` — Name for the new Dataset version.
+**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
@@ -5113,7 +5061,7 @@ core.File` — See core.File for more documentation
-
-**version_description:** `typing.Optional[str]` — Description for the new Dataset version.
+**include_datapoints:** `typing.Optional[bool]` — If set to `true`, include all Datapoints in the response. Defaults to `false`. Consider using the paginated List Datapoints endpoint instead.
@@ -5133,7 +5081,7 @@ core.File` — See core.File for more documentation
-client.datasets.set_deployment(...)
+client.datasets.delete(...)
-
@@ -5145,9 +5093,7 @@ core.File` — See core.File for more documentation
-
-Deploy Dataset to Environment.
-
-Set the deployed version for the specified Environment.
+Delete the Dataset with the given ID.
@@ -5167,10 +5113,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.datasets.set_deployment(
- id="ds_b0baF1ca7652",
- environment_id="staging",
- version_id="dsv_6L78pqrdFi2xa",
+client.datasets.delete(
+ id="id",
)
```
@@ -5195,22 +5139,6 @@ client.datasets.set_deployment(
-
-**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
-
-
-
-
-
--
-
-**version_id:** `str` — Unique identifier for the specific version of the Dataset.
-
-
-
-
-
--
-
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -5223,7 +5151,7 @@ client.datasets.set_deployment(
-client.datasets.remove_deployment(...)
+client.datasets.move(...)
-
@@ -5235,9 +5163,7 @@ client.datasets.set_deployment(
-
-Remove deployed Dataset from Environment.
-
-Remove the deployed version for the specified Environment.
+Move the Dataset to a different path or change the name.
@@ -5257,9 +5183,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.datasets.remove_deployment(
- id="ds_b0baF1ca7652",
- environment_id="staging",
+client.datasets.move(
+ id="id",
)
```
@@ -5284,7 +5209,15 @@ client.datasets.remove_deployment(
-
-**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
+**path:** `typing.Optional[str]` — Path of the Dataset including the Dataset name, which is used as a unique identifier.
+
+
+
+
+
+-
+
+**name:** `typing.Optional[str]` — Name of the Dataset, which is used as a unique identifier.
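+
+For example, to move and rename the Dataset in one call (values hypothetical):
+
+```python
+client.datasets.move(
+    id="ds_b0baF1ca7652",
+    path="new directory/new name",
+)
+
+```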
@@ -5304,7 +5237,7 @@ client.datasets.remove_deployment(
-client.datasets.list_environments(...)
+client.datasets.list_datapoints(...)
-
@@ -5316,7 +5249,7 @@ client.datasets.remove_deployment(
-
-List all Environments and their deployed versions for the Dataset.
+List all Datapoints for the Dataset with the given ID.
@@ -5336,9 +5269,15 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.datasets.list_environments(
- id="id",
+response = client.datasets.list_datapoints(
+ id="ds_b0baF1ca7652",
+ size=1,
)
+for item in response:
+    print(item)
+# alternatively, you can paginate page-by-page
+for page in response.iter_pages():
+    print(page)
```
@@ -5362,6 +5301,38 @@ client.datasets.list_environments(
-
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Dataset to retrieve.
+
+
+
+
+
+-
+
+**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
+
+
+
+
+
+-
+
+**page:** `typing.Optional[int]` — Page number for pagination.
+
+
+
+
+
+-
+
+**size:** `typing.Optional[int]` — Page size for pagination. Number of Datapoints to fetch.
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -5374,8 +5345,7 @@ client.datasets.list_environments(
-## Evaluators
-client.evaluators.log(...)
+client.datasets.list_versions(...)
-
@@ -5387,9 +5357,7 @@ client.datasets.list_environments(
-
-Submit Evaluator judgment for an existing Log.
-
-Creates a new Log. The evaluated Log will be set as the parent of the created Log.
+Get a list of the versions for a Dataset.
@@ -5409,8 +5377,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.evaluators.log(
- parent_id="parent_id",
+client.datasets.list_versions(
+ id="ds_b0baF1ca7652",
)
```
@@ -5427,7 +5395,7 @@ client.evaluators.log(
-
-**parent_id:** `str` — Identifier of the evaluated Log. The newly created Log will have this one set as parent.
+**id:** `str` — Unique identifier for Dataset.
@@ -5435,7 +5403,7 @@ client.evaluators.log(
-
-**version_id:** `typing.Optional[str]` — ID of the Evaluator version to log against.
+**include_datapoints:** `typing.Optional[ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints]` — If set to 'latest_saved', includes Datapoints for the latest saved version. Alternatively, 'latest_committed' (deprecated) includes Datapoints for the latest committed version only.
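+
+For example, a sketch that includes Datapoints for the latest saved version (reusing the example Dataset ID above):
+
+```python
+client.datasets.list_versions(
+    id="ds_b0baF1ca7652",
+    include_datapoints="latest_saved",
+)
+
+```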
@@ -5443,103 +5411,70 @@ client.evaluators.log(
-
-**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
-
--
-
-**path:** `typing.Optional[str]` — Path of the Evaluator, including the name. This locates the Evaluator in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
-
-
--
-**id:** `typing.Optional[str]` — ID for an existing Evaluator.
-
+
+client.datasets.delete_dataset_version(...)
-
-**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
-
-
-
+#### 📝 Description
-
-**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
-
-
-
-
-
-**output:** `typing.Optional[str]` — Generated output from the LLM. Only populated for LLM Evaluator Logs.
-
+Delete a version of the Dataset.
-
-
--
-
-**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
-
+#### 🔌 Usage
+
-
-**error:** `typing.Optional[str]` — Error message if the log is an error.
-
-
-
-
-
-**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
-
-
-
+```python
+from humanloop import Humanloop
-
--
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.datasets.delete_dataset_version(
+ id="id",
+ version_id="version_id",
+)
-**stdout:** `typing.Optional[str]` — Captured log and debug statements.
-
+```
-
-
--
-
-**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider. Only populated for LLM Evaluator Logs.
-
+#### ⚙️ Parameters
+
-
-**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received the provider. Only populated for LLM Evaluator Logs.
-
-
-
-
-
-**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
+**id:** `str` — Unique identifier for Dataset.
@@ -5547,7 +5482,7 @@ client.evaluators.log(
-
-**source:** `typing.Optional[str]` — Identifies where the model was called from.
+**version_id:** `str` — Unique identifier for the specific version of the Dataset.
@@ -5555,71 +5490,70 @@ client.evaluators.log(
-
-**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
+
+client.datasets.update_dataset_version(...)
-
-**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
-
-
-
+#### 📝 Description
-
-**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
-
-
-
-
-
-**user:** `typing.Optional[str]` — End-user ID related to the Log.
-
+Update the name or description of the Dataset version.
+
+
+
+#### 🔌 Usage
-
-**create_evaluator_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to.
-
-
-
-
-
-**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
-
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.datasets.update_dataset_version(
+ id="id",
+ version_id="version_id",
+)
+
+```
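+
+A sketch that actually renames the version via the `name` and `description` parameters documented below (values hypothetical):
+
+```python
+client.datasets.update_dataset_version(
+    id="ds_b0baF1ca7652",
+    version_id="dsv_6L78pqrdFi2xa",
+    name="golden-set-v2",
+    description="Cleaned up mislabelled Datapoints",
+)
+
+```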
+
+
+#### ⚙️ Parameters
+
-
-**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
-
-
-
-
-
-**output_message:** `typing.Optional[ChatMessageParams]` — The message returned by the LLM. Only populated for LLM Evaluator Logs.
+**id:** `str` — Unique identifier for Dataset.
@@ -5627,7 +5561,7 @@ client.evaluators.log(
-
-**judgment:** `typing.Optional[CreateEvaluatorLogRequestJudgmentParams]` — Evaluator assessment of the Log.
+**version_id:** `str` — Unique identifier for the specific version of the Dataset.
@@ -5635,7 +5569,7 @@ client.evaluators.log(
-
-**marked_completed:** `typing.Optional[bool]` — Whether the Log has been manually marked as completed by a user.
+**name:** `typing.Optional[str]` — Name of the version.
@@ -5643,7 +5577,7 @@ client.evaluators.log(
-
-**spec:** `typing.Optional[CreateEvaluatorLogRequestSpecParams]`
+**description:** `typing.Optional[str]` — Description of the version.
@@ -5663,7 +5597,7 @@ client.evaluators.log(
-client.evaluators.list(...)
+client.datasets.upload_csv(...)
-
@@ -5675,7 +5609,17 @@ client.evaluators.log(
-
-Get a list of all Evaluators.
+Add Datapoints from a CSV file to a Dataset.
+
+This will create a new version of the Dataset with the Datapoints from the CSV file.
+
+If either `version_id` or `environment` is provided, the new version will be based on the specified version,
+with the Datapoints from the CSV file added to the existing Datapoints in the version.
+If neither `version_id` nor `environment` is provided, the new version will be based on the version
+of the Dataset that is deployed to the default Environment.
+
+You can optionally provide a name and description for the new version using `version_name`
+and `version_description` parameters.
@@ -5695,14 +5639,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-response = client.evaluators.list(
- size=1,
+client.datasets.upload_csv(
+ id="id",
)
-for item in response:
- yield item
-# alternatively, you can paginate page-by-page
-for page in response.iter_pages():
- yield page
```
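+
+A fuller sketch, assuming a local `datapoints.csv` file (the file name and version metadata are hypothetical):
+
+```python
+with open("datapoints.csv", "rb") as f:
+    client.datasets.upload_csv(
+        id="ds_b0baF1ca7652",
+        file=f,
+        version_name="csv-import-v1",
+        version_description="Datapoints imported from CSV",
+    )
+
+```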
@@ -5718,7 +5657,7 @@ for page in response.iter_pages():
-
-**page:** `typing.Optional[int]` — Page offset for pagination.
+**id:** `str` — Unique identifier for the Dataset
@@ -5726,7 +5665,9 @@ for page in response.iter_pages():
-
-**size:** `typing.Optional[int]` — Page size for pagination. Number of Evaluators to fetch.
+**file:** `core.File` — See core.File for more documentation
@@ -5734,7 +5675,7 @@ for page in response.iter_pages():
-
-**name:** `typing.Optional[str]` — Case-insensitive filter for Evaluator name.
+**version_id:** `typing.Optional[str]` — ID of the specific Dataset version to base the created Version on.
@@ -5742,7 +5683,7 @@ for page in response.iter_pages():
-
-**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Evaluator. This filter matches against both email address and name of users.
+**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed Version to base the created Version on.
@@ -5750,7 +5691,7 @@ for page in response.iter_pages():
-
-**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Evaluators by
+**version_name:** `typing.Optional[str]` — Name for the new Dataset version.
@@ -5758,7 +5699,7 @@ for page in response.iter_pages():
-
-**order:** `typing.Optional[SortOrder]` — Direction to sort by.
+**version_description:** `typing.Optional[str]` — Description for the new Dataset version.
@@ -5778,7 +5719,7 @@ for page in response.iter_pages():
-client.evaluators.upsert(...)
+client.datasets.set_deployment(...)
-
@@ -5790,13 +5731,9 @@ for page in response.iter_pages():
-
-Create an Evaluator or update it with a new version if it already exists.
-
-Evaluators are identified by the `ID` or their `path`. The spec provided determines the version of the Evaluator.
+Deploy Dataset to Environment.
-You can provide `version_name` and `version_description` to identify and describe your versions.
-Version names must be unique within an Evaluator - attempting to create a version with a name
-that already exists will result in a 409 Conflict error.
+Set the deployed version for the specified Environment.
@@ -5816,19 +5753,13 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.evaluators.upsert(
- path="Shared Evaluators/Accuracy Evaluator",
- spec={
- "arguments_type": "target_required",
- "return_type": "number",
- "evaluator_type": "python",
- "code": "def evaluate(answer, target):\n return 0.5",
- },
- version_name="simple-evaluator",
- version_description="Simple evaluator that returns 0.5",
-)
-
-```
+client.datasets.set_deployment(
+ id="ds_b0baF1ca7652",
+ environment_id="staging",
+ version_id="dsv_6L78pqrdFi2xa",
+)
+
+```
@@ -5842,23 +5773,7 @@ client.evaluators.upsert(
-
-**spec:** `EvaluatorRequestSpecParams`
-
-
-
-
-
--
-
-**path:** `typing.Optional[str]` — Path of the Evaluator, including the name. This locates the Evaluator in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
-
-
-
-
-
--
-
-**id:** `typing.Optional[str]` — ID for an existing Evaluator.
+**id:** `str` — Unique identifier for Dataset.
@@ -5866,7 +5781,7 @@ client.evaluators.upsert(
-
-**version_name:** `typing.Optional[str]` — Unique name for the Evaluator version. Version names must be unique for a given Evaluator.
+**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
@@ -5874,7 +5789,7 @@ client.evaluators.upsert(
-
-**version_description:** `typing.Optional[str]` — Description of the version, e.g., the changes made in this version.
+**version_id:** `str` — Unique identifier for the specific version of the Dataset.
@@ -5894,7 +5809,7 @@ client.evaluators.upsert(
-client.evaluators.get(...)
+client.datasets.remove_deployment(...)
-
@@ -5906,10 +5821,9 @@ client.evaluators.upsert(
-
-Retrieve the Evaluator with the given ID.
+Remove deployed Dataset from Environment.
-By default, the deployed version of the Evaluator is returned. Use the query parameters
-`version_id` or `environment` to target a specific version of the Evaluator.
+Remove the deployed version for the specified Environment.
@@ -5929,8 +5843,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.evaluators.get(
- id="ev_890bcd",
+client.datasets.remove_deployment(
+ id="ds_b0baF1ca7652",
+ environment_id="staging",
)
```
@@ -5947,15 +5862,7 @@ client.evaluators.get(
-
-**id:** `str` — Unique identifier for Evaluator.
-
-
-
-
-
--
-
-**version_id:** `typing.Optional[str]` — A specific Version ID of the Evaluator to retrieve.
+**id:** `str` — Unique identifier for Dataset.
@@ -5963,7 +5870,7 @@ client.evaluators.get(
-
-**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
+**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
@@ -5983,7 +5890,7 @@ client.evaluators.get(
-client.evaluators.delete(...)
+client.datasets.list_environments(...)
-
@@ -5995,7 +5902,7 @@ client.evaluators.get(
-
-Delete the Evaluator with the given ID.
+List all Environments and their deployed versions for the Dataset.
@@ -6015,8 +5922,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.evaluators.delete(
- id="ev_890bcd",
+client.datasets.list_environments(
+ id="id",
)
```
@@ -6033,7 +5940,7 @@ client.evaluators.delete(
-
-**id:** `str` — Unique identifier for Evaluator.
+**id:** `str` — Unique identifier for Dataset.
@@ -6053,7 +5960,8 @@ client.evaluators.delete(
-client.evaluators.move(...)
+## Evaluators
+client.evaluators.log(...)
-
@@ -6065,7 +5973,9 @@ client.evaluators.delete(
-
-Move the Evaluator to a different path or change the name.
+Submit Evaluator judgment for an existing Log.
+
+Creates a new Log. The evaluated Log will be set as the parent of the created Log.
@@ -6085,9 +5995,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.evaluators.move(
- id="ev_890bcd",
- path="new directory/new name",
+client.evaluators.log(
+ parent_id="parent_id",
)
```
@@ -6104,7 +6013,7 @@ client.evaluators.move(
-
-**id:** `str` — Unique identifier for Evaluator.
+**parent_id:** `str` — Identifier of the evaluated Log. The newly created Log will have this one set as parent.
@@ -6112,7 +6021,7 @@ client.evaluators.move(
-
-**path:** `typing.Optional[str]` — Path of the Evaluator including the Evaluator name, which is used as a unique identifier.
+**version_id:** `typing.Optional[str]` — ID of the Evaluator version to log against.
@@ -6120,7 +6029,7 @@ client.evaluators.move(
-
-**name:** `typing.Optional[str]` — Name of the Evaluator, which is used as a unique identifier.
+**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
@@ -6128,69 +6037,79 @@ client.evaluators.move(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**path:** `typing.Optional[str]` — Path of the Evaluator, including the name. This locates the Evaluator in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
-
-
+
+-
+**id:** `typing.Optional[str]` — ID for an existing Evaluator.
+
-
-client.evaluators.list_versions(...)
-
-#### 📝 Description
+**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
+
+
+
-
+**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
+
+
+
+
-
-Get a list of all the versions of an Evaluator.
-
-
+**output:** `typing.Optional[str]` — Generated output from the LLM. Only populated for LLM Evaluator Logs.
+
-#### 🔌 Usage
-
-
+**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
+
+
+
+
-
-```python
-from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.evaluators.list_versions(
- id="ev_890bcd",
-)
-
-```
+**error:** `typing.Optional[str]` — Error message if the log is an error.
+
+
+
+-
+
+**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
+
-#### ⚙️ Parameters
-
-
+**stdout:** `typing.Optional[str]` — Captured log and debug statements.
+
+
+
+
-
-**id:** `str` — Unique identifier for the Evaluator.
+**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider. Only populated for LLM Evaluator Logs.
@@ -6198,7 +6117,7 @@ client.evaluators.list_versions(
-
-**evaluator_aggregates:** `typing.Optional[bool]` — Whether to include Evaluator aggregate results for the versions in the response
+**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received from the provider. Only populated for LLM Evaluator Logs.
@@ -6206,70 +6125,103 @@ client.evaluators.list_versions(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
+
+
+-
+
+**source:** `typing.Optional[str]` — Identifies where the model was called from.
+
+
+-
+**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
+
-
-client.evaluators.delete_evaluator_version(...)
-
-#### 📝 Description
+**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+
+
-
+**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+
+
+
-
-Delete a version of the Evaluator.
+**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
+
+
+
+-
+
+**user:** `typing.Optional[str]` — End-user ID related to the Log.
+
-#### 🔌 Usage
-
-
+**create_evaluator_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to.
+
+
+
+
-
-```python
-from humanloop import Humanloop
+**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
+
+
+
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.evaluators.delete_evaluator_version(
- id="id",
- version_id="version_id",
-)
+
+-
-```
+**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+
+
+-
+
+**output_message:** `typing.Optional[ChatMessageParams]` — The message returned by the LLM. Only populated for LLM Evaluator Logs.
+
-#### ⚙️ Parameters
-
-
+**judgment:** `typing.Optional[CreateEvaluatorLogRequestJudgmentParams]` — Evaluator assessment of the Log (see the sketch after this parameter list).
+
+
+
+
-
-**id:** `str` — Unique identifier for Evaluator.
+**marked_completed:** `typing.Optional[bool]` — Whether the Log has been manually marked as completed by a user.
@@ -6277,7 +6229,7 @@ client.evaluators.delete_evaluator_version(
-
-**version_id:** `str` — Unique identifier for the specific version of the Evaluator.
+**spec:** `typing.Optional[CreateEvaluatorLogRequestSpecParams]`
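+
+A sketch of submitting a judgment, assuming a boolean Evaluator targeted by path (the parent Log ID is hypothetical, and the judgment type must match the Evaluator's return type):
+
+```python
+client.evaluators.log(
+    parent_id="log_123abc",
+    path="Shared Evaluators/Accuracy Evaluator",
+    judgment=True,
+)
+
+```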
@@ -6297,7 +6249,7 @@ client.evaluators.delete_evaluator_version(
-client.evaluators.update_evaluator_version(...)
+client.evaluators.list(...)
-
@@ -6309,7 +6261,7 @@ client.evaluators.delete_evaluator_version(
-
-Update the name or description of the Evaluator version.
+Get a list of all Evaluators.
@@ -6329,10 +6281,14 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.evaluators.update_evaluator_version(
- id="id",
- version_id="version_id",
+response = client.evaluators.list(
+ size=1,
)
+for item in response:
+    print(item)
+# alternatively, you can paginate page-by-page
+for page in response.iter_pages():
+    print(page)
```
@@ -6348,7 +6304,7 @@ client.evaluators.update_evaluator_version(
-
-**id:** `str` — Unique identifier for Evaluator.
+**page:** `typing.Optional[int]` — Page offset for pagination.
@@ -6356,7 +6312,7 @@ client.evaluators.update_evaluator_version(
-
-**version_id:** `str` — Unique identifier for the specific version of the Evaluator.
+**size:** `typing.Optional[int]` — Page size for pagination. Number of Evaluators to fetch.
@@ -6364,7 +6320,7 @@ client.evaluators.update_evaluator_version(
-
-**name:** `typing.Optional[str]` — Name of the version.
+**name:** `typing.Optional[str]` — Case-insensitive filter for Evaluator name.
@@ -6372,7 +6328,23 @@ client.evaluators.update_evaluator_version(
-
-**description:** `typing.Optional[str]` — Description of the version.
+**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Evaluator. This filter matches against both email address and name of users.
+
+
+
+
+
+-
+
+**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Evaluators by.
+
+
+
+
+
+-
+
+**order:** `typing.Optional[SortOrder]` — Direction to sort by.
@@ -6392,7 +6364,7 @@ client.evaluators.update_evaluator_version(
-client.evaluators.set_deployment(...)
+client.evaluators.upsert(...)
-
@@ -6404,10 +6376,13 @@ client.evaluators.update_evaluator_version(
-
-Deploy Evaluator to an Environment.
+Create an Evaluator or update it with a new version if it already exists.
-Set the deployed version for the specified Environment. This Evaluator
-will be used for calls made to the Evaluator in this Environment.
+Evaluators are identified by the `ID` or their `path`. The spec provided determines the version of the Evaluator.
+
+You can provide `version_name` and `version_description` to identify and describe your versions.
+Version names must be unique within an Evaluator - attempting to create a version with a name
+that already exists will result in a 409 Conflict error.
@@ -6427,10 +6402,16 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.evaluators.set_deployment(
- id="ev_890bcd",
- environment_id="staging",
- version_id="evv_012def",
+client.evaluators.upsert(
+ path="Shared Evaluators/Accuracy Evaluator",
+ spec={
+ "arguments_type": "target_required",
+ "return_type": "number",
+ "evaluator_type": "python",
+ "code": "def evaluate(answer, target):\n return 0.5",
+ },
+ version_name="simple-evaluator",
+ version_description="Simple evaluator that returns 0.5",
)
```
@@ -6447,7 +6428,7 @@ client.evaluators.set_deployment(
-
-**id:** `str` — Unique identifier for Evaluator.
+**spec:** `EvaluatorRequestSpecParams`
@@ -6455,7 +6436,7 @@ client.evaluators.set_deployment(
-
-**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
+**path:** `typing.Optional[str]` — Path of the Evaluator, including the name. This locates the Evaluator in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
@@ -6463,7 +6444,23 @@ client.evaluators.set_deployment(
-
-**version_id:** `str` — Unique identifier for the specific version of the Evaluator.
+**id:** `typing.Optional[str]` — ID for an existing Evaluator.
+
+
+
+
+
+-
+
+**version_name:** `typing.Optional[str]` — Unique name for the Evaluator version. Version names must be unique for a given Evaluator.
+
+
+
+
+
+-
+
+**version_description:** `typing.Optional[str]` — Description of the version, e.g., the changes made in this version.
@@ -6483,7 +6480,7 @@ client.evaluators.set_deployment(
-client.evaluators.remove_deployment(...)
+client.evaluators.get(...)
-
@@ -6495,10 +6492,10 @@ client.evaluators.set_deployment(
-
-Remove deployed Evaluator from the Environment.
+Retrieve the Evaluator with the given ID.
-Remove the deployed version for the specified Environment. This Evaluator
-will no longer be used for calls made to the Evaluator in this Environment.
+By default, the deployed version of the Evaluator is returned. Use the query parameters
+`version_id` or `environment` to target a specific version of the Evaluator.
@@ -6518,9 +6515,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.evaluators.remove_deployment(
+client.evaluators.get(
id="ev_890bcd",
- environment_id="staging",
)
```
@@ -6545,7 +6541,15 @@ client.evaluators.remove_deployment(
-
-**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Evaluator to retrieve.
+
+
+
+
+
+-
+
+**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
@@ -6565,7 +6569,7 @@ client.evaluators.remove_deployment(
-client.evaluators.list_environments(...)
+client.evaluators.delete(...)
-
@@ -6577,7 +6581,7 @@ client.evaluators.remove_deployment(
-
-List all Environments and their deployed versions for the Evaluator.
+Delete the Evaluator with the given ID.
@@ -6597,7 +6601,7 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.evaluators.list_environments(
+client.evaluators.delete(
id="ev_890bcd",
)
@@ -6635,7 +6639,7 @@ client.evaluators.list_environments(
-client.evaluators.update_monitoring(...)
+client.evaluators.move(...)
-
@@ -6647,10 +6651,7 @@ client.evaluators.list_environments(
-
-Activate and deactivate Evaluators for monitoring the Evaluator.
-
-An activated Evaluator will automatically be run on all new Logs
-within the Evaluator for monitoring purposes.
+Move the Evaluator to a different path or change the name.
@@ -6670,8 +6671,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.evaluators.update_monitoring(
- id="id",
+client.evaluators.move(
+ id="ev_890bcd",
+ path="new directory/new name",
)
```
@@ -6688,7 +6690,7 @@ client.evaluators.update_monitoring(
-
-**id:** `str`
+**id:** `str` — Unique identifier for Evaluator.
@@ -6696,9 +6698,7 @@ client.evaluators.update_monitoring(
-
-**activate:** `typing.Optional[
- typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]
-]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+**path:** `typing.Optional[str]` — Path of the Evaluator including the Evaluator name, which is used as a unique identifier.
@@ -6706,9 +6706,7 @@ client.evaluators.update_monitoring(
-
-**deactivate:** `typing.Optional[
- typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]
-]` — Evaluators to deactivate. These will not be run on new Logs.
+**name:** `typing.Optional[str]` — Name of the Evaluator, which is used as a unique identifier.
@@ -6728,8 +6726,7 @@ client.evaluators.update_monitoring(
-## Flows
-client.flows.log(...)
+client.evaluators.list_versions(...)
-
@@ -6741,13 +6738,7 @@ client.evaluators.update_monitoring(
-
-Log to a Flow.
-
-You can use query parameters `version_id`, or `environment`, to target
-an existing version of the Flow. Otherwise, the default deployed version will be chosen.
-
-If you create the Flow Log with a `log_status` of `incomplete`, you should later update it to `complete`
-in order to trigger Evaluators.
+Get a list of all the versions of an Evaluator.
@@ -6762,40 +6753,13 @@ in order to trigger Evaluators.
-
```python
-import datetime
-
from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.log(
- id="fl_6o701g4jmcanPVHxdqD0O",
- flow={
- "attributes": {
- "prompt": {
- "template": "You are a helpful assistant helping with medical anamnesis",
- "model": "gpt-4o",
- "temperature": 0.8,
- },
- "tool": {
- "name": "retrieval_tool_v3",
- "description": "Retrieval tool for MedQA.",
- "source_code": "def retrieval_tool(question: str) -> str:\n pass\n",
- },
- }
- },
- inputs={
- "question": "Patient with a history of diabetes and hypertension presents with chest pain and shortness of breath."
- },
- output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.",
- log_status="incomplete",
- start_time=datetime.datetime.fromisoformat(
- "2024-07-08 19:40:35+00:00",
- ),
- end_time=datetime.datetime.fromisoformat(
- "2024-07-08 19:40:39+00:00",
- ),
+client.evaluators.list_versions(
+ id="ev_890bcd",
)
```
@@ -6812,7 +6776,7 @@ client.flows.log(
-
-**version_id:** `typing.Optional[str]` — A specific Version ID of the Flow to log to.
+**id:** `str` — Unique identifier for the Evaluator.
@@ -6820,7 +6784,7 @@ client.flows.log(
-
-**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
+**evaluator_aggregates:** `typing.Optional[bool]` — Whether to include Evaluator aggregate results for the versions in the response.
@@ -6828,31 +6792,70 @@ client.flows.log(
-
-**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — List of chat messages that were used as an input to the Flow.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+client.evaluators.delete_evaluator_version(...)
-
-**output_message:** `typing.Optional[ChatMessageParams]` — The output message returned by this Flow.
-
+#### 📝 Description
+
+
+-
+
+
+-
+
+Delete a version of the Evaluator.
+
+
+#### 🔌 Usage
+
-
-**run_id:** `typing.Optional[str]` — Unique identifier for the Run to associate the Log to.
-
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.evaluators.delete_evaluator_version(
+ id="id",
+ version_id="version_id",
+)
+
+```
+
+
+#### ⚙️ Parameters
+
-
-**path:** `typing.Optional[str]` — Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+
+-
+
+**id:** `str` — Unique identifier for Evaluator.
@@ -6860,7 +6863,7 @@ client.flows.log(
-
-**id:** `typing.Optional[str]` — ID for an existing Flow.
+**version_id:** `str` — Unique identifier for the specific version of the Evaluator.
@@ -6868,15 +6871,70 @@ client.flows.log(
-
-**start_time:** `typing.Optional[dt.datetime]` — The start time of the Trace. Will be updated if a child Log with an earlier start time is added.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+client.evaluators.update_evaluator_version(...)
-
-**end_time:** `typing.Optional[dt.datetime]` — The end time of the Trace. Will be updated if a child Log with a later end time is added.
+#### 📝 Description
+
+
+-
+
+
+-
+
+Update the name or description of the Evaluator version.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.evaluators.update_evaluator_version(
+ id="id",
+ version_id="version_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Evaluator.
@@ -6884,7 +6942,7 @@ client.flows.log(
-
-**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+**version_id:** `str` — Unique identifier for the specific version of the Evaluator.
@@ -6892,7 +6950,7 @@ client.flows.log(
-
-**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
+**name:** `typing.Optional[str]` — Name of the version.
@@ -6900,7 +6958,3044 @@ client.flows.log(
-
-**error:** `typing.Optional[str]` — Error message if the log is an error.
+**description:** `typing.Optional[str]` — Description of the version.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.evaluators.set_deployment(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Deploy Evaluator to an Environment.
+
+Set the deployed version for the specified Environment. This Evaluator
+will be used for calls made to the Evaluator in this Environment.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.evaluators.set_deployment(
+ id="ev_890bcd",
+ environment_id="staging",
+ version_id="evv_012def",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Evaluator.
+
+
+
+
+
+-
+
+**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
+
+
+
+
+
+-
+
+**version_id:** `str` — Unique identifier for the specific version of the Evaluator.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.evaluators.remove_deployment(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Remove deployed Evaluator from the Environment.
+
+Remove the deployed version for the specified Environment. This Evaluator
+will no longer be used for calls made to the Evaluator in this Environment.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.evaluators.remove_deployment(
+ id="ev_890bcd",
+ environment_id="staging",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Evaluator.
+
+
+
+
+
+-
+
+**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.evaluators.list_environments(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+List all Environments and their deployed versions for the Evaluator.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.evaluators.list_environments(
+ id="ev_890bcd",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Evaluator.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.evaluators.update_monitoring(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Activate and deactivate Evaluators for monitoring the Evaluator.
+
+An activated Evaluator will automatically be run on all new Logs
+within the Evaluator for monitoring purposes.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.evaluators.update_monitoring(
+ id="id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str`
+
+
+
+
+
+-
+
+**activate:** `typing.Optional[
+ typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]
+]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+
+
+
+
+
+-
+
+**deactivate:** `typing.Optional[
+ typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]
+]` — Evaluators to deactivate. These will not be run on new Logs.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
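+
+For example, activating one Evaluator version for monitoring via the `activate` parameter above (IDs hypothetical):
+
+```python
+client.evaluators.update_monitoring(
+    id="ev_890bcd",
+    activate=[{"evaluator_version_id": "evv_012def"}],
+)
+
+```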
+
+
+
+
+
+
+
+
+
+
+
+## Flows
+client.flows.log(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Log to a Flow.
+
+You can use query parameters `version_id`, or `environment`, to target
+an existing version of the Flow. Otherwise, the default deployed version will be chosen.
+
+If you create the Flow Log with a `log_status` of `incomplete`, you should later update it to `complete`
+in order to trigger Evaluators.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+import datetime
+
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.log(
+ id="fl_6o701g4jmcanPVHxdqD0O",
+ flow={
+ "attributes": {
+ "prompt": {
+ "template": "You are a helpful assistant helping with medical anamnesis",
+ "model": "gpt-4o",
+ "temperature": 0.8,
+ },
+ "tool": {
+ "name": "retrieval_tool_v3",
+ "description": "Retrieval tool for MedQA.",
+ "source_code": "def retrieval_tool(question: str) -> str:\n pass\n",
+ },
+ }
+ },
+ inputs={
+ "question": "Patient with a history of diabetes and hypertension presents with chest pain and shortness of breath."
+ },
+ output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.",
+ log_status="incomplete",
+ start_time=datetime.datetime.fromisoformat(
+ "2024-07-08 21:40:35+00:00",
+ ),
+ end_time=datetime.datetime.fromisoformat(
+ "2024-07-08 21:40:39+00:00",
+ ),
+)
+
+```
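+
+The incomplete-then-complete workflow described above, sketched end to end (IDs and payloads are hypothetical, and it assumes child Logs are nested under the parent with `trace_parent_id`):
+
+```python
+# Open a Trace with an incomplete parent Log.
+parent = client.flows.log(
+    path="Personal Projects/MedQA Flow",
+    log_status="incomplete",
+)
+# Nest a child Log under the parent.
+client.flows.log(
+    path="Personal Projects/MedQA Flow",
+    trace_parent_id=parent.id,
+    output="Intermediate retrieval step",
+    log_status="complete",
+)
+# Mark the parent complete so monitoring Evaluators can run.
+client.flows.update_log(
+    log_id=parent.id,
+    inputs={"question": "..."},
+    output="Final answer",
+    log_status="complete",
+)
+
+```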
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Flow to log to.
+
+
+
+
+
+-
+
+**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
+
+
+
+
+
+-
+
+**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — List of chat messages that were used as an input to the Flow.
+
+
+
+
+
+-
+
+**output_message:** `typing.Optional[ChatMessageParams]` — The output message returned by this Flow.
+
+
+
+
+
+-
+
+**run_id:** `typing.Optional[str]` — Unique identifier for the Run to associate the Log to.
+
+
+
+
+
+-
+
+**path:** `typing.Optional[str]` — Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+
+
+
+
+-
+
+**id:** `typing.Optional[str]` — ID for an existing Flow.
+
+
+
+
+
+-
+
+**start_time:** `typing.Optional[dt.datetime]` — The start time of the Trace. Will be updated if a child Log with an earlier start time is added.
+
+
+
+
+
+-
+
+**end_time:** `typing.Optional[dt.datetime]` — The end time of the Trace. Will be updated if a child Log with a later end time is added.
+
+
+
+
+
+-
+
+**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+
+
+
+
+-
+
+**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
+
+
+
+
+
+-
+
+**error:** `typing.Optional[str]` — Error message if the log is an error.
+
+
+
+
+
+-
+
+**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
+
+
+
+
+
+-
+
+**stdout:** `typing.Optional[str]` — Captured log and debug statements.
+
+
+
+
+
+-
+
+**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider.
+
+
+
+
+
+-
+
+**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received from the provider.
+
+
+
+
+
+-
+
+**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
+
+
+
+
+
+-
+
+**source:** `typing.Optional[str]` — Identifies where the model was called from.
+
+
+
+
+
+-
+
+**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
+
+
+
+
+
+-
+
+**log_status:** `typing.Optional[LogStatus]` — Status of the Flow Log. When a Log is updated from `incomplete` to `complete`, it becomes available to Monitoring Evaluators. Flow Logs cannot have an unspecified status: they must start as `incomplete` to allow children to be added. Provide `complete` if you don't intend to add children to the trace.
+
+
+
+
+
+-
+
+**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+
+
+
+
+-
+
+**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
+
+
+
+
+
+-
+
+**user:** `typing.Optional[str]` — End-user ID related to the Log.
+
+
+
+
+
+-
+
+**flow_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to.
+
+
+
+
+
+-
+
+**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
+
+
+
+
+
+-
+
+**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+
+
+
+
+-
+
+**flow:** `typing.Optional[FlowKernelRequestParams]` — Flow used to generate the Trace.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.update_log(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Update the status, inputs, output of a Flow Log.
+
+Marking a Flow Log as complete will trigger any monitoring Evaluators to run.
+Inputs and output (or error) must be provided in order to mark it as complete.
+
+The `end_time` log attribute will be set to match the time the log is marked as complete.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.update_log(
+ log_id="medqa_experiment_0001",
+ inputs={
+ "question": "Patient with a history of diabetes and normal tension presents with chest pain and shortness of breath."
+ },
+ output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.",
+ log_status="complete",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**log_id:** `str` — Unique identifier of the Flow Log.
+
+
+
+
+
+-
+
+**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — List of chat messages that were used as an input to the Flow.
+
+
+
+
+
+-
+
+**output_message:** `typing.Optional[ChatMessageParams]` — The output message returned by this Flow.
+
+
+
+
+
+-
+
+**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the Flow Log.
+
+
+
+
+
+-
+
+**output:** `typing.Optional[str]` — The output of the Flow Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`.
+
+
+
+
+
+-
+
+**error:** `typing.Optional[str]` — The error message of the Flow Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`.
+
+
+
+
+
+-
+
+**log_status:** `typing.Optional[LogStatus]` — Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.get(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Retrieve the Flow with the given ID.
+
+By default, the deployed version of the Flow is returned. Use the query parameters
+`version_id` or `environment` to target a specific version of the Flow.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.get(
+ id="fl_6o701g4jmcanPVHxdqD0O",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Flow.
+
+
+
+
+
+-
+
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Flow to retrieve.
+
+
+
+
+
+-
+
+**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.delete(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Delete the Flow with the given ID.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.delete(
+ id="fl_6o701g4jmcanPVHxdqD0O",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Flow.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.move(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Move the Flow to a different path or change the name.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.move(
+ id="fl_6o701g4jmcanPVHxdqD0O",
+ path="new directory/new name",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Flow.
+
+
+
+
+
+-
+
+**path:** `typing.Optional[str]` — Path of the Flow including the Flow name, which is used as a unique identifier.
+
+
+
+
+
+-
+
+**name:** `typing.Optional[str]` — Name of the Flow.
+
+
+
+
+
+-
+
+**directory_id:** `typing.Optional[str]` — Unique identifier for the Directory to move Flow to. Starts with `dir_`.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.list(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Get a list of Flows.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+response = client.flows.list(
+ size=1,
+)
+for item in response:
+    print(item)
+# alternatively, you can paginate page-by-page
+for page in response.iter_pages():
+    print(page)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**page:** `typing.Optional[int]` — Page number for pagination.
+
+
+
+
+
+-
+
+**size:** `typing.Optional[int]` — Page size for pagination. Number of Flows to fetch.
+
+
+
+
+
+-
+
+**name:** `typing.Optional[str]` — Case-insensitive filter for Flow name.
+
+
+
+
+
+-
+
+**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Flow. This filter matches against both email address and name of users.
+
+
+
+
+
+-
+
+**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Flows by.
+
+
+
+
+
+-
+
+**order:** `typing.Optional[SortOrder]` — Direction to sort by.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.upsert(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Create or update a Flow.
+
+Flows can also be identified by the `ID` or their `path`.
+
+You can provide `version_name` and `version_description` to identify and describe your versions.
+Version names must be unique within a Flow - attempting to create a version with a name
+that already exists will result in a 409 Conflict error.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.upsert(
+ path="Personal Projects/MedQA Flow",
+ attributes={
+ "prompt": {
+ "template": "You are a helpful medical assistant helping with medical anamnesis. Answer {{question}}",
+ "model": "gpt-4o",
+ "temperature": 0.8,
+ },
+ "tool": {
+ "name": "retrieval_tool_v3",
+ "description": "Retrieval tool for MedQA.",
+ "source_code": "def retrieval_tool(question: str) -> str:\n pass\n",
+ },
+ "version_name": "medqa-flow-v1",
+ "version_description": "Initial version",
+ },
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**attributes:** `typing.Dict[str, typing.Optional[typing.Any]]` — A key-value object identifying the Flow Version.
+
+
+
+
+
+-
+
+**path:** `typing.Optional[str]` — Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+
+
+
+
+-
+
+**id:** `typing.Optional[str]` — ID for an existing Flow.
+
+
+
+
+
+-
+
+**version_name:** `typing.Optional[str]` — Unique name for the Flow version. Version names must be unique for a given Flow.
+
+
+
+
+
+-
+
+**version_description:** `typing.Optional[str]` — Description of the version, e.g., the changes made in this version.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.list_versions(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Get a list of all the versions of a Flow.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.list_versions(
+ id="fl_6o701g4jmcanPVHxdqD0O",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Flow.
+
+
+
+
+
+-
+
+**evaluator_aggregates:** `typing.Optional[bool]` — Whether to include Evaluator aggregate results for the versions in the response.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.delete_flow_version(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Delete a version of the Flow.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.delete_flow_version(
+ id="id",
+ version_id="version_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Flow.
+
+
+
+
+
+-
+
+**version_id:** `str` — Unique identifier for the specific version of the Flow.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.update_flow_version(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Update the name or description of the Flow version.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.update_flow_version(
+ id="id",
+ version_id="version_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Flow.
+
+
+
+
+
+-
+
+**version_id:** `str` — Unique identifier for the specific version of the Flow.
+
+
+
+
+
+-
+
+**name:** `typing.Optional[str]` — Name of the version.
+
+
+
+
+
+-
+
+**description:** `typing.Optional[str]` — Description of the version.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.set_deployment(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Deploy Flow to an Environment.
+
+Set the deployed version for the specified Environment. This Flow
+will be used for calls made to the Flow in this Environment.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.set_deployment(
+ id="fl_6o701g4jmcanPVHxdqD0O",
+ environment_id="staging",
+ version_id="flv_6o701g4jmcanPVHxdqD0O",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Flow.
+
+
+
+
+
+-
+
+**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
+
+
+
+
+
+-
+
+**version_id:** `str` — Unique identifier for the specific version of the Flow.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.remove_deployment(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Remove deployed Flow from the Environment.
+
+Remove the deployed version for the specified Environment. This Flow
+will no longer be used for calls made to the Flow in this Environment.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.remove_deployment(
+ id="fl_6o701g4jmcanPVHxdqD0O",
+ environment_id="staging",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Flow.
+
+
+
+
+
+-
+
+**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.list_environments(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+List all Environments and their deployed versions for the Flow.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.list_environments(
+ id="fl_6o701g4jmcanPVHxdqD0O",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Flow.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.update_monitoring(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Activate and deactivate Evaluators for monitoring the Flow.
+
+An activated Evaluator will automatically be run on all new "completed" Logs
+within the Flow for monitoring purposes.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.update_monitoring(
+ id="fl_6o701g4jmcanPVHxdqD0O",
+ activate=[{"evaluator_version_id": "evv_1abc4308abd"}],
+)
+
+```
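+
+Deactivation works the same way. A minimal sketch, reusing the IDs from the example above and the `deactivate` parameter documented below:
+
+```python
+client.flows.update_monitoring(
+ id="fl_6o701g4jmcanPVHxdqD0O",
+ deactivate=[{"evaluator_version_id": "evv_1abc4308abd"}],
+)
+```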
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str`
+
+
+
+
+
+-
+
+**activate:** `typing.Optional[
+ typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]
+]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+
+
+
+
+
+-
+
+**deactivate:** `typing.Optional[
+ typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]
+]` — Evaluators to deactivate. These will not be run on new Logs.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## Agents
+client.agents.log(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Create an Agent Log.
+
+You can use the query parameters `version_id` or `environment` to target
+an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete`
+in order to trigger Evaluators.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.agents.log()
+
+```
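+
+A fuller sketch of the incomplete-to-complete lifecycle described above, continuing from the `client` above. The `path`, Agent ID and message content are hypothetical, and it assumes the returned Log exposes its ID as `id`; `update_log` is documented below:
+
+```python
+log = client.agents.log(
+ path="My Agents/Support Agent",
+ messages=[{"role": "user", "content": "Hello"}],
+ log_status="incomplete",
+)
+client.agents.update_log(
+ id="ag_1234567890",
+ log_id=log.id,
+ log_status="complete",
+)
+```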
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Agent to log to.
+
+
+
+
+
+-
+
+**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
+
+
+
+
+
+-
+
+**run_id:** `typing.Optional[str]` — Unique identifier for the Run to associate the Log to.
+
+
+
+
+
+-
+
+**path:** `typing.Optional[str]` — Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+
+
+
+
+-
+
+**id:** `typing.Optional[str]` — ID for an existing Agent.
+
+
+
+
+
+-
+
+**output_message:** `typing.Optional[ChatMessageParams]` — The message returned by the provider.
+
+
+
+
+
+-
+
+**prompt_tokens:** `typing.Optional[int]` — Number of tokens in the prompt used to generate the output.
+
+
+
+
+
+-
+
+**reasoning_tokens:** `typing.Optional[int]` — Number of reasoning tokens used to generate the output.
+
+
+
+
+
+-
+
+**output_tokens:** `typing.Optional[int]` — Number of tokens in the output generated by the model.
+
+
+
+
+
+-
+
+**prompt_cost:** `typing.Optional[float]` — Cost in dollars associated with the tokens in the prompt.
+
+
+
+
+
+-
+
+**output_cost:** `typing.Optional[float]` — Cost in dollars associated with the tokens in the output.
+
+
+
+
+
+-
+
+**finish_reason:** `typing.Optional[str]` — Reason the generation finished.
+
+
+
+
+
+-
+
+**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — The messages passed to the provider chat endpoint.
+
+
+
+
+
+-
+
+**tool_choice:** `typing.Optional[AgentLogRequestToolChoiceParams]`
+
+Controls how the model uses tools. The following options are supported:
+- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+- `'required'` means the model must call one or more of the provided tools.
+- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+
+
+
+
+-
+
+**agent:** `typing.Optional[AgentLogRequestAgentParams]`
+
+The agent configuration to use. Two formats are supported:
+- An `AgentKernelRequest` object containing the agent configuration
+- A string containing a serialized .agent file
+A new Agent version will be created if the provided details are new.
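+
+For illustration, both formats might look like this (the model name and file path are hypothetical; the inline dict assumes `AgentKernelRequest` fields such as `model`, documented under `client.agents.upsert(...)`):
+
+```python
+# Inline AgentKernelRequest-style configuration.
+client.agents.log(agent={"model": "gpt-4o"})
+
+# A serialized .agent file, e.g. produced by client.agents.serialize(...).
+with open("support.agent") as f:
+ client.agents.log(agent=f.read())
+```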
+
+
+
+
+
+-
+
+**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
+
+
+
+
+
+-
+
+**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
+
+
+
+
+
+-
+
+**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+
+
+
+
+-
+
+**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
+
+
+
+
+
+-
+
+**error:** `typing.Optional[str]` — Error message if the log is an error.
+
+
+
+
+
+-
+
+**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
+
+
+
+
+
+-
+
+**stdout:** `typing.Optional[str]` — Captured log and debug statements.
+
+
+
+
+
+-
+
+**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider.
+
+
+
+
+
+-
+
+**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received from the provider.
+
+
+
+
+
+-
+
+**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
+
+
+
+
+
+-
+
+**source:** `typing.Optional[str]` — Identifies where the model was called from.
+
+
+
+
+
+-
+
+**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
+
+
+
+
+
+-
+
+**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+
+
+
+
+-
+
+**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+
+
+
+
+-
+
+**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
+
+
+
+
+
+-
+
+**user:** `typing.Optional[str]` — End-user ID related to the Log.
+
+
+
+
+
+-
+
+**agent_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated with.
+
+
+
+
+
+-
+
+**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
+
+
+
+
+
+-
+
+**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.agents.update_log(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Update a Log.
+
+Update the details of a Log with the given ID.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.agents.update_log(
+ id="id",
+ log_id="log_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Agent.
+
+
+
+
+
+-
+
+**log_id:** `str` — Unique identifier for the Log.
+
+
+
+
+
+-
+
+**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — List of chat messages that were used as an input to the Flow.
+
+
+
+
+
+-
+
+**output_message:** `typing.Optional[ChatMessageParams]` — The output message returned by this Flow.
+
+
+
+
+
+-
+
+**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the Flow Log.
+
+
+
+
+
+-
+
+**output:** `typing.Optional[str]` — The output of the Flow Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`.
+
+
+
+
+
+-
+
+**error:** `typing.Optional[str]` — The error message of the Flow Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`.
+
+
+
+
+
+-
+
+**log_status:** `typing.Optional[LogStatus]` — Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.agents.call_stream(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Call an Agent.
+
+Calling an Agent calls the model provider before logging
+the request, responses and metadata to Humanloop.
+
+You can use the query parameters `version_id` or `environment` to target
+an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+Instead of targeting an existing version explicitly, you can pass in
+Agent details in the request body. In this case, we will check if the details correspond
+to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+when you are storing or deriving your Agent details in code.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+response = client.agents.call_stream()
+for chunk in response.data:
+ print(chunk)
+
+```
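+
+A slightly fuller sketch, targeting a deployed Agent by a hypothetical `path` and printing each streamed chunk:
+
+```python
+response = client.agents.call_stream(
+ path="My Agents/Support Agent",
+ messages=[{"role": "user", "content": "What is my order status?"}],
+)
+for chunk in response.data:
+ print(chunk)
+```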
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Agent to log to.
+
+
+
+
+
+-
+
+**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
+
+
+
+
+
+-
+
+**path:** `typing.Optional[str]` — Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+
+
+
+
+-
+
+**id:** `typing.Optional[str]` — ID for an existing Agent.
+
+
+
+
+
+-
+
+**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — The messages passed to the provider chat endpoint.
+
+
+
+
+
+-
+
+**tool_choice:** `typing.Optional[AgentsCallStreamRequestToolChoiceParams]`
+
+Controls how the model uses tools. The following options are supported:
+- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+- `'required'` means the model must call one or more of the provided tools.
+- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+
+
+
+
+-
+
+**agent:** `typing.Optional[AgentsCallStreamRequestAgentParams]`
+
+The agent configuration to use. Two formats are supported:
+- An `AgentKernelRequest` object containing the agent configuration
+- A string containing a serialized .agent file
+A new Agent version will be created if the provided details are new.
+
+
+
+
+
+-
+
+**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
+
+
+
+
+
+-
+
+**source:** `typing.Optional[str]` — Identifies where the model was called from.
+
+
+
+
+
+-
+
+**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
+
+
+
+
+
+-
+
+**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
+
+
+
+
+
+-
+
+**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
+
+
+
+
+
+-
+
+**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+
+
+
+
+-
+
+**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+
+
+
+
+-
+
+**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
+
+
+
+
+
+-
+
+**user:** `typing.Optional[str]` — End-user ID related to the Log.
+
+
+
+
+
+-
+
+**agents_call_stream_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated with.
+
+
+
+
+
+-
+
+**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
+
+
+
+
+
+-
+
+**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+
+
+
+
+-
+
+**provider_api_keys:** `typing.Optional[ProviderApiKeysParams]` — API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+
+
+
+
+-
+
+**return_inputs:** `typing.Optional[bool]` — Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+
+
+
+
+-
+
+**include_trace_children:** `typing.Optional[bool]` — If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.agents.call(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Call an Agent.
+
+Calling an Agent calls the model provider before logging
+the request, responses and metadata to Humanloop.
+
+You can use the query parameters `version_id` or `environment` to target
+an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+Instead of targeting an existing version explicitly, you can pass in
+Agent details in the request body. In this case, we will check if the details correspond
+to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+when you are storing or deriving your Agent details in code.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.agents.call()
+
+```
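+
+To create a new version on the fly, pass Agent details in the request body as described above. A sketch with a hypothetical `path`; the inline dict assumes `AgentKernelRequest` fields such as `model`:
+
+```python
+client.agents.call(
+ path="My Agents/Support Agent",
+ agent={"model": "gpt-4o"},
+ messages=[{"role": "user", "content": "Hello"}],
+)
+```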
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Agent to log to.
+
+
+
+
+
+-
+
+**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
+
+
+
+
+
+-
+
+**path:** `typing.Optional[str]` — Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+
+
+
+
+-
+
+**id:** `typing.Optional[str]` — ID for an existing Agent.
+
+
+
+
+
+-
+
+**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — The messages passed to the provider chat endpoint.
+
+
+
+
+
+-
+
+**tool_choice:** `typing.Optional[AgentsCallRequestToolChoiceParams]`
+
+Controls how the model uses tools. The following options are supported:
+- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+- `'required'` means the model must call one or more of the provided tools.
+- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+
+
+
+
+-
+
+**agent:** `typing.Optional[AgentsCallRequestAgentParams]`
+
+The agent configuration to use. Two formats are supported:
+- An `AgentKernelRequest` object containing the agent configuration
+- A string containing a serialized .agent file
+A new Agent version will be created if the provided details are new.
+
+
+
+
+
+-
+
+**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
+
+
+
+
+
+-
+
+**source:** `typing.Optional[str]` — Identifies where the model was called from.
+
+
+
+
+
+-
+
+**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
+
+
+
+
+
+-
+
+**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
+
+
+
+
+
+-
+
+**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
+
+
+
+
+
+-
+
+**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+
+
+
+
+-
+
+**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+
+
+
+
+-
+
+**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
+
+
+
+
+
+-
+
+**user:** `typing.Optional[str]` — End-user ID related to the Log.
+
+
+
+
+
+-
+
+**agents_call_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated with.
+
+
+
+
+
+-
+
+**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
+
+
+
+
+
+-
+
+**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+
+
+
+
+-
+
+**provider_api_keys:** `typing.Optional[ProviderApiKeysParams]` — API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+
+
+
+
+-
+
+**return_inputs:** `typing.Optional[bool]` — Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+
+
+
+
+-
+
+**include_trace_children:** `typing.Optional[bool]` — If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.agents.continue_stream(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Continue an incomplete Agent call.
+
+This endpoint allows continuing an existing incomplete Agent call, using the context
+from the previous interaction. The Agent will resume processing from where it left off.
+
+The original log must be in an incomplete state to be continued.
+
+The messages in the request will be appended
+to the original messages in the Log.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+response = client.agents.continue_stream(
+ log_id="log_id",
+ messages=[{"role": "user"}],
+)
+for chunk in response.data:
+ print(chunk)
+
+```
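+
+As described above, the continuation messages often start with Tool results. A sketch with hypothetical IDs; `tool_call_id` is assumed to be the field linking a Tool message to the Assistant message's tool call:
+
+```python
+response = client.agents.continue_stream(
+ log_id="log_1234567890",
+ messages=[
+ {
+ "role": "tool",
+ "content": '{"status": "shipped"}',
+ "tool_call_id": "call_abc123",
+ }
+ ],
+)
+for chunk in response.data:
+ print(chunk)
+```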
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**log_id:** `str` — This identifies the Agent Log to continue.
+
+
+
+
+
+-
+
+**messages:** `typing.Sequence[ChatMessageParams]` — The additional messages with which to continue the Agent Log. Typically, these should start with Tool messages containing results for the previous Assistant message's tool calls.
+
+
+
+
+
+-
+
+**provider_api_keys:** `typing.Optional[ProviderApiKeysParams]` — API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+
+
+
+
+-
+
+**include_trace_children:** `typing.Optional[bool]` — If true, populate `trace_children` for the returned Agent Log. Defaults to false.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.agents.continue_(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Continue an incomplete Agent call.
+
+This endpoint allows continuing an existing incomplete Agent call, using the context
+from the previous interaction. The Agent will resume processing from where it left off.
+
+The original log must be in an incomplete state to be continued.
+
+The messages in the request will be appended
+to the original messages in the Log.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.agents.continue_(
+ log_id="log_id",
+ messages=[{"role": "user"}],
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**log_id:** `str` — This identifies the Agent Log to continue.
@@ -6908,7 +10003,7 @@ client.flows.log(
-
-**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
+**messages:** `typing.Sequence[ChatMessageParams]` — The additional messages with which to continue the Agent Log. Typically, these should start with Tool messages containing results for the previous Assistant message's tool calls.
@@ -6916,7 +10011,7 @@ client.flows.log(
-
-**stdout:** `typing.Optional[str]` — Captured log and debug statements.
+**provider_api_keys:** `typing.Optional[ProviderApiKeysParams]` — API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
@@ -6924,7 +10019,7 @@ client.flows.log(
-
-**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider.
+**include_trace_children:** `typing.Optional[bool]` — If true, populate `trace_children` for the returned Agent Log. Defaults to false.
@@ -6932,55 +10027,67 @@ client.flows.log(
-
-**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received the provider.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
-
+
+client.agents.list(...)
-
-**source:** `typing.Optional[str]` — Identifies where the model was called from.
-
-
-
+#### 📝 Description
-
-**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
-
-
-
-
-
-**log_status:** `typing.Optional[LogStatus]` — Status of the Flow Log. When a Log is updated from `incomplete` to `complete`, it becomes available to Monitoring Evaluators. Flow Logs cannot have an unspecified status: they must start as `incomplete` to allow children to be added. Provide `complete` if you don't intend to add children to the trace.
-
+Get a list of all Agents.
+
+
+#### 🔌 Usage
+
-
-**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
-
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.agents.list()
+
+```
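+
+Assuming `agents.list` paginates like the other list endpoints in this SDK (as `flows.list` does), you can iterate items or pages:
+
+```python
+response = client.agents.list(
+ size=10,
+)
+for item in response:
+ print(item)
+# alternatively, you can paginate page-by-page
+for page in response.iter_pages():
+ print(page)
+```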
+
+
+#### ⚙️ Parameters
+
-
-**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
+
+-
+
+**page:** `typing.Optional[int]` — Page number for pagination.
@@ -6988,7 +10095,7 @@ client.flows.log(
-
-**user:** `typing.Optional[str]` — End-user ID related to the Log.
+**size:** `typing.Optional[int]` — Page size for pagination. Number of Agents to fetch.
@@ -6996,7 +10103,7 @@ client.flows.log(
-
-**flow_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to.
+**name:** `typing.Optional[str]` — Case-insensitive filter for Agent name.
@@ -7004,7 +10111,7 @@ client.flows.log(
-
-**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
+**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users.
@@ -7012,7 +10119,7 @@ client.flows.log(
-
-**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Agents by.
@@ -7020,7 +10127,7 @@ client.flows.log(
-
-**flow:** `typing.Optional[FlowKernelRequestParams]` — Flow used to generate the Trace.
+**order:** `typing.Optional[SortOrder]` — Direction to sort by.
@@ -7040,7 +10147,7 @@ client.flows.log(
-client.flows.update_log(...)
+client.agents.upsert(...)
-
@@ -7052,12 +10159,14 @@ client.flows.log(
-
-Update the status, inputs, output of a Flow Log.
+Create an Agent or update it with a new version if it already exists.
-Marking a Flow Log as complete will trigger any monitoring Evaluators to run.
-Inputs and output (or error) must be provided in order to mark it as complete.
+Agents are identified by `ID` or by `path`. The parameters (i.e. the template, temperature, model, etc.) and
+tools determine the version of the Agent.
-The end_time log attribute will be set to match the time the log is marked as complete.
+You can provide `version_name` and `version_description` to identify and describe your versions.
+Version names must be unique within an Agent - attempting to create a version with a name
+that already exists will result in a 409 Conflict error.
@@ -7077,13 +10186,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.update_log(
- log_id="medqa_experiment_0001",
- inputs={
- "question": "Patient with a history of diabetes and normal tension presents with chest pain and shortness of breath."
- },
- output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.",
- log_status="complete",
+client.agents.upsert(
+ model="model",
)
```
@@ -7100,7 +10204,7 @@ client.flows.update_log(
-
-**log_id:** `str` — Unique identifier of the Flow Log.
+**model:** `str` — The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
@@ -7108,7 +10212,7 @@ client.flows.update_log(
-
-**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — List of chat messages that were used as an input to the Flow.
+**path:** `typing.Optional[str]` — Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
@@ -7116,7 +10220,7 @@ client.flows.update_log(
-
-**output_message:** `typing.Optional[ChatMessageParams]` — The output message returned by this Flow.
+**id:** `typing.Optional[str]` — ID for an existing Agent.
@@ -7124,7 +10228,7 @@ client.flows.update_log(
-
-**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the Flow Log.
+**endpoint:** `typing.Optional[ModelEndpoints]` — The provider model endpoint used.
@@ -7132,7 +10236,14 @@ client.flows.update_log(
-
-**output:** `typing.Optional[str]` — The output of the Flow Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`.
+**template:** `typing.Optional[AgentRequestTemplateParams]`
+
+The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+For completion models, provide a prompt template as a string.
+
+Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
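+
+A sketch of a chat template with a `{{question}}` input variable (the `path` and contents are hypothetical):
+
+```python
+client.agents.upsert(
+ model="gpt-4o",
+ path="My Agents/QA Agent",
+ template=[
+ {"role": "system", "content": "You are a concise QA assistant."},
+ {"role": "user", "content": "{{question}}"},
+ ],
+)
+```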
@@ -7140,7 +10251,7 @@ client.flows.update_log(
-
-**error:** `typing.Optional[str]` — The error message of the Flow Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`.
+**template_language:** `typing.Optional[TemplateLanguage]` — The template language to use for rendering the template.
@@ -7148,7 +10259,7 @@ client.flows.update_log(
-
-**log_status:** `typing.Optional[LogStatus]` — Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs.
+**provider:** `typing.Optional[ModelProviders]` — The company providing the underlying model service.
@@ -7156,72 +10267,127 @@ client.flows.update_log(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**max_tokens:** `typing.Optional[int]` — The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt
+
+
+-
+
+**temperature:** `typing.Optional[float]` — What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+
+
+-
+**top_p:** `typing.Optional[float]` — An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+
-
-client.flows.get(...)
-
-#### 📝 Description
+**stop:** `typing.Optional[AgentRequestStopParams]` — The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+
+
+
-
+**presence_penalty:** `typing.Optional[float]` — Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+
+
+
+
-
-Retrieve the Flow with the given ID.
+**frequency_penalty:** `typing.Optional[float]` — Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+
+
+
-By default, the deployed version of the Flow is returned. Use the query parameters
-`version_id` or `environment` to target a specific version of the Flow.
+
+-
+
+**other:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Other parameter values to be passed to the provider call.
+
+
+
+-
+
+**seed:** `typing.Optional[int]` — If specified, model will make a best effort to sample deterministically, but it is not guaranteed.
+
-#### 🔌 Usage
+
+-
+
+**response_format:** `typing.Optional[ResponseFormatParams]` — The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+
+
+
-
+**reasoning_effort:** `typing.Optional[AgentRequestReasoningEffortParams]` — Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an `OpenAIReasoningEffort` enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
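+
+For example (model names hypothetical; the enum value assumes `OpenAIReasoningEffort` accepts values such as `"high"`):
+
+```python
+# OpenAI reasoning models take an effort enum.
+client.agents.upsert(model="o3-mini", reasoning_effort="high")
+# Anthropic reasoning models take a maximum token budget.
+client.agents.upsert(model="claude-3-7-sonnet", reasoning_effort=1024)
+```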
+
+
+
+
-
-```python
-from humanloop import Humanloop
+**tools:** `typing.Optional[typing.Sequence[AgentRequestToolsItemParams]]`
+
+
+
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.flows.get(
- id="fl_6o701g4jmcanPVHxdqD0O",
-)
+
+-
-```
+**attributes:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used.
+
+
+
+-
+
+**max_iterations:** `typing.Optional[int]` — The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
+
-#### ⚙️ Parameters
+
+-
+
+**version_name:** `typing.Optional[str]` — Unique name for the Agent version. Each Agent can only have one version with a given name.
+
+
+
-
+**version_description:** `typing.Optional[str]` — Description of the Version.
+
+
+
+
-
-**id:** `str` — Unique identifier for Flow.
+**description:** `typing.Optional[str]` — Description of the Agent.
@@ -7229,7 +10395,7 @@ client.flows.get(
-
-**version_id:** `typing.Optional[str]` — A specific Version ID of the Flow to retrieve.
+**tags:** `typing.Optional[typing.Sequence[str]]` — List of tags associated with this Agent.
@@ -7237,7 +10403,7 @@ client.flows.get(
-
-**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
+**readme:** `typing.Optional[str]` — Long description of the Agent.
@@ -7257,7 +10423,7 @@ client.flows.get(
-client.flows.delete(...)
+client.agents.delete_agent_version(...)
-
@@ -7269,7 +10435,7 @@ client.flows.get(
-
-Delete the Flow with the given ID.
+Delete a version of the Agent.
@@ -7289,8 +10455,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.delete(
- id="fl_6o701g4jmcanPVHxdqD0O",
+client.agents.delete_agent_version(
+ id="id",
+ version_id="version_id",
)
```
@@ -7307,7 +10474,15 @@ client.flows.delete(
-
-**id:** `str` — Unique identifier for Flow.
+**id:** `str` — Unique identifier for Agent.
+
+
+
+
+
+-
+
+**version_id:** `str` — Unique identifier for the specific version of the Agent.
@@ -7327,7 +10502,7 @@ client.flows.delete(
-client.flows.move(...)
+client.agents.patch_agent_version(...)
-
@@ -7339,7 +10514,7 @@ client.flows.delete(
-
-Move the Flow to a different path or change the name.
+Update the name or description of the Agent version.
@@ -7359,9 +10534,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.move(
- id="fl_6o701g4jmcanPVHxdqD0O",
- path="new directory/new name",
+client.agents.patch_agent_version(
+ id="id",
+ version_id="version_id",
)
```
@@ -7378,7 +10553,7 @@ client.flows.move(
-
-**id:** `str` — Unique identifier for Flow.
+**id:** `str` — Unique identifier for Agent.
@@ -7386,7 +10561,7 @@ client.flows.move(
-
-**path:** `typing.Optional[str]` — Path of the Flow including the Flow name, which is used as a unique identifier.
+**version_id:** `str` — Unique identifier for the specific version of the Agent.
@@ -7394,7 +10569,7 @@ client.flows.move(
-
-**name:** `typing.Optional[str]` — Name of the Flow.
+**name:** `typing.Optional[str]` — Name of the version.
@@ -7402,7 +10577,7 @@ client.flows.move(
-
-**directory_id:** `typing.Optional[str]` — Unique identifier for the Directory to move Flow to. Starts with `dir_`.
+**description:** `typing.Optional[str]` — Description of the version.
@@ -7422,7 +10597,7 @@ client.flows.move(
-client.flows.list(...)
+client.agents.get(...)
-
@@ -7434,7 +10609,10 @@ client.flows.move(
-
-Get a list of Flows.
+Retrieve the Agent with the given ID.
+
+By default, the deployed version of the Agent is returned. Use the query parameters
+`version_id` or `environment` to target a specific version of the Agent.
@@ -7454,14 +10632,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-response = client.flows.list(
- size=1,
+client.agents.get(
+ id="id",
)
-for item in response:
- yield item
-# alternatively, you can paginate page-by-page
-for page in response.iter_pages():
- yield page
```
@@ -7469,55 +10642,101 @@ for page in response.iter_pages():
-#### ⚙️ Parameters
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Agent.
+
+
+
+
+
+-
+
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Agent to retrieve.
+
+
+
+
+
+-
+
+**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+client.agents.delete(...)
-
+#### 📝 Description
+
-
-**page:** `typing.Optional[int]` — Page number for pagination.
-
-
-
-
-
-**size:** `typing.Optional[int]` — Page size for pagination. Number of Flows to fetch.
-
+Delete the Agent with the given ID.
+
+
+#### 🔌 Usage
+
-
-**name:** `typing.Optional[str]` — Case-insensitive filter for Flow name.
-
-
-
-
-
-**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Flow. This filter matches against both email address and name of users.
-
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.agents.delete(
+ id="id",
+)
+
+```
+
+
+
+#### ⚙️ Parameters
-
-**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Flows by
-
-
-
-
-
-**order:** `typing.Optional[SortOrder]` — Direction to sort by.
+**id:** `str` — Unique identifier for Agent.
@@ -7537,7 +10756,7 @@ for page in response.iter_pages():
-client.flows.upsert(...)
+client.agents.move(...)
-
@@ -7549,13 +10768,7 @@ for page in response.iter_pages():
-
-Create or update a Flow.
-
-Flows can also be identified by the `ID` or their `path`.
-
-You can provide `version_name` and `version_description` to identify and describe your versions.
-Version names must be unique within a Flow - attempting to create a version with a name
-that already exists will result in a 409 Conflict error.
+Move the Agent to a different path or change the name.
@@ -7575,22 +10788,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.upsert(
- path="Personal Projects/MedQA Flow",
- attributes={
- "prompt": {
- "template": "You are a helpful medical assistant helping with medical anamnesis. Answer {{question}}",
- "model": "gpt-4o",
- "temperature": 0.8,
- },
- "tool": {
- "name": "retrieval_tool_v3",
- "description": "Retrieval tool for MedQA.",
- "source_code": "def retrieval_tool(question: str) -> str:\n pass\n",
- },
- "version_name": "medqa-flow-v1",
- "version_description": "Initial version",
- },
+client.agents.move(
+ id="id",
)
```
@@ -7607,15 +10806,7 @@ client.flows.upsert(
-
-**attributes:** `typing.Dict[str, typing.Optional[typing.Any]]` — A key-value object identifying the Flow Version.
-
-
-
-
-
--
-
-**path:** `typing.Optional[str]` — Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+**id:** `str` — Unique identifier for Agent.
@@ -7623,7 +10814,7 @@ client.flows.upsert(
-
-**id:** `typing.Optional[str]` — ID for an existing Flow.
+**path:** `typing.Optional[str]` — Path of the Agent including the Agent name, which is used as a unique identifier.
@@ -7631,7 +10822,7 @@ client.flows.upsert(
-
-**version_name:** `typing.Optional[str]` — Unique name for the Flow version. Version names must be unique for a given Flow.
+**name:** `typing.Optional[str]` — Name of the Agent.
@@ -7639,7 +10830,7 @@ client.flows.upsert(
-
-**version_description:** `typing.Optional[str]` — Description of the version, e.g., the changes made in this version.
+**directory_id:** `typing.Optional[str]` — Unique identifier for the Directory to move the Agent to. Starts with `dir_`.
@@ -7659,7 +10850,7 @@ client.flows.upsert(
-client.flows.list_versions(...)
+client.agents.list_versions(...)
-
@@ -7671,7 +10862,7 @@ client.flows.upsert(
-
-Get a list of all the versions of a Flow.
+Get a list of all the versions of an Agent.
@@ -7691,8 +10882,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.list_versions(
- id="fl_6o701g4jmcanPVHxdqD0O",
+client.agents.list_versions(
+ id="id",
)
```
@@ -7709,7 +10900,7 @@ client.flows.list_versions(
-
-**id:** `str` — Unique identifier for Flow.
+**id:** `str` — Unique identifier for Agent.
@@ -7737,7 +10928,7 @@ client.flows.list_versions(
-client.flows.delete_flow_version(...)
+client.agents.set_deployment(...)
-
@@ -7749,7 +10940,10 @@ client.flows.list_versions(
-
-Delete a version of the Flow.
+Deploy Agent to an Environment.
+
+Set the deployed version for the specified Environment. This Agent
+will be used for calls made to the Agent in this Environment.
@@ -7769,8 +10963,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.delete_flow_version(
+client.agents.set_deployment(
id="id",
+ environment_id="environment_id",
version_id="version_id",
)
@@ -7788,7 +10983,7 @@ client.flows.delete_flow_version(
-
-**id:** `str` — Unique identifier for Flow.
+**id:** `str` — Unique identifier for Agent.
@@ -7796,7 +10991,15 @@ client.flows.delete_flow_version(
-
-**version_id:** `str` — Unique identifier for the specific version of the Flow.
+**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
+
+
+
+
+
+-
+
+**version_id:** `str` — Unique identifier for the specific version of the Agent.
@@ -7816,7 +11019,7 @@ client.flows.delete_flow_version(
-client.flows.update_flow_version(...)
+client.agents.remove_deployment(...)
-
@@ -7828,7 +11031,10 @@ client.flows.delete_flow_version(
-
-Update the name or description of the Flow version.
+Remove deployed Agent from the Environment.
+
+Remove the deployed version for the specified Environment. This Agent
+will no longer be used for calls made to the Agent in this Environment.
@@ -7848,9 +11054,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.update_flow_version(
+client.agents.remove_deployment(
id="id",
- version_id="version_id",
+ environment_id="environment_id",
)
```
@@ -7867,23 +11073,7 @@ client.flows.update_flow_version(
-
-**id:** `str` — Unique identifier for Flow.
-
-
-
-
-
--
-
-**version_id:** `str` — Unique identifier for the specific version of the Flow.
-
-
-
-
-
--
-
-**name:** `typing.Optional[str]` — Name of the version.
+**id:** `str` — Unique identifier for Agent.
@@ -7891,7 +11081,7 @@ client.flows.update_flow_version(
-
-**description:** `typing.Optional[str]` — Description of the version.
+**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
@@ -7911,7 +11101,7 @@ client.flows.update_flow_version(
-client.flows.set_deployment(...)
+client.agents.list_environments(...)
-
@@ -7923,10 +11113,7 @@ client.flows.update_flow_version(
-
-Deploy Flow to an Environment.
-
-Set the deployed version for the specified Environment. This Flow
-will be used for calls made to the Flow in this Environment.
+List all Environments and their deployed versions for the Agent.
@@ -7946,10 +11133,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.set_deployment(
- id="fl_6o701g4jmcanPVHxdqD0O",
- environment_id="staging",
- version_id="flv_6o701g4jmcanPVHxdqD0O",
+client.agents.list_environments(
+ id="id",
)
```
@@ -7966,23 +11151,7 @@ client.flows.set_deployment(
-
-**id:** `str` — Unique identifier for Flow.
-
-
-
-
-
--
-
-**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
-
-
-
-
-
--
-
-**version_id:** `str` — Unique identifier for the specific version of the Flow.
+**id:** `str` — Unique identifier for Agent.
@@ -8002,7 +11171,7 @@ client.flows.set_deployment(
-client.flows.remove_deployment(...)
+client.agents.update_monitoring(...)
-
@@ -8014,10 +11183,10 @@ client.flows.set_deployment(
-
-Remove deployed Flow from the Environment.
+Activate and deactivate Evaluators for monitoring the Agent.
-Remove the deployed version for the specified Environment. This Flow
-will no longer be used for calls made to the Flow in this Environment.
+An activated Evaluator will automatically be run on all new Logs
+within the Agent for monitoring purposes.
@@ -8037,9 +11206,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.remove_deployment(
- id="fl_6o701g4jmcanPVHxdqD0O",
- environment_id="staging",
+client.agents.update_monitoring(
+ id="id",
)
```
@@ -8056,7 +11224,7 @@ client.flows.remove_deployment(
-
-**id:** `str` — Unique identifier for Flow.
+**id:** `str`
@@ -8064,7 +11232,19 @@ client.flows.remove_deployment(
-
-**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
+**activate:** `typing.Optional[
+ typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]
+]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+
+
+
+
+
+-
+
+**deactivate:** `typing.Optional[
+ typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]
+]` — Evaluators to deactivate. These will not be run on new Logs.
@@ -8084,7 +11264,7 @@ client.flows.remove_deployment(
-client.flows.list_environments(...)
+client.agents.serialize(...)
-
@@ -8096,7 +11276,13 @@ client.flows.remove_deployment(
-
-List all Environments and their deployed versions for the Flow.
+Serialize an Agent to the .agent file format.
+
+Useful for storing the Agent with your code in a version control system,
+or for editing with an AI tool.
+
+By default, the deployed version of the Agent is returned. Use the query parameters
+`version_id` or `environment` to target a specific version of the Agent.
@@ -8116,8 +11302,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.list_environments(
- id="fl_6o701g4jmcanPVHxdqD0O",
+client.agents.serialize(
+ id="id",
)
```
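+
+Assuming `serialize` returns the .agent file contents as a string, you might store it alongside your code (the Agent ID and filename are hypothetical):
+
+```python
+agent_file = client.agents.serialize(
+ id="ag_1234567890",
+)
+with open("support.agent", "w") as f:
+ f.write(agent_file)
+```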
@@ -8134,7 +11320,23 @@ client.flows.list_environments(
-
-**id:** `str` — Unique identifier for Flow.
+**id:** `str` — Unique identifier for Agent.
+
+
+
+
+
+-
+
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Agent to retrieve.
+
+
+
+
+
+-
+
+**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
@@ -8154,7 +11356,7 @@ client.flows.list_environments(
-client.flows.update_monitoring(...)
+client.agents.deserialize(...)
-
@@ -8166,10 +11368,10 @@ client.flows.list_environments(
-
-Activate and deactivate Evaluators for monitoring the Flow.
+Deserialize an Agent from the .agent file format.
-An activated Evaluator will automatically be run on all new "completed" Logs
-within the Flow for monitoring purposes.
+This returns a subset of the attributes required by an Agent:
+the subset that defines the Agent version (e.g. `model`, `temperature`, etc.).
@@ -8189,9 +11391,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.update_monitoring(
- id="fl_6o701g4jmcanPVHxdqD0O",
- activate=[{"evaluator_version_id": "evv_1abc4308abd"}],
+client.agents.deserialize(
+ agent="agent",
)
```
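+
+A round-trip sketch, reading a stored .agent file back into its version-defining attributes (the filename is hypothetical, and the returned object is assumed to expose fields such as `model` and `temperature`):
+
+```python
+with open("support.agent") as f:
+ kernel = client.agents.deserialize(agent=f.read())
+```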
@@ -8208,27 +11409,7 @@ client.flows.update_monitoring(
-
-**id:** `str`
-
-
-
-
-
--
-
-**activate:** `typing.Optional[
- typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]
-]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
-
-
-
-
-
--
-
-**deactivate:** `typing.Optional[
- typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]
-]` — Evaluators to deactivate. These will not be run on new Logs.
+**agent:** `str`
@@ -8702,6 +11883,14 @@ client.files.list_files()
-
+**directory:** `typing.Optional[str]` — Case-insensitive filter for directory name.
+
+
+
+
+
+-
+
**template:** `typing.Optional[bool]` — Filter to include only template files.
@@ -8742,6 +11931,14 @@ client.files.list_files()
-
+**include_content:** `typing.Optional[bool]` — Whether to include the serialized file content in the response. Currently only supported for agents and prompts.
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -8820,6 +12017,14 @@ client.files.retrieve_by_path(
-
+**include_content:** `typing.Optional[bool]` — Whether to include the serialized file content in the response. Currently only supported for agents and prompts.
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -10190,7 +13395,7 @@ for page in response.iter_pages():
-
-**include_trace_children:** `typing.Optional[bool]` — If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow Logs.
+**include_trace_children:** `typing.Optional[bool]` — If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow or Agent Logs.
diff --git a/src/humanloop/__init__.py b/src/humanloop/__init__.py
index 0c431892..2ad9d39e 100644
--- a/src/humanloop/__init__.py
+++ b/src/humanloop/__init__.py
@@ -1,16 +1,45 @@
# This file was auto-generated by Fern from our API Definition.
from .types import (
+ AgentCallResponse,
+ AgentCallResponseToolChoice,
+ AgentCallStreamResponse,
+ AgentCallStreamResponsePayload,
AgentConfigResponse,
+ AgentContinueResponse,
+ AgentContinueResponseToolChoice,
+ AgentContinueStreamResponse,
+ AgentContinueStreamResponsePayload,
+ AgentInlineTool,
+ AgentKernelRequest,
+ AgentKernelRequestReasoningEffort,
+ AgentKernelRequestStop,
+ AgentKernelRequestTemplate,
+ AgentKernelRequestToolsItem,
+ AgentLinkedFileRequest,
+ AgentLinkedFileResponse,
+ AgentLinkedFileResponseFile,
+ AgentLogResponse,
+ AgentLogResponseToolChoice,
+ AgentLogStreamResponse,
+ AgentResponse,
+ AgentResponseReasoningEffort,
+ AgentResponseStop,
+ AgentResponseTemplate,
+ AgentResponseToolsItem,
+ AnthropicRedactedThinkingContent,
+ AnthropicThinkingContent,
BaseModelsUserResponse,
BooleanEvaluatorStatsResponse,
ChatMessage,
ChatMessageContent,
ChatMessageContentItem,
+ ChatMessageThinkingItem,
ChatRole,
ChatToolType,
CodeEvaluatorRequest,
ConfigToolResponse,
+ CreateAgentLogResponse,
CreateDatapointRequest,
CreateDatapointRequestTargetValue,
CreateEvaluatorLogResponse,
@@ -55,10 +84,12 @@
EvaluatorReturnTypeEnum,
EvaluatorVersionId,
EvaluatorsRequest,
+ EventType,
ExternalEvaluatorRequest,
FeedbackType,
FileEnvironmentResponse,
FileEnvironmentResponseFile,
+ FileEnvironmentVariableRequest,
FileId,
FilePath,
FileRequest,
@@ -76,7 +107,9 @@
ImageUrl,
ImageUrlDetail,
InputResponse,
+ LinkedFileRequest,
LinkedToolResponse,
+ ListAgents,
ListDatasets,
ListEvaluators,
ListFlows,
@@ -85,6 +118,7 @@
LlmEvaluatorRequest,
LogResponse,
LogStatus,
+ LogStreamResponse,
ModelEndpoints,
ModelProviders,
MonitoringEvaluatorEnvironmentRequest,
@@ -93,15 +127,18 @@
MonitoringEvaluatorVersionRequest,
NumericEvaluatorStatsResponse,
ObservabilityStatus,
+ OnAgentCallEnum,
+ OpenAiReasoningEffort,
OverallStats,
+ PaginatedDataAgentResponse,
PaginatedDataEvaluationLogResponse,
PaginatedDataEvaluatorResponse,
PaginatedDataFlowResponse,
PaginatedDataLogResponse,
PaginatedDataPromptResponse,
PaginatedDataToolResponse,
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse,
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem,
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse,
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem,
PaginatedDatapointResponse,
PaginatedDatasetResponse,
PaginatedEvaluationResponse,
@@ -110,6 +147,7 @@
PlatformAccessEnum,
PopulateTemplateResponse,
PopulateTemplateResponsePopulatedTemplate,
+ PopulateTemplateResponseReasoningEffort,
PopulateTemplateResponseStop,
PopulateTemplateResponseTemplate,
ProjectSortBy,
@@ -118,15 +156,16 @@
PromptCallResponseToolChoice,
PromptCallStreamResponse,
PromptKernelRequest,
+ PromptKernelRequestReasoningEffort,
PromptKernelRequestStop,
PromptKernelRequestTemplate,
PromptLogResponse,
PromptLogResponseToolChoice,
PromptResponse,
+ PromptResponseReasoningEffort,
PromptResponseStop,
PromptResponseTemplate,
ProviderApiKeys,
- ReasoningEffort,
ResponseFormat,
ResponseFormatType,
RunStatsResponse,
@@ -139,6 +178,7 @@
TextEvaluatorStatsResponse,
TimeUnit,
ToolCall,
+ ToolCallResponse,
ToolChoice,
ToolFunction,
ToolKernelRequest,
@@ -162,7 +202,29 @@
VersionStatus,
)
from .errors import UnprocessableEntityError
-from . import datasets, directories, evaluations, evaluators, files, flows, logs, prompts, tools
+from . import agents, datasets, directories, evaluations, evaluators, files, flows, logs, prompts, tools
+from .agents import (
+ AgentLogRequestAgent,
+ AgentLogRequestAgentParams,
+ AgentLogRequestToolChoice,
+ AgentLogRequestToolChoiceParams,
+ AgentRequestReasoningEffort,
+ AgentRequestReasoningEffortParams,
+ AgentRequestStop,
+ AgentRequestStopParams,
+ AgentRequestTemplate,
+ AgentRequestTemplateParams,
+ AgentRequestToolsItem,
+ AgentRequestToolsItemParams,
+ AgentsCallRequestAgent,
+ AgentsCallRequestAgentParams,
+ AgentsCallRequestToolChoice,
+ AgentsCallRequestToolChoiceParams,
+ AgentsCallStreamRequestAgent,
+ AgentsCallStreamRequestAgentParams,
+ AgentsCallStreamRequestToolChoice,
+ AgentsCallStreamRequestToolChoiceParams,
+)
from .client import AsyncHumanloop, Humanloop
from .datasets import ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints
from .environment import HumanloopEnvironment
@@ -186,26 +248,63 @@
)
from .files import RetrieveByPathFilesRetrieveByPathPostResponse, RetrieveByPathFilesRetrieveByPathPostResponseParams
from .prompts import (
+ PromptLogRequestPrompt,
+ PromptLogRequestPromptParams,
PromptLogRequestToolChoice,
PromptLogRequestToolChoiceParams,
PromptLogUpdateRequestToolChoice,
PromptLogUpdateRequestToolChoiceParams,
+ PromptRequestReasoningEffort,
+ PromptRequestReasoningEffortParams,
PromptRequestStop,
PromptRequestStopParams,
PromptRequestTemplate,
PromptRequestTemplateParams,
+ PromptsCallRequestPrompt,
+ PromptsCallRequestPromptParams,
PromptsCallRequestToolChoice,
PromptsCallRequestToolChoiceParams,
+ PromptsCallStreamRequestPrompt,
+ PromptsCallStreamRequestPromptParams,
PromptsCallStreamRequestToolChoice,
PromptsCallStreamRequestToolChoiceParams,
)
from .requests import (
+ AgentCallResponseParams,
+ AgentCallResponseToolChoiceParams,
+ AgentCallStreamResponseParams,
+ AgentCallStreamResponsePayloadParams,
AgentConfigResponseParams,
+ AgentContinueResponseParams,
+ AgentContinueResponseToolChoiceParams,
+ AgentContinueStreamResponseParams,
+ AgentContinueStreamResponsePayloadParams,
+ AgentInlineToolParams,
+ AgentKernelRequestParams,
+ AgentKernelRequestReasoningEffortParams,
+ AgentKernelRequestStopParams,
+ AgentKernelRequestTemplateParams,
+ AgentKernelRequestToolsItemParams,
+ AgentLinkedFileRequestParams,
+ AgentLinkedFileResponseFileParams,
+ AgentLinkedFileResponseParams,
+ AgentLogResponseParams,
+ AgentLogResponseToolChoiceParams,
+ AgentLogStreamResponseParams,
+ AgentResponseParams,
+ AgentResponseReasoningEffortParams,
+ AgentResponseStopParams,
+ AgentResponseTemplateParams,
+ AgentResponseToolsItemParams,
+ AnthropicRedactedThinkingContentParams,
+ AnthropicThinkingContentParams,
BooleanEvaluatorStatsResponseParams,
ChatMessageContentItemParams,
ChatMessageContentParams,
ChatMessageParams,
+ ChatMessageThinkingItemParams,
CodeEvaluatorRequestParams,
+ CreateAgentLogResponseParams,
CreateDatapointRequestParams,
CreateDatapointRequestTargetValueParams,
CreateEvaluatorLogResponseParams,
@@ -245,6 +344,7 @@
ExternalEvaluatorRequestParams,
FileEnvironmentResponseFileParams,
FileEnvironmentResponseParams,
+ FileEnvironmentVariableRequestParams,
FileIdParams,
FilePathParams,
FileRequestParams,
@@ -258,7 +358,9 @@
ImageChatContentParams,
ImageUrlParams,
InputResponseParams,
+ LinkedFileRequestParams,
LinkedToolResponseParams,
+ ListAgentsParams,
ListDatasetsParams,
ListEvaluatorsParams,
ListFlowsParams,
@@ -266,24 +368,27 @@
ListToolsParams,
LlmEvaluatorRequestParams,
LogResponseParams,
+ LogStreamResponseParams,
MonitoringEvaluatorEnvironmentRequestParams,
MonitoringEvaluatorResponseParams,
MonitoringEvaluatorVersionRequestParams,
NumericEvaluatorStatsResponseParams,
OverallStatsParams,
+ PaginatedDataAgentResponseParams,
PaginatedDataEvaluationLogResponseParams,
PaginatedDataEvaluatorResponseParams,
PaginatedDataFlowResponseParams,
PaginatedDataLogResponseParams,
PaginatedDataPromptResponseParams,
PaginatedDataToolResponseParams,
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseParams,
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams,
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseParams,
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams,
PaginatedDatapointResponseParams,
PaginatedDatasetResponseParams,
PaginatedEvaluationResponseParams,
PopulateTemplateResponseParams,
PopulateTemplateResponsePopulatedTemplateParams,
+ PopulateTemplateResponseReasoningEffortParams,
PopulateTemplateResponseStopParams,
PopulateTemplateResponseTemplateParams,
PromptCallLogResponseParams,
@@ -291,11 +396,13 @@
PromptCallResponseToolChoiceParams,
PromptCallStreamResponseParams,
PromptKernelRequestParams,
+ PromptKernelRequestReasoningEffortParams,
PromptKernelRequestStopParams,
PromptKernelRequestTemplateParams,
PromptLogResponseParams,
PromptLogResponseToolChoiceParams,
PromptResponseParams,
+ PromptResponseReasoningEffortParams,
PromptResponseStopParams,
PromptResponseTemplateParams,
ProviderApiKeysParams,
@@ -307,6 +414,7 @@
TextChatContentParams,
TextEvaluatorStatsResponseParams,
ToolCallParams,
+ ToolCallResponseParams,
ToolChoiceParams,
ToolFunctionParams,
ToolKernelRequestParams,
@@ -329,8 +437,82 @@
__all__ = [
"AddEvaluatorsRequestEvaluatorsItem",
"AddEvaluatorsRequestEvaluatorsItemParams",
+ "AgentCallResponse",
+ "AgentCallResponseParams",
+ "AgentCallResponseToolChoice",
+ "AgentCallResponseToolChoiceParams",
+ "AgentCallStreamResponse",
+ "AgentCallStreamResponseParams",
+ "AgentCallStreamResponsePayload",
+ "AgentCallStreamResponsePayloadParams",
"AgentConfigResponse",
"AgentConfigResponseParams",
+ "AgentContinueResponse",
+ "AgentContinueResponseParams",
+ "AgentContinueResponseToolChoice",
+ "AgentContinueResponseToolChoiceParams",
+ "AgentContinueStreamResponse",
+ "AgentContinueStreamResponseParams",
+ "AgentContinueStreamResponsePayload",
+ "AgentContinueStreamResponsePayloadParams",
+ "AgentInlineTool",
+ "AgentInlineToolParams",
+ "AgentKernelRequest",
+ "AgentKernelRequestParams",
+ "AgentKernelRequestReasoningEffort",
+ "AgentKernelRequestReasoningEffortParams",
+ "AgentKernelRequestStop",
+ "AgentKernelRequestStopParams",
+ "AgentKernelRequestTemplate",
+ "AgentKernelRequestTemplateParams",
+ "AgentKernelRequestToolsItem",
+ "AgentKernelRequestToolsItemParams",
+ "AgentLinkedFileRequest",
+ "AgentLinkedFileRequestParams",
+ "AgentLinkedFileResponse",
+ "AgentLinkedFileResponseFile",
+ "AgentLinkedFileResponseFileParams",
+ "AgentLinkedFileResponseParams",
+ "AgentLogRequestAgent",
+ "AgentLogRequestAgentParams",
+ "AgentLogRequestToolChoice",
+ "AgentLogRequestToolChoiceParams",
+ "AgentLogResponse",
+ "AgentLogResponseParams",
+ "AgentLogResponseToolChoice",
+ "AgentLogResponseToolChoiceParams",
+ "AgentLogStreamResponse",
+ "AgentLogStreamResponseParams",
+ "AgentRequestReasoningEffort",
+ "AgentRequestReasoningEffortParams",
+ "AgentRequestStop",
+ "AgentRequestStopParams",
+ "AgentRequestTemplate",
+ "AgentRequestTemplateParams",
+ "AgentRequestToolsItem",
+ "AgentRequestToolsItemParams",
+ "AgentResponse",
+ "AgentResponseParams",
+ "AgentResponseReasoningEffort",
+ "AgentResponseReasoningEffortParams",
+ "AgentResponseStop",
+ "AgentResponseStopParams",
+ "AgentResponseTemplate",
+ "AgentResponseTemplateParams",
+ "AgentResponseToolsItem",
+ "AgentResponseToolsItemParams",
+ "AgentsCallRequestAgent",
+ "AgentsCallRequestAgentParams",
+ "AgentsCallRequestToolChoice",
+ "AgentsCallRequestToolChoiceParams",
+ "AgentsCallStreamRequestAgent",
+ "AgentsCallStreamRequestAgentParams",
+ "AgentsCallStreamRequestToolChoice",
+ "AgentsCallStreamRequestToolChoiceParams",
+ "AnthropicRedactedThinkingContent",
+ "AnthropicRedactedThinkingContentParams",
+ "AnthropicThinkingContent",
+ "AnthropicThinkingContentParams",
"AsyncHumanloop",
"BaseModelsUserResponse",
"BooleanEvaluatorStatsResponse",
@@ -341,11 +523,15 @@
"ChatMessageContentItemParams",
"ChatMessageContentParams",
"ChatMessageParams",
+ "ChatMessageThinkingItem",
+ "ChatMessageThinkingItemParams",
"ChatRole",
"ChatToolType",
"CodeEvaluatorRequest",
"CodeEvaluatorRequestParams",
"ConfigToolResponse",
+ "CreateAgentLogResponse",
+ "CreateAgentLogResponseParams",
"CreateDatapointRequest",
"CreateDatapointRequestParams",
"CreateDatapointRequestTargetValue",
@@ -438,6 +624,7 @@
"EvaluatorVersionId",
"EvaluatorVersionIdParams",
"EvaluatorsRequest",
+ "EventType",
"ExternalEvaluatorRequest",
"ExternalEvaluatorRequestParams",
"FeedbackType",
@@ -445,6 +632,8 @@
"FileEnvironmentResponseFile",
"FileEnvironmentResponseFileParams",
"FileEnvironmentResponseParams",
+ "FileEnvironmentVariableRequest",
+ "FileEnvironmentVariableRequestParams",
"FileId",
"FileIdParams",
"FilePath",
@@ -477,8 +666,12 @@
"ImageUrlParams",
"InputResponse",
"InputResponseParams",
+ "LinkedFileRequest",
+ "LinkedFileRequestParams",
"LinkedToolResponse",
"LinkedToolResponseParams",
+ "ListAgents",
+ "ListAgentsParams",
"ListDatasets",
"ListDatasetsParams",
"ListEvaluators",
@@ -495,6 +688,8 @@
"LogResponse",
"LogResponseParams",
"LogStatus",
+ "LogStreamResponse",
+ "LogStreamResponseParams",
"ModelEndpoints",
"ModelProviders",
"MonitoringEvaluatorEnvironmentRequest",
@@ -507,8 +702,12 @@
"NumericEvaluatorStatsResponse",
"NumericEvaluatorStatsResponseParams",
"ObservabilityStatus",
+ "OnAgentCallEnum",
+ "OpenAiReasoningEffort",
"OverallStats",
"OverallStatsParams",
+ "PaginatedDataAgentResponse",
+ "PaginatedDataAgentResponseParams",
"PaginatedDataEvaluationLogResponse",
"PaginatedDataEvaluationLogResponseParams",
"PaginatedDataEvaluatorResponse",
@@ -521,10 +720,10 @@
"PaginatedDataPromptResponseParams",
"PaginatedDataToolResponse",
"PaginatedDataToolResponseParams",
- "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse",
- "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseParams",
- "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem",
- "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams",
+ "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse",
+ "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseParams",
+ "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem",
+ "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams",
"PaginatedDatapointResponse",
"PaginatedDatapointResponseParams",
"PaginatedDatasetResponse",
@@ -538,6 +737,8 @@
"PopulateTemplateResponseParams",
"PopulateTemplateResponsePopulatedTemplate",
"PopulateTemplateResponsePopulatedTemplateParams",
+ "PopulateTemplateResponseReasoningEffort",
+ "PopulateTemplateResponseReasoningEffortParams",
"PopulateTemplateResponseStop",
"PopulateTemplateResponseStopParams",
"PopulateTemplateResponseTemplate",
@@ -553,10 +754,14 @@
"PromptCallStreamResponseParams",
"PromptKernelRequest",
"PromptKernelRequestParams",
+ "PromptKernelRequestReasoningEffort",
+ "PromptKernelRequestReasoningEffortParams",
"PromptKernelRequestStop",
"PromptKernelRequestStopParams",
"PromptKernelRequestTemplate",
"PromptKernelRequestTemplateParams",
+ "PromptLogRequestPrompt",
+ "PromptLogRequestPromptParams",
"PromptLogRequestToolChoice",
"PromptLogRequestToolChoiceParams",
"PromptLogResponse",
@@ -565,23 +770,30 @@
"PromptLogResponseToolChoiceParams",
"PromptLogUpdateRequestToolChoice",
"PromptLogUpdateRequestToolChoiceParams",
+ "PromptRequestReasoningEffort",
+ "PromptRequestReasoningEffortParams",
"PromptRequestStop",
"PromptRequestStopParams",
"PromptRequestTemplate",
"PromptRequestTemplateParams",
"PromptResponse",
"PromptResponseParams",
+ "PromptResponseReasoningEffort",
+ "PromptResponseReasoningEffortParams",
"PromptResponseStop",
"PromptResponseStopParams",
"PromptResponseTemplate",
"PromptResponseTemplateParams",
+ "PromptsCallRequestPrompt",
+ "PromptsCallRequestPromptParams",
"PromptsCallRequestToolChoice",
"PromptsCallRequestToolChoiceParams",
+ "PromptsCallStreamRequestPrompt",
+ "PromptsCallStreamRequestPromptParams",
"PromptsCallStreamRequestToolChoice",
"PromptsCallStreamRequestToolChoiceParams",
"ProviderApiKeys",
"ProviderApiKeysParams",
- "ReasoningEffort",
"ResponseFormat",
"ResponseFormatParams",
"ResponseFormatType",
@@ -604,6 +816,8 @@
"TimeUnit",
"ToolCall",
"ToolCallParams",
+ "ToolCallResponse",
+ "ToolCallResponseParams",
"ToolChoice",
"ToolChoiceParams",
"ToolFunction",
@@ -643,6 +857,7 @@
"VersionStatsResponseParams",
"VersionStatus",
"__version__",
+ "agents",
"datasets",
"directories",
"evaluations",
diff --git a/src/humanloop/agents/__init__.py b/src/humanloop/agents/__init__.py
new file mode 100644
index 00000000..ab2a2f9e
--- /dev/null
+++ b/src/humanloop/agents/__init__.py
@@ -0,0 +1,49 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from .types import (
+ AgentLogRequestAgent,
+ AgentLogRequestToolChoice,
+ AgentRequestReasoningEffort,
+ AgentRequestStop,
+ AgentRequestTemplate,
+ AgentRequestToolsItem,
+ AgentsCallRequestAgent,
+ AgentsCallRequestToolChoice,
+ AgentsCallStreamRequestAgent,
+ AgentsCallStreamRequestToolChoice,
+)
+from .requests import (
+ AgentLogRequestAgentParams,
+ AgentLogRequestToolChoiceParams,
+ AgentRequestReasoningEffortParams,
+ AgentRequestStopParams,
+ AgentRequestTemplateParams,
+ AgentRequestToolsItemParams,
+ AgentsCallRequestAgentParams,
+ AgentsCallRequestToolChoiceParams,
+ AgentsCallStreamRequestAgentParams,
+ AgentsCallStreamRequestToolChoiceParams,
+)
+
+__all__ = [
+ "AgentLogRequestAgent",
+ "AgentLogRequestAgentParams",
+ "AgentLogRequestToolChoice",
+ "AgentLogRequestToolChoiceParams",
+ "AgentRequestReasoningEffort",
+ "AgentRequestReasoningEffortParams",
+ "AgentRequestStop",
+ "AgentRequestStopParams",
+ "AgentRequestTemplate",
+ "AgentRequestTemplateParams",
+ "AgentRequestToolsItem",
+ "AgentRequestToolsItemParams",
+ "AgentsCallRequestAgent",
+ "AgentsCallRequestAgentParams",
+ "AgentsCallRequestToolChoice",
+ "AgentsCallRequestToolChoiceParams",
+ "AgentsCallStreamRequestAgent",
+ "AgentsCallStreamRequestAgentParams",
+ "AgentsCallStreamRequestToolChoice",
+ "AgentsCallStreamRequestToolChoiceParams",
+]
diff --git a/src/humanloop/agents/client.py b/src/humanloop/agents/client.py
new file mode 100644
index 00000000..5cc38277
--- /dev/null
+++ b/src/humanloop/agents/client.py
@@ -0,0 +1,3210 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ..core.client_wrapper import SyncClientWrapper
+from .raw_client import RawAgentsClient
+from ..requests.chat_message import ChatMessageParams
+from .requests.agent_log_request_tool_choice import AgentLogRequestToolChoiceParams
+from .requests.agent_log_request_agent import AgentLogRequestAgentParams
+import datetime as dt
+from ..types.log_status import LogStatus
+from ..core.request_options import RequestOptions
+from ..types.create_agent_log_response import CreateAgentLogResponse
+from ..types.log_response import LogResponse
+from .requests.agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoiceParams
+from .requests.agents_call_stream_request_agent import AgentsCallStreamRequestAgentParams
+from ..requests.provider_api_keys import ProviderApiKeysParams
+from ..types.agent_call_stream_response import AgentCallStreamResponse
+from .requests.agents_call_request_tool_choice import AgentsCallRequestToolChoiceParams
+from .requests.agents_call_request_agent import AgentsCallRequestAgentParams
+from ..types.agent_call_response import AgentCallResponse
+from ..types.agent_continue_stream_response import AgentContinueStreamResponse
+from ..types.agent_continue_response import AgentContinueResponse
+from ..types.project_sort_by import ProjectSortBy
+from ..types.sort_order import SortOrder
+from ..types.paginated_data_agent_response import PaginatedDataAgentResponse
+from ..types.model_endpoints import ModelEndpoints
+from .requests.agent_request_template import AgentRequestTemplateParams
+from ..types.template_language import TemplateLanguage
+from ..types.model_providers import ModelProviders
+from .requests.agent_request_stop import AgentRequestStopParams
+from ..requests.response_format import ResponseFormatParams
+from .requests.agent_request_reasoning_effort import AgentRequestReasoningEffortParams
+from .requests.agent_request_tools_item import AgentRequestToolsItemParams
+from ..types.agent_response import AgentResponse
+from ..types.list_agents import ListAgents
+from ..types.file_environment_response import FileEnvironmentResponse
+from ..requests.evaluator_activation_deactivation_request_activate_item import (
+ EvaluatorActivationDeactivationRequestActivateItemParams,
+)
+from ..requests.evaluator_activation_deactivation_request_deactivate_item import (
+ EvaluatorActivationDeactivationRequestDeactivateItemParams,
+)
+from ..types.agent_kernel_request import AgentKernelRequest
+from ..core.client_wrapper import AsyncClientWrapper
+from .raw_client import AsyncRawAgentsClient
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class AgentsClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._raw_client = RawAgentsClient(client_wrapper=client_wrapper)
+
+ @property
+ def with_raw_response(self) -> RawAgentsClient:
+ """
+ Retrieves a raw implementation of this client that returns raw responses.
+
+ Returns
+ -------
+ RawAgentsClient
+ """
+ return self._raw_client
+
+ def log(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ run_id: typing.Optional[str] = OMIT,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ output_message: typing.Optional[ChatMessageParams] = OMIT,
+ prompt_tokens: typing.Optional[int] = OMIT,
+ reasoning_tokens: typing.Optional[int] = OMIT,
+ output_tokens: typing.Optional[int] = OMIT,
+ prompt_cost: typing.Optional[float] = OMIT,
+ output_cost: typing.Optional[float] = OMIT,
+ finish_reason: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentLogRequestAgentParams] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ output: typing.Optional[str] = OMIT,
+ created_at: typing.Optional[dt.datetime] = OMIT,
+ error: typing.Optional[str] = OMIT,
+ provider_latency: typing.Optional[float] = OMIT,
+ stdout: typing.Optional[str] = OMIT,
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agent_log_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> CreateAgentLogResponse:
+ """
+ Create an Agent Log.
+
+        You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+ If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete`
+ in order to trigger Evaluators.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ run_id : typing.Optional[str]
+ Unique identifier for the Run to associate the Log to.
+
+ path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ output_message : typing.Optional[ChatMessageParams]
+ The message returned by the provider.
+
+ prompt_tokens : typing.Optional[int]
+ Number of tokens in the prompt used to generate the output.
+
+ reasoning_tokens : typing.Optional[int]
+ Number of reasoning tokens used to generate the output.
+
+ output_tokens : typing.Optional[int]
+ Number of tokens in the output generated by the model.
+
+ prompt_cost : typing.Optional[float]
+ Cost in dollars associated to the tokens in the prompt.
+
+ output_cost : typing.Optional[float]
+ Cost in dollars associated to the tokens in the output.
+
+ finish_reason : typing.Optional[str]
+ Reason the generation finished.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentLogRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+            - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Agent.
+            - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Agent.
+            - `'required'` means the model must call one or more of the provided tools.
+            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentLogRequestAgentParams]
+ The agent configuration to use. Two formats are supported:
+ - An `AgentKernelRequest` object containing the agent configuration
+ - A string containing a serialized .agent file
+ A new Agent version will be created if the provided details are new.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ output : typing.Optional[str]
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+ created_at : typing.Optional[dt.datetime]
+ User defined timestamp for when the log was created.
+
+ error : typing.Optional[str]
+ Error message if the log is an error.
+
+ provider_latency : typing.Optional[float]
+ Duration of the logged event in seconds.
+
+ stdout : typing.Optional[str]
+ Captured log and debug statements.
+
+ provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw request sent to provider.
+
+ provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw response received from the provider.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agent_log_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ CreateAgentLogResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.log()
+ """
+ response = self._raw_client.log(
+ version_id=version_id,
+ environment=environment,
+ run_id=run_id,
+ path=path,
+ id=id,
+ output_message=output_message,
+ prompt_tokens=prompt_tokens,
+ reasoning_tokens=reasoning_tokens,
+ output_tokens=output_tokens,
+ prompt_cost=prompt_cost,
+ output_cost=output_cost,
+ finish_reason=finish_reason,
+ messages=messages,
+ tool_choice=tool_choice,
+ agent=agent,
+ start_time=start_time,
+ end_time=end_time,
+ output=output,
+ created_at=created_at,
+ error=error,
+ provider_latency=provider_latency,
+ stdout=stdout,
+ provider_request=provider_request,
+ provider_response=provider_response,
+ inputs=inputs,
+ source=source,
+ metadata=metadata,
+ log_status=log_status,
+ source_datapoint_id=source_datapoint_id,
+ trace_parent_id=trace_parent_id,
+ user=user,
+ agent_log_request_environment=agent_log_request_environment,
+ save=save,
+ log_id=log_id,
+ request_options=request_options,
+ )
+ return response.data
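+
+    # Hypothetical usage sketch (illustrative only, not part of the generated
+    # client): the incomplete -> complete Log lifecycle described in the `log`
+    # docstring. The `path`, message contents, and response attribute names
+    # (`agent_id`, `id`) are assumptions.
+    #
+    #     from humanloop import Humanloop
+    #
+    #     client = Humanloop(api_key="YOUR_API_KEY")
+    #     log = client.agents.log(
+    #         path="my-project/my-agent",  # assumed Agent path
+    #         messages=[{"role": "user", "content": "Hi"}],
+    #         log_status="incomplete",  # monitoring Evaluators wait for `complete`
+    #     )
+    #     client.agents.update_log(
+    #         id=log.agent_id,  # assumed response attribute
+    #         log_id=log.id,  # assumed response attribute
+    #         output="Hello!",
+    #         log_status="complete",  # triggers monitoring Evaluators
+    #     )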
+
+ def update_log(
+ self,
+ id: str,
+ log_id: str,
+ *,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ output_message: typing.Optional[ChatMessageParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ output: typing.Optional[str] = OMIT,
+ error: typing.Optional[str] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> LogResponse:
+ """
+ Update a Log.
+
+ Update the details of a Log with the given ID.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ log_id : str
+ Unique identifier for the Log.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            List of chat messages that were used as an input to the Agent.
+
+        output_message : typing.Optional[ChatMessageParams]
+            The output message returned by this Agent.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the Agent Log.
+
+        output : typing.Optional[str]
+            The output of the Agent Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`.
+
+        error : typing.Optional[str]
+            The error message of the Agent Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`.
+
+        log_status : typing.Optional[LogStatus]
+            Status of the Agent Log. When an Agent Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ LogResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.update_log(
+ id="id",
+ log_id="log_id",
+ )
+ """
+ response = self._raw_client.update_log(
+ id,
+ log_id,
+ messages=messages,
+ output_message=output_message,
+ inputs=inputs,
+ output=output,
+ error=error,
+ log_status=log_status,
+ request_options=request_options,
+ )
+ return response.data
+
+ def call_stream(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentsCallStreamRequestAgentParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agents_call_stream_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ return_inputs: typing.Optional[bool] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.Iterator[AgentCallStreamResponse]:
+ """
+ Call an Agent.
+
+ Calling an Agent calls the model provider before logging
+ the request, responses and metadata to Humanloop.
+
+        You can use the query parameters `version_id` or `environment` to target
+        an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+        Instead of targeting an existing version explicitly, you can pass in
+ Agent details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Agent details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+            - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Agent.
+            - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Agent.
+            - `'required'` means the model must call one or more of the provided tools.
+            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentsCallStreamRequestAgentParams]
+ The agent configuration to use. Two formats are supported:
+ - An `AgentKernelRequest` object containing the agent configuration
+ - A string containing a serialized .agent file
+ A new Agent version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agents_call_stream_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Yields
+ ------
+ typing.Iterator[AgentCallStreamResponse]
+
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ response = client.agents.call_stream()
+ for chunk in response:
+            print(chunk)
+ """
+ with self._raw_client.call_stream(
+ version_id=version_id,
+ environment=environment,
+ path=path,
+ id=id,
+ messages=messages,
+ tool_choice=tool_choice,
+ agent=agent,
+ inputs=inputs,
+ source=source,
+ metadata=metadata,
+ start_time=start_time,
+ end_time=end_time,
+ log_status=log_status,
+ source_datapoint_id=source_datapoint_id,
+ trace_parent_id=trace_parent_id,
+ user=user,
+ agents_call_stream_request_environment=agents_call_stream_request_environment,
+ save=save,
+ log_id=log_id,
+ provider_api_keys=provider_api_keys,
+ return_inputs=return_inputs,
+ include_trace_children=include_trace_children,
+ request_options=request_options,
+ ) as r:
+ yield from r.data
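+
+    # Hypothetical streaming sketch (illustrative only, not part of the
+    # generated client): consuming `call_stream` as an iterator of
+    # AgentCallStreamResponse chunks. The `path` and message are assumptions.
+    #
+    #     from humanloop import Humanloop
+    #
+    #     client = Humanloop(api_key="YOUR_API_KEY")
+    #     for chunk in client.agents.call_stream(
+    #         path="my-project/my-agent",  # assumed Agent path
+    #         messages=[{"role": "user", "content": "Hi"}],
+    #     ):
+    #         print(chunk)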
+
+ def call(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentsCallRequestAgentParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agents_call_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ return_inputs: typing.Optional[bool] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentCallResponse:
+ """
+ Call an Agent.
+
+ Calling an Agent calls the model provider before logging
+ the request, responses and metadata to Humanloop.
+
+        You can use the query parameters `version_id` or `environment` to target
+        an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+        Instead of targeting an existing version explicitly, you can pass in
+ Agent details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Agent details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+            - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Agent.
+            - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Agent.
+            - `'required'` means the model must call one or more of the provided tools.
+            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentsCallRequestAgentParams]
+ The agent configuration to use. Two formats are supported:
+ - An `AgentKernelRequest` object containing the agent configuration
+ - A string containing a serialized .agent file
+ A new Agent version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agents_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentCallResponse
+
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.call()
+ """
+ response = self._raw_client.call(
+ version_id=version_id,
+ environment=environment,
+ path=path,
+ id=id,
+ messages=messages,
+ tool_choice=tool_choice,
+ agent=agent,
+ inputs=inputs,
+ source=source,
+ metadata=metadata,
+ start_time=start_time,
+ end_time=end_time,
+ log_status=log_status,
+ source_datapoint_id=source_datapoint_id,
+ trace_parent_id=trace_parent_id,
+ user=user,
+ agents_call_request_environment=agents_call_request_environment,
+ save=save,
+ log_id=log_id,
+ provider_api_keys=provider_api_keys,
+ return_inputs=return_inputs,
+ include_trace_children=include_trace_children,
+ request_options=request_options,
+ )
+ return response.data
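+
+    # Hypothetical sketch (illustrative only, not part of the generated client)
+    # of calling with inline Agent details, per the docstring: if the provided
+    # configuration does not match an existing version, a new one is created.
+    # The `agent` dict mirrors AgentKernelRequest fields (`model`, `template`,
+    # `max_iterations`); all values here are assumptions.
+    #
+    #     from humanloop import Humanloop
+    #
+    #     client = Humanloop(api_key="YOUR_API_KEY")
+    #     response = client.agents.call(
+    #         path="my-project/my-agent",  # assumed Agent path
+    #         agent={
+    #             "model": "gpt-4",
+    #             "template": [{"role": "system", "content": "You are helpful."}],
+    #             "max_iterations": 3,
+    #         },
+    #         messages=[{"role": "user", "content": "Hi"}],
+    #     )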
+
+ def continue_stream(
+ self,
+ *,
+ log_id: str,
+ messages: typing.Sequence[ChatMessageParams],
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.Iterator[AgentContinueStreamResponse]:
+ """
+ Continue an incomplete Agent call.
+
+ This endpoint allows continuing an existing incomplete Agent call, using the context
+ from the previous interaction. The Agent will resume processing from where it left off.
+
+ The original log must be in an incomplete state to be continued.
+
+ The messages in the request will be appended
+ to the original messages in the log.
+
+ Parameters
+ ----------
+ log_id : str
+ This identifies the Agent Log to continue.
+
+ messages : typing.Sequence[ChatMessageParams]
+ The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Yields
+ ------
+ typing.Iterator[AgentContinueStreamResponse]
+
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ response = client.agents.continue_stream(
+ log_id="log_id",
+ messages=[{"role": "user"}],
+ )
+ for chunk in response:
+            print(chunk)
+ """
+ with self._raw_client.continue_stream(
+ log_id=log_id,
+ messages=messages,
+ provider_api_keys=provider_api_keys,
+ include_trace_children=include_trace_children,
+ request_options=request_options,
+ ) as r:
+ yield from r.data
+
+ def continue_(
+ self,
+ *,
+ log_id: str,
+ messages: typing.Sequence[ChatMessageParams],
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentContinueResponse:
+ """
+ Continue an incomplete Agent call.
+
+ This endpoint allows continuing an existing incomplete Agent call, using the context
+ from the previous interaction. The Agent will resume processing from where it left off.
+
+ The original log must be in an incomplete state to be continued.
+
+ The messages in the request will be appended
+ to the original messages in the log.
+
+ Parameters
+ ----------
+ log_id : str
+ This identifies the Agent Log to continue.
+
+ messages : typing.Sequence[ChatMessageParams]
+ The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentContinueResponse
+
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.continue_(
+ log_id="log_id",
+ messages=[{"role": "user"}],
+ )
+ """
+ response = self._raw_client.continue_(
+ log_id=log_id,
+ messages=messages,
+ provider_api_keys=provider_api_keys,
+ include_trace_children=include_trace_children,
+ request_options=request_options,
+ )
+ return response.data
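+
+    # Hypothetical sketch (illustrative only, not part of the generated client)
+    # of the continue flow: an incomplete Agent call is resumed with Tool
+    # messages answering the previous Assistant message's tool calls. The
+    # response attribute `log_id` and the message contents are assumptions.
+    #
+    #     from humanloop import Humanloop
+    #
+    #     client = Humanloop(api_key="YOUR_API_KEY")
+    #     first = client.agents.call(
+    #         path="my-project/my-agent",  # assumed Agent path
+    #         messages=[{"role": "user", "content": "What is 6 x 7?"}],
+    #     )
+    #     # ... run the requested tools yourself, then resume the Log:
+    #     done = client.agents.continue_(
+    #         log_id=first.log_id,  # assumed response attribute
+    #         messages=[{"role": "tool", "content": "42"}],
+    #     )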
+
+ def list(
+ self,
+ *,
+ page: typing.Optional[int] = None,
+ size: typing.Optional[int] = None,
+ name: typing.Optional[str] = None,
+ user_filter: typing.Optional[str] = None,
+ sort_by: typing.Optional[ProjectSortBy] = None,
+ order: typing.Optional[SortOrder] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> PaginatedDataAgentResponse:
+ """
+ Get a list of all Agents.
+
+ Parameters
+ ----------
+ page : typing.Optional[int]
+ Page number for pagination.
+
+ size : typing.Optional[int]
+ Page size for pagination. Number of Agents to fetch.
+
+ name : typing.Optional[str]
+ Case-insensitive filter for Agent name.
+
+ user_filter : typing.Optional[str]
+ Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users.
+
+ sort_by : typing.Optional[ProjectSortBy]
+            Field to sort Agents by.
+
+ order : typing.Optional[SortOrder]
+ Direction to sort by.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ PaginatedDataAgentResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.list()
+ """
+ response = self._raw_client.list(
+ page=page,
+ size=size,
+ name=name,
+ user_filter=user_filter,
+ sort_by=sort_by,
+ order=order,
+ request_options=request_options,
+ )
+ return response.data
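+
+    # Hypothetical pagination sketch (illustrative only, not part of the
+    # generated client). The `records` attribute is inferred from the
+    # PaginatedDataAgentResponse type naming and is an assumption.
+    #
+    #     from humanloop import Humanloop
+    #
+    #     client = Humanloop(api_key="YOUR_API_KEY")
+    #     page = client.agents.list(page=1, size=10)
+    #     for agent in page.records:  # assumed attribute
+    #         print(agent.id)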
+
+ def upsert(
+ self,
+ *,
+ model: str,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ endpoint: typing.Optional[ModelEndpoints] = OMIT,
+ template: typing.Optional[AgentRequestTemplateParams] = OMIT,
+ template_language: typing.Optional[TemplateLanguage] = OMIT,
+ provider: typing.Optional[ModelProviders] = OMIT,
+ max_tokens: typing.Optional[int] = OMIT,
+ temperature: typing.Optional[float] = OMIT,
+ top_p: typing.Optional[float] = OMIT,
+ stop: typing.Optional[AgentRequestStopParams] = OMIT,
+ presence_penalty: typing.Optional[float] = OMIT,
+ frequency_penalty: typing.Optional[float] = OMIT,
+ other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ seed: typing.Optional[int] = OMIT,
+ response_format: typing.Optional[ResponseFormatParams] = OMIT,
+ reasoning_effort: typing.Optional[AgentRequestReasoningEffortParams] = OMIT,
+ tools: typing.Optional[typing.Sequence[AgentRequestToolsItemParams]] = OMIT,
+ attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ max_iterations: typing.Optional[int] = OMIT,
+ version_name: typing.Optional[str] = OMIT,
+ version_description: typing.Optional[str] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ tags: typing.Optional[typing.Sequence[str]] = OMIT,
+ readme: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Create an Agent or update it with a new version if it already exists.
+
+        Agents are identified by their `ID` or `path`. The parameters (i.e. the template, temperature, model, etc.) and
+ tools determine the versions of the Agent.
+
+ You can provide `version_name` and `version_description` to identify and describe your versions.
+ Version names must be unique within an Agent - attempting to create a version with a name
+ that already exists will result in a 409 Conflict error.
+
+ Parameters
+ ----------
+ model : str
+ The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+
+ path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ endpoint : typing.Optional[ModelEndpoints]
+ The provider model endpoint used.
+
+ template : typing.Optional[AgentRequestTemplateParams]
+ The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+ For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+ For completion models, provide a prompt template as a string.
+
+ Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+
+ template_language : typing.Optional[TemplateLanguage]
+ The template language to use for rendering the template.
+
+ provider : typing.Optional[ModelProviders]
+ The company providing the underlying model service.
+
+ max_tokens : typing.Optional[int]
+            The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt.
+
+ temperature : typing.Optional[float]
+            What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+
+ top_p : typing.Optional[float]
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+
+ stop : typing.Optional[AgentRequestStopParams]
+ The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+
+ presence_penalty : typing.Optional[float]
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+
+ frequency_penalty : typing.Optional[float]
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+
+ other : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Other parameter values to be passed to the provider call.
+
+ seed : typing.Optional[int]
+            If specified, the model will make a best effort to sample deterministically, but it is not guaranteed.
+
+ response_format : typing.Optional[ResponseFormatParams]
+ The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+
+ reasoning_effort : typing.Optional[AgentRequestReasoningEffortParams]
+            Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
+
+ tools : typing.Optional[typing.Sequence[AgentRequestToolsItemParams]]
+
+ attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used.
+
+ max_iterations : typing.Optional[int]
+ The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
+
+ version_name : typing.Optional[str]
+            Unique name for the Agent version. Each Agent can only have one version with a given name.
+
+        version_description : typing.Optional[str]
+            Description of the Version.
+
+        description : typing.Optional[str]
+            Description of the Agent.
+
+        tags : typing.Optional[typing.Sequence[str]]
+            List of tags associated with this Agent.
+
+        readme : typing.Optional[str]
+            Long description of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.upsert(
+ model="model",
+ )
+ """
+ response = self._raw_client.upsert(
+ model=model,
+ path=path,
+ id=id,
+ endpoint=endpoint,
+ template=template,
+ template_language=template_language,
+ provider=provider,
+ max_tokens=max_tokens,
+ temperature=temperature,
+ top_p=top_p,
+ stop=stop,
+ presence_penalty=presence_penalty,
+ frequency_penalty=frequency_penalty,
+ other=other,
+ seed=seed,
+ response_format=response_format,
+ reasoning_effort=reasoning_effort,
+ tools=tools,
+ attributes=attributes,
+ max_iterations=max_iterations,
+ version_name=version_name,
+ version_description=version_description,
+ description=description,
+ tags=tags,
+ readme=readme,
+ request_options=request_options,
+ )
+ return response.data
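+
+    # Hypothetical sketch (illustrative only, not part of the generated client)
+    # of the versioning behavior described above: reusing a `version_name`
+    # within the same Agent results in a 409 Conflict. All values are
+    # assumptions.
+    #
+    #     from humanloop import Humanloop
+    #
+    #     client = Humanloop(api_key="YOUR_API_KEY")
+    #     agent = client.agents.upsert(
+    #         path="my-project/my-agent",  # assumed Agent path
+    #         model="gpt-4",
+    #         template=[{"role": "system", "content": "You are helpful."}],
+    #         max_iterations=3,
+    #         version_name="v1",  # must be unique within this Agent
+    #     )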
+
+ def delete_agent_version(
+ self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> None:
+ """
+ Delete a version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.delete_agent_version(
+ id="id",
+ version_id="version_id",
+ )
+ """
+ response = self._raw_client.delete_agent_version(id, version_id, request_options=request_options)
+ return response.data
+
+ def patch_agent_version(
+ self,
+ id: str,
+ version_id: str,
+ *,
+ name: typing.Optional[str] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Update the name or description of the Agent version.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ name : typing.Optional[str]
+ Name of the version.
+
+ description : typing.Optional[str]
+ Description of the version.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.patch_agent_version(
+ id="id",
+ version_id="version_id",
+ )
+ """
+ response = self._raw_client.patch_agent_version(
+ id, version_id, name=name, description=description, request_options=request_options
+ )
+ return response.data
+
+ def get(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Retrieve the Agent with the given ID.
+
+ By default, the deployed version of the Agent is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.get(
+ id="id",
+ )
+ """
+ response = self._raw_client.get(
+ id, version_id=version_id, environment=environment, request_options=request_options
+ )
+ return response.data
+
+ def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
+ """
+ Delete the Agent with the given ID.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.delete(
+ id="id",
+ )
+ """
+ response = self._raw_client.delete(id, request_options=request_options)
+ return response.data
+
+ def move(
+ self,
+ id: str,
+ *,
+ path: typing.Optional[str] = OMIT,
+ name: typing.Optional[str] = OMIT,
+ directory_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Move the Agent to a different path or change the name.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ path : typing.Optional[str]
+            Path of the Agent including the Agent name, which is used as a unique identifier.
+
+        name : typing.Optional[str]
+            Name of the Agent.
+
+        directory_id : typing.Optional[str]
+            Unique identifier for the Directory to move the Agent to. Starts with `dir_`.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.move(
+ id="id",
+ )
+ """
+ response = self._raw_client.move(
+ id, path=path, name=name, directory_id=directory_id, request_options=request_options
+ )
+ return response.data
+
+ def list_versions(
+ self,
+ id: str,
+ *,
+ evaluator_aggregates: typing.Optional[bool] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> ListAgents:
+ """
+        Get a list of all the versions of an Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ evaluator_aggregates : typing.Optional[bool]
+            Whether to include Evaluator aggregate results for the versions in the response.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ ListAgents
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.list_versions(
+ id="id",
+ )
+ """
+ response = self._raw_client.list_versions(
+ id, evaluator_aggregates=evaluator_aggregates, request_options=request_options
+ )
+ return response.data
+
+ def set_deployment(
+ self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> AgentResponse:
+ """
+ Deploy Agent to an Environment.
+
+        Set the deployed version for the specified Environment. This version
+        will be used for calls made to the Agent in this Environment.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ environment_id : str
+ Unique identifier for the Environment to deploy the Version to.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.set_deployment(
+ id="id",
+ environment_id="environment_id",
+ version_id="version_id",
+ )
+ """
+ response = self._raw_client.set_deployment(
+ id, environment_id, version_id=version_id, request_options=request_options
+ )
+ return response.data
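+
+    # Hypothetical deployment sketch (illustrative only, not part of the
+    # generated client): create a version, then pin it to an Environment so
+    # calls in that Environment use it. The response attributes (`id`,
+    # `version_id`) and the Environment ID are assumptions.
+    #
+    #     from humanloop import Humanloop
+    #
+    #     client = Humanloop(api_key="YOUR_API_KEY")
+    #     agent = client.agents.upsert(path="my-project/my-agent", model="gpt-4")
+    #     client.agents.set_deployment(
+    #         id=agent.id,  # assumed response attribute
+    #         environment_id="env_...",  # assumed Environment ID format
+    #         version_id=agent.version_id,  # assumed response attribute
+    #     )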
+
+ def remove_deployment(
+ self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> None:
+ """
+ Remove deployed Agent from the Environment.
+
+        Remove the deployed version for the specified Environment. This version
+        will no longer be used for calls made to the Agent in this Environment.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ environment_id : str
+ Unique identifier for the Environment to remove the deployment from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.remove_deployment(
+ id="id",
+ environment_id="environment_id",
+ )
+ """
+ response = self._raw_client.remove_deployment(id, environment_id, request_options=request_options)
+ return response.data
+
+ def list_environments(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> typing.List[FileEnvironmentResponse]:
+ """
+ List all Environments and their deployed versions for the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.List[FileEnvironmentResponse]
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.list_environments(
+ id="id",
+ )
+ """
+ response = self._raw_client.list_environments(id, request_options=request_options)
+ return response.data
+
+ def update_monitoring(
+ self,
+ id: str,
+ *,
+ activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT,
+ deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Activate and deactivate Evaluators for monitoring the Agent.
+
+ An activated Evaluator will automatically be run on all new Logs
+ within the Agent for monitoring purposes.
+
+ Parameters
+ ----------
+ id : str
+
+ activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]]
+ Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+
+ deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]]
+ Evaluators to deactivate. These will not be run on new Logs.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.update_monitoring(
+ id="id",
+ )
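+
+        A sketch of activating a specific Evaluator version, assuming the
+        activate items accept an `evaluator_version_id` key (the ID is
+        illustrative):
+
+        client.agents.update_monitoring(
+            id="id",
+            activate=[{"evaluator_version_id": "evv_example"}],
+        )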
+ """
+ response = self._raw_client.update_monitoring(
+ id, activate=activate, deactivate=deactivate, request_options=request_options
+ )
+ return response.data
+
+ def serialize(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> str:
+ """
+ Serialize an Agent to the .agent file format.
+
+ Useful for storing the Agent with your code in a version control system,
+ or for editing with an AI tool.
+
+ By default, the deployed version of the Agent is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ str
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.serialize(
+ id="id",
+ )
+ """
+ response = self._raw_client.serialize(
+ id, version_id=version_id, environment=environment, request_options=request_options
+ )
+ return response.data
+
+ def deserialize(self, *, agent: str, request_options: typing.Optional[RequestOptions] = None) -> AgentKernelRequest:
+ """
+ Deserialize an Agent from the .agent file format.
+
+        This returns a subset of the attributes required by an Agent: the subset
+        that defines the Agent version (e.g. `model`, `temperature`, etc.).
+
+ Parameters
+ ----------
+ agent : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentKernelRequest
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.deserialize(
+ agent="agent",
+ )
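+
+        Serialize and deserialize are inverses, so a round trip along these
+        lines (with an illustrative Agent ID) should recover the
+        version-defining attributes:
+
+        raw = client.agents.serialize(id="ag_example")
+        kernel = client.agents.deserialize(agent=raw)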
+ """
+ response = self._raw_client.deserialize(agent=agent, request_options=request_options)
+ return response.data
+
+
+class AsyncAgentsClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._raw_client = AsyncRawAgentsClient(client_wrapper=client_wrapper)
+
+ @property
+ def with_raw_response(self) -> AsyncRawAgentsClient:
+ """
+ Retrieves a raw implementation of this client that returns raw responses.
+
+ Returns
+ -------
+ AsyncRawAgentsClient
+ """
+ return self._raw_client
+
+ async def log(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ run_id: typing.Optional[str] = OMIT,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ output_message: typing.Optional[ChatMessageParams] = OMIT,
+ prompt_tokens: typing.Optional[int] = OMIT,
+ reasoning_tokens: typing.Optional[int] = OMIT,
+ output_tokens: typing.Optional[int] = OMIT,
+ prompt_cost: typing.Optional[float] = OMIT,
+ output_cost: typing.Optional[float] = OMIT,
+ finish_reason: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentLogRequestAgentParams] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ output: typing.Optional[str] = OMIT,
+ created_at: typing.Optional[dt.datetime] = OMIT,
+ error: typing.Optional[str] = OMIT,
+ provider_latency: typing.Optional[float] = OMIT,
+ stdout: typing.Optional[str] = OMIT,
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agent_log_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> CreateAgentLogResponse:
+ """
+ Create an Agent Log.
+
+        You can use the query parameters `version_id` or `environment` to target
+        an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+ If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete`
+ in order to trigger Evaluators.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ run_id : typing.Optional[str]
+ Unique identifier for the Run to associate the Log to.
+
+ path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ output_message : typing.Optional[ChatMessageParams]
+ The message returned by the provider.
+
+ prompt_tokens : typing.Optional[int]
+ Number of tokens in the prompt used to generate the output.
+
+ reasoning_tokens : typing.Optional[int]
+ Number of reasoning tokens used to generate the output.
+
+ output_tokens : typing.Optional[int]
+ Number of tokens in the output generated by the model.
+
+ prompt_cost : typing.Optional[float]
+ Cost in dollars associated to the tokens in the prompt.
+
+ output_cost : typing.Optional[float]
+ Cost in dollars associated to the tokens in the output.
+
+ finish_reason : typing.Optional[str]
+ Reason the generation finished.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentLogRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentLogRequestAgentParams]
+ The agent configuration to use. Two formats are supported:
+ - An `AgentKernelRequest` object containing the agent configuration
+ - A string containing a serialized .agent file
+ A new Agent version will be created if the provided details are new.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ output : typing.Optional[str]
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+ created_at : typing.Optional[dt.datetime]
+ User defined timestamp for when the log was created.
+
+ error : typing.Optional[str]
+ Error message if the log is an error.
+
+ provider_latency : typing.Optional[float]
+ Duration of the logged event in seconds.
+
+ stdout : typing.Optional[str]
+ Captured log and debug statements.
+
+ provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw request sent to provider.
+
+ provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw response received from the provider.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agent_log_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ CreateAgentLogResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.log()
+
+
+ asyncio.run(main())
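+
+        A sketch of the incomplete/complete workflow described above. The
+        `path`, the message content, and the response fields used to update the
+        Log (`log.agent_id`, `log.id`) are assumptions for illustration:
+
+        async def log_then_complete() -> None:
+            # Create the Log as incomplete so monitoring Evaluators wait for it.
+            log = await client.agents.log(
+                path="example/agent",
+                messages=[{"role": "user", "content": "Hello"}],
+                log_status="incomplete",
+            )
+            # ... do more work, add child Logs to the trace ...
+            # Mark the Log complete to trigger monitoring Evaluators.
+            await client.agents.update_log(
+                id=log.agent_id,
+                log_id=log.id,
+                log_status="complete",
+            )
+
+        asyncio.run(log_then_complete())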
+ """
+ response = await self._raw_client.log(
+ version_id=version_id,
+ environment=environment,
+ run_id=run_id,
+ path=path,
+ id=id,
+ output_message=output_message,
+ prompt_tokens=prompt_tokens,
+ reasoning_tokens=reasoning_tokens,
+ output_tokens=output_tokens,
+ prompt_cost=prompt_cost,
+ output_cost=output_cost,
+ finish_reason=finish_reason,
+ messages=messages,
+ tool_choice=tool_choice,
+ agent=agent,
+ start_time=start_time,
+ end_time=end_time,
+ output=output,
+ created_at=created_at,
+ error=error,
+ provider_latency=provider_latency,
+ stdout=stdout,
+ provider_request=provider_request,
+ provider_response=provider_response,
+ inputs=inputs,
+ source=source,
+ metadata=metadata,
+ log_status=log_status,
+ source_datapoint_id=source_datapoint_id,
+ trace_parent_id=trace_parent_id,
+ user=user,
+ agent_log_request_environment=agent_log_request_environment,
+ save=save,
+ log_id=log_id,
+ request_options=request_options,
+ )
+ return response.data
+
+ async def update_log(
+ self,
+ id: str,
+ log_id: str,
+ *,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ output_message: typing.Optional[ChatMessageParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ output: typing.Optional[str] = OMIT,
+ error: typing.Optional[str] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> LogResponse:
+ """
+ Update a Log.
+
+ Update the details of a Log with the given ID.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ log_id : str
+ Unique identifier for the Log.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            List of chat messages that were used as an input to the Agent.
+
+        output_message : typing.Optional[ChatMessageParams]
+            The output message returned by this Agent.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the Agent Log.
+
+        output : typing.Optional[str]
+            The output of the Agent Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`.
+
+        error : typing.Optional[str]
+            The error message of the Agent Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`.
+
+        log_status : typing.Optional[LogStatus]
+            Status of the Agent Log. When an Agent Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ LogResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.update_log(
+ id="id",
+ log_id="log_id",
+ )
+
+
+ asyncio.run(main())
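+
+        For example, to mark an Agent Log as complete once its trace has
+        finished (the IDs are illustrative):
+
+        async def complete_log() -> None:
+            await client.agents.update_log(
+                id="ag_example",
+                log_id="log_example",
+                log_status="complete",
+            )
+
+        asyncio.run(complete_log())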
+ """
+ response = await self._raw_client.update_log(
+ id,
+ log_id,
+ messages=messages,
+ output_message=output_message,
+ inputs=inputs,
+ output=output,
+ error=error,
+ log_status=log_status,
+ request_options=request_options,
+ )
+ return response.data
+
+ async def call_stream(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentsCallStreamRequestAgentParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agents_call_stream_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ return_inputs: typing.Optional[bool] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.AsyncIterator[AgentCallStreamResponse]:
+ """
+ Call an Agent.
+
+ Calling an Agent calls the model provider before logging
+ the request, responses and metadata to Humanloop.
+
+        You can use the query parameters `version_id` or `environment` to target
+        an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+        Instead of targeting an existing version explicitly, you can pass in
+ Agent details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Agent details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentsCallStreamRequestAgentParams]
+ The agent configuration to use. Two formats are supported:
+ - An `AgentKernelRequest` object containing the agent configuration
+ - A string containing a serialized .agent file
+ A new Agent version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agents_call_stream_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Yields
+ ------
+ typing.AsyncIterator[AgentCallStreamResponse]
+
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ response = await client.agents.call_stream()
+ async for chunk in response:
+                print(chunk)
+
+
+ asyncio.run(main())
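+
+        You can also target a version by passing inline Agent details, as
+        described above. A sketch with illustrative values:
+
+        async def call_stream_inline() -> None:
+            response = await client.agents.call_stream(
+                path="example/agent",
+                agent={"model": "gpt-4o"},
+                messages=[{"role": "user", "content": "Hello"}],
+            )
+            async for chunk in response:
+                print(chunk)
+
+        asyncio.run(call_stream_inline())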
+ """
+ async with self._raw_client.call_stream(
+ version_id=version_id,
+ environment=environment,
+ path=path,
+ id=id,
+ messages=messages,
+ tool_choice=tool_choice,
+ agent=agent,
+ inputs=inputs,
+ source=source,
+ metadata=metadata,
+ start_time=start_time,
+ end_time=end_time,
+ log_status=log_status,
+ source_datapoint_id=source_datapoint_id,
+ trace_parent_id=trace_parent_id,
+ user=user,
+ agents_call_stream_request_environment=agents_call_stream_request_environment,
+ save=save,
+ log_id=log_id,
+ provider_api_keys=provider_api_keys,
+ return_inputs=return_inputs,
+ include_trace_children=include_trace_children,
+ request_options=request_options,
+ ) as r:
+ async for data in r.data:
+ yield data
+
+ async def call(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentsCallRequestAgentParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agents_call_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ return_inputs: typing.Optional[bool] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentCallResponse:
+ """
+ Call an Agent.
+
+ Calling an Agent calls the model provider before logging
+ the request, responses and metadata to Humanloop.
+
+        You can use the query parameters `version_id` or `environment` to target
+        an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+        Instead of targeting an existing version explicitly, you can pass in
+ Agent details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Agent details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the to provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentsCallRequestAgentParams]
+ The agent configuration to use. Two formats are supported:
+ - An `AgentKernelRequest` object containing the agent configuration
+ - A string containing a serialized .agent file
+ A new Agent version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agents_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentCallResponse
+
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.call()
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.call(
+ version_id=version_id,
+ environment=environment,
+ path=path,
+ id=id,
+ messages=messages,
+ tool_choice=tool_choice,
+ agent=agent,
+ inputs=inputs,
+ source=source,
+ metadata=metadata,
+ start_time=start_time,
+ end_time=end_time,
+ log_status=log_status,
+ source_datapoint_id=source_datapoint_id,
+ trace_parent_id=trace_parent_id,
+ user=user,
+ agents_call_request_environment=agents_call_request_environment,
+ save=save,
+ log_id=log_id,
+ provider_api_keys=provider_api_keys,
+ return_inputs=return_inputs,
+ include_trace_children=include_trace_children,
+ request_options=request_options,
+ )
+ return response.data
+
+ async def continue_stream(
+ self,
+ *,
+ log_id: str,
+ messages: typing.Sequence[ChatMessageParams],
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.AsyncIterator[AgentContinueStreamResponse]:
+ """
+ Continue an incomplete Agent call.
+
+ This endpoint allows continuing an existing incomplete Agent call, using the context
+ from the previous interaction. The Agent will resume processing from where it left off.
+
+ The original log must be in an incomplete state to be continued.
+
+ The messages in the request will be appended
+ to the original messages in the log.
+
+ Parameters
+ ----------
+ log_id : str
+ This identifies the Agent Log to continue.
+
+ messages : typing.Sequence[ChatMessageParams]
+ The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Yields
+ ------
+ typing.AsyncIterator[AgentContinueStreamResponse]
+
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ response = await client.agents.continue_stream(
+ log_id="log_id",
+ messages=[{"role": "user"}],
+ )
+ async for chunk in response:
+                print(chunk)
+
+
+ asyncio.run(main())
+ """
+ async with self._raw_client.continue_stream(
+ log_id=log_id,
+ messages=messages,
+ provider_api_keys=provider_api_keys,
+ include_trace_children=include_trace_children,
+ request_options=request_options,
+ ) as r:
+ async for data in r.data:
+ yield data
+
+ async def continue_(
+ self,
+ *,
+ log_id: str,
+ messages: typing.Sequence[ChatMessageParams],
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentContinueResponse:
+ """
+ Continue an incomplete Agent call.
+
+ This endpoint allows continuing an existing incomplete Agent call, using the context
+ from the previous interaction. The Agent will resume processing from where it left off.
+
+ The original log must be in an incomplete state to be continued.
+
+ The messages in the request will be appended
+ to the original messages in the log.
+
+ Parameters
+ ----------
+ log_id : str
+ This identifies the Agent Log to continue.
+
+ messages : typing.Sequence[ChatMessageParams]
+ The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentContinueResponse
+
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.continue_(
+ log_id="log_id",
+ messages=[{"role": "user"}],
+ )
+
+
+ asyncio.run(main())
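+
+        As noted above, the continuation messages often start with Tool results
+        for the previous Assistant message's tool calls. A sketch with
+        illustrative IDs and content:
+
+        async def continue_with_tool_result() -> None:
+            await client.agents.continue_(
+                log_id="log_example",
+                messages=[
+                    {
+                        "role": "tool",
+                        "tool_call_id": "call_example",
+                        "content": '{"temperature_c": 21}',
+                    }
+                ],
+            )
+
+        asyncio.run(continue_with_tool_result())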
+ """
+ response = await self._raw_client.continue_(
+ log_id=log_id,
+ messages=messages,
+ provider_api_keys=provider_api_keys,
+ include_trace_children=include_trace_children,
+ request_options=request_options,
+ )
+ return response.data
+
+ async def list(
+ self,
+ *,
+ page: typing.Optional[int] = None,
+ size: typing.Optional[int] = None,
+ name: typing.Optional[str] = None,
+ user_filter: typing.Optional[str] = None,
+ sort_by: typing.Optional[ProjectSortBy] = None,
+ order: typing.Optional[SortOrder] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> PaginatedDataAgentResponse:
+ """
+ Get a list of all Agents.
+
+ Parameters
+ ----------
+ page : typing.Optional[int]
+ Page number for pagination.
+
+ size : typing.Optional[int]
+ Page size for pagination. Number of Agents to fetch.
+
+ name : typing.Optional[str]
+ Case-insensitive filter for Agent name.
+
+ user_filter : typing.Optional[str]
+ Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users.
+
+ sort_by : typing.Optional[ProjectSortBy]
+            Field to sort Agents by.
+
+ order : typing.Optional[SortOrder]
+ Direction to sort by.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ PaginatedDataAgentResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.list()
+
+
+ asyncio.run(main())
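+
+        To page through results, assuming the paginated response exposes a
+        `records` list (the page size is illustrative):
+
+        async def list_agents() -> None:
+            page = await client.agents.list(size=10)
+            for agent in page.records:
+                print(agent.path)
+
+        asyncio.run(list_agents())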
+ """
+ response = await self._raw_client.list(
+ page=page,
+ size=size,
+ name=name,
+ user_filter=user_filter,
+ sort_by=sort_by,
+ order=order,
+ request_options=request_options,
+ )
+ return response.data
+
+ async def upsert(
+ self,
+ *,
+ model: str,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ endpoint: typing.Optional[ModelEndpoints] = OMIT,
+ template: typing.Optional[AgentRequestTemplateParams] = OMIT,
+ template_language: typing.Optional[TemplateLanguage] = OMIT,
+ provider: typing.Optional[ModelProviders] = OMIT,
+ max_tokens: typing.Optional[int] = OMIT,
+ temperature: typing.Optional[float] = OMIT,
+ top_p: typing.Optional[float] = OMIT,
+ stop: typing.Optional[AgentRequestStopParams] = OMIT,
+ presence_penalty: typing.Optional[float] = OMIT,
+ frequency_penalty: typing.Optional[float] = OMIT,
+ other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ seed: typing.Optional[int] = OMIT,
+ response_format: typing.Optional[ResponseFormatParams] = OMIT,
+ reasoning_effort: typing.Optional[AgentRequestReasoningEffortParams] = OMIT,
+ tools: typing.Optional[typing.Sequence[AgentRequestToolsItemParams]] = OMIT,
+ attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ max_iterations: typing.Optional[int] = OMIT,
+ version_name: typing.Optional[str] = OMIT,
+ version_description: typing.Optional[str] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ tags: typing.Optional[typing.Sequence[str]] = OMIT,
+ readme: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Create an Agent or update it with a new version if it already exists.
+
+ Agents are identified by the `ID` or their `path`. The parameters (i.e. the template, temperature, model etc.) and
+ tools determine the versions of the Agent.
+
+ You can provide `version_name` and `version_description` to identify and describe your versions.
+        Version names must be unique within an Agent; attempting to create a version with a name
+        that already exists will result in a 409 Conflict error.
+
+ Parameters
+ ----------
+ model : str
+ The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+
+ path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ endpoint : typing.Optional[ModelEndpoints]
+ The provider model endpoint used.
+
+ template : typing.Optional[AgentRequestTemplateParams]
+ The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+ For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+ For completion models, provide a prompt template as a string.
+
+ Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+
+ template_language : typing.Optional[TemplateLanguage]
+ The template language to use for rendering the template.
+
+ provider : typing.Optional[ModelProviders]
+ The company providing the underlying model service.
+
+ max_tokens : typing.Optional[int]
+            The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt.
+
+ temperature : typing.Optional[float]
+            What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+
+ top_p : typing.Optional[float]
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+
+ stop : typing.Optional[AgentRequestStopParams]
+ The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+
+ presence_penalty : typing.Optional[float]
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+
+ frequency_penalty : typing.Optional[float]
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+
+ other : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Other parameter values to be passed to the provider call.
+
+ seed : typing.Optional[int]
+ If specified, model will make a best effort to sample deterministically, but it is not guaranteed.
+
+ response_format : typing.Optional[ResponseFormatParams]
+ The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+
+ reasoning_effort : typing.Optional[AgentRequestReasoningEffortParams]
+            Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
+
+ tools : typing.Optional[typing.Sequence[AgentRequestToolsItemParams]]
+
+ attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used.
+
+ max_iterations : typing.Optional[int]
+ The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
+
+ version_name : typing.Optional[str]
+            Unique name for the Agent version. Each Agent can only have one version with a given name.
+
+ version_description : typing.Optional[str]
+ Description of the Version.
+
+ description : typing.Optional[str]
+            Description of the Agent.
+
+ tags : typing.Optional[typing.Sequence[str]]
+            List of tags associated with this Agent.
+
+ readme : typing.Optional[str]
+            Long description of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.upsert(
+ model="model",
+ )
+
+
+ asyncio.run(main())
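+
+        A fuller sketch using a chat template and a named version. The `path`,
+        model, template, and `version_name` values are illustrative; reusing an
+        existing `version_name` results in a 409 Conflict:
+
+        async def upsert_named_version() -> None:
+            await client.agents.upsert(
+                path="example/agent",
+                model="gpt-4o",
+                template=[
+                    {"role": "system", "content": "You are a helpful assistant."}
+                ],
+                max_iterations=5,
+                version_name="v1",
+            )
+
+        asyncio.run(upsert_named_version())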
+ """
+ response = await self._raw_client.upsert(
+ model=model,
+ path=path,
+ id=id,
+ endpoint=endpoint,
+ template=template,
+ template_language=template_language,
+ provider=provider,
+ max_tokens=max_tokens,
+ temperature=temperature,
+ top_p=top_p,
+ stop=stop,
+ presence_penalty=presence_penalty,
+ frequency_penalty=frequency_penalty,
+ other=other,
+ seed=seed,
+ response_format=response_format,
+ reasoning_effort=reasoning_effort,
+ tools=tools,
+ attributes=attributes,
+ max_iterations=max_iterations,
+ version_name=version_name,
+ version_description=version_description,
+ description=description,
+ tags=tags,
+ readme=readme,
+ request_options=request_options,
+ )
+ return response.data
+
+ async def delete_agent_version(
+ self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> None:
+ """
+ Delete a version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.delete_agent_version(
+ id="id",
+ version_id="version_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.delete_agent_version(id, version_id, request_options=request_options)
+ return response.data
+
+ async def patch_agent_version(
+ self,
+ id: str,
+ version_id: str,
+ *,
+ name: typing.Optional[str] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Update the name or description of the Agent version.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ name : typing.Optional[str]
+ Name of the version.
+
+ description : typing.Optional[str]
+ Description of the version.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.patch_agent_version(
+ id="id",
+ version_id="version_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.patch_agent_version(
+ id, version_id, name=name, description=description, request_options=request_options
+ )
+ return response.data
+
+ async def get(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Retrieve the Agent with the given ID.
+
+ By default, the deployed version of the Agent is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.get(
+ id="id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.get(
+ id, version_id=version_id, environment=environment, request_options=request_options
+ )
+ return response.data
+
+ async def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
+ """
+ Delete the Agent with the given ID.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.delete(
+ id="id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.delete(id, request_options=request_options)
+ return response.data
+
+ async def move(
+ self,
+ id: str,
+ *,
+ path: typing.Optional[str] = OMIT,
+ name: typing.Optional[str] = OMIT,
+ directory_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Move the Agent to a different path or change the name.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ path : typing.Optional[str]
+            Path of the Agent including the Agent name, which is used as a unique identifier.
+
+        name : typing.Optional[str]
+            Name of the Agent.
+
+        directory_id : typing.Optional[str]
+            Unique identifier for the Directory to move the Agent to. Starts with `dir_`.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.move(
+ id="id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.move(
+ id, path=path, name=name, directory_id=directory_id, request_options=request_options
+ )
+ return response.data
+
+ async def list_versions(
+ self,
+ id: str,
+ *,
+ evaluator_aggregates: typing.Optional[bool] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> ListAgents:
+ """
+        Get a list of all the versions of an Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ evaluator_aggregates : typing.Optional[bool]
+            Whether to include Evaluator aggregate results for the versions in the response.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ ListAgents
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.list_versions(
+ id="id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.list_versions(
+ id, evaluator_aggregates=evaluator_aggregates, request_options=request_options
+ )
+ return response.data
+
+ async def set_deployment(
+ self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> AgentResponse:
+ """
+ Deploy Agent to an Environment.
+
+        Set the deployed version for the specified Environment. This version of the
+        Agent will be used for calls made to the Agent in this Environment.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ environment_id : str
+ Unique identifier for the Environment to deploy the Version to.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.set_deployment(
+ id="id",
+ environment_id="environment_id",
+ version_id="version_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.set_deployment(
+ id, environment_id, version_id=version_id, request_options=request_options
+ )
+ return response.data
+
+ async def remove_deployment(
+ self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> None:
+ """
+ Remove deployed Agent from the Environment.
+
+        Remove the deployed version for the specified Environment. This version of the
+        Agent will no longer be used for calls made to the Agent in this Environment.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ environment_id : str
+ Unique identifier for the Environment to remove the deployment from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.remove_deployment(
+ id="id",
+ environment_id="environment_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.remove_deployment(id, environment_id, request_options=request_options)
+ return response.data
+
+ async def list_environments(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> typing.List[FileEnvironmentResponse]:
+ """
+ List all Environments and their deployed versions for the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.List[FileEnvironmentResponse]
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.list_environments(
+ id="id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.list_environments(id, request_options=request_options)
+ return response.data
+
+ async def update_monitoring(
+ self,
+ id: str,
+ *,
+ activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT,
+ deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Activate and deactivate Evaluators for monitoring the Agent.
+
+ An activated Evaluator will automatically be run on all new Logs
+ within the Agent for monitoring purposes.
+
+ Parameters
+ ----------
+ id : str
+
+ activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]]
+ Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+
+ deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]]
+ Evaluators to deactivate. These will not be run on new Logs.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.update_monitoring(
+ id="id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.update_monitoring(
+ id, activate=activate, deactivate=deactivate, request_options=request_options
+ )
+ return response.data
+
+ async def serialize(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> str:
+ """
+ Serialize an Agent to the .agent file format.
+
+ Useful for storing the Agent with your code in a version control system,
+ or for editing with an AI tool.
+
+ By default, the deployed version of the Agent is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ str
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.serialize(
+ id="id",
+ )
+
+
+ asyncio.run(main())
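+
+        For example, to store the deployed version alongside your code (the
+        file path and Agent ID are illustrative):
+
+        async def snapshot_agent() -> None:
+            raw = await client.agents.serialize(id="ag_example")
+            with open("example.agent", "w") as f:
+                f.write(raw)
+
+        asyncio.run(snapshot_agent())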
+ """
+ response = await self._raw_client.serialize(
+ id, version_id=version_id, environment=environment, request_options=request_options
+ )
+ return response.data
+
+ async def deserialize(
+ self, *, agent: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> AgentKernelRequest:
+ """
+ Deserialize an Agent from the .agent file format.
+
+        This returns a subset of the attributes required by an Agent: the subset
+        that defines the Agent version (e.g. `model`, `temperature`, etc.).
+
+ Parameters
+ ----------
+ agent : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentKernelRequest
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.deserialize(
+ agent="agent",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.deserialize(agent=agent, request_options=request_options)
+ return response.data
diff --git a/src/humanloop/agents/raw_client.py b/src/humanloop/agents/raw_client.py
new file mode 100644
index 00000000..226f3c35
--- /dev/null
+++ b/src/humanloop/agents/raw_client.py
@@ -0,0 +1,3891 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ..core.client_wrapper import SyncClientWrapper
+from ..requests.chat_message import ChatMessageParams
+from .requests.agent_log_request_tool_choice import AgentLogRequestToolChoiceParams
+from .requests.agent_log_request_agent import AgentLogRequestAgentParams
+import datetime as dt
+from ..types.log_status import LogStatus
+from ..core.request_options import RequestOptions
+from ..core.http_response import HttpResponse
+from ..types.create_agent_log_response import CreateAgentLogResponse
+from ..core.serialization import convert_and_respect_annotation_metadata
+from ..core.unchecked_base_model import construct_type
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.http_validation_error import HttpValidationError
+from json.decoder import JSONDecodeError
+from ..core.api_error import ApiError
+from ..types.log_response import LogResponse
+from ..core.jsonable_encoder import jsonable_encoder
+from .requests.agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoiceParams
+from .requests.agents_call_stream_request_agent import AgentsCallStreamRequestAgentParams
+from ..requests.provider_api_keys import ProviderApiKeysParams
+from ..types.agent_call_stream_response import AgentCallStreamResponse
+import httpx_sse
+import contextlib
+from .requests.agents_call_request_tool_choice import AgentsCallRequestToolChoiceParams
+from .requests.agents_call_request_agent import AgentsCallRequestAgentParams
+from ..types.agent_call_response import AgentCallResponse
+from ..types.agent_continue_stream_response import AgentContinueStreamResponse
+from ..types.agent_continue_response import AgentContinueResponse
+from ..types.project_sort_by import ProjectSortBy
+from ..types.sort_order import SortOrder
+from ..types.paginated_data_agent_response import PaginatedDataAgentResponse
+from ..types.model_endpoints import ModelEndpoints
+from .requests.agent_request_template import AgentRequestTemplateParams
+from ..types.template_language import TemplateLanguage
+from ..types.model_providers import ModelProviders
+from .requests.agent_request_stop import AgentRequestStopParams
+from ..requests.response_format import ResponseFormatParams
+from .requests.agent_request_reasoning_effort import AgentRequestReasoningEffortParams
+from .requests.agent_request_tools_item import AgentRequestToolsItemParams
+from ..types.agent_response import AgentResponse
+from ..types.list_agents import ListAgents
+from ..types.file_environment_response import FileEnvironmentResponse
+from ..requests.evaluator_activation_deactivation_request_activate_item import (
+ EvaluatorActivationDeactivationRequestActivateItemParams,
+)
+from ..requests.evaluator_activation_deactivation_request_deactivate_item import (
+ EvaluatorActivationDeactivationRequestDeactivateItemParams,
+)
+from ..types.agent_kernel_request import AgentKernelRequest
+from ..core.client_wrapper import AsyncClientWrapper
+from ..core.http_response import AsyncHttpResponse
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class RawAgentsClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ def log(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ run_id: typing.Optional[str] = OMIT,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ output_message: typing.Optional[ChatMessageParams] = OMIT,
+ prompt_tokens: typing.Optional[int] = OMIT,
+ reasoning_tokens: typing.Optional[int] = OMIT,
+ output_tokens: typing.Optional[int] = OMIT,
+ prompt_cost: typing.Optional[float] = OMIT,
+ output_cost: typing.Optional[float] = OMIT,
+ finish_reason: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentLogRequestAgentParams] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ output: typing.Optional[str] = OMIT,
+ created_at: typing.Optional[dt.datetime] = OMIT,
+ error: typing.Optional[str] = OMIT,
+ provider_latency: typing.Optional[float] = OMIT,
+ stdout: typing.Optional[str] = OMIT,
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agent_log_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[CreateAgentLogResponse]:
+ """
+ Create an Agent Log.
+
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+ If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete`
+ in order to trigger Evaluators.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ run_id : typing.Optional[str]
+ Unique identifier for the Run to associate the Log to.
+
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ output_message : typing.Optional[ChatMessageParams]
+ The message returned by the provider.
+
+ prompt_tokens : typing.Optional[int]
+ Number of tokens in the prompt used to generate the output.
+
+ reasoning_tokens : typing.Optional[int]
+ Number of reasoning tokens used to generate the output.
+
+ output_tokens : typing.Optional[int]
+ Number of tokens in the output generated by the model.
+
+ prompt_cost : typing.Optional[float]
+ Cost in dollars associated with the tokens in the prompt.
+
+ output_cost : typing.Optional[float]
+ Cost in dollars associated with the tokens in the output.
+
+ finish_reason : typing.Optional[str]
+ Reason the generation finished.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentLogRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentLogRequestAgentParams]
+ The agent configuration to use. Two formats are supported:
+ - An `AgentKernelRequest` object containing the agent configuration
+ - A string containing a serialized .agent file
+ A new Agent version will be created if the provided details are new.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ output : typing.Optional[str]
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+ created_at : typing.Optional[dt.datetime]
+ User defined timestamp for when the log was created.
+
+ error : typing.Optional[str]
+ Error message if the log is an error.
+
+ provider_latency : typing.Optional[float]
+ Duration of the logged event in seconds.
+
+ stdout : typing.Optional[str]
+ Captured log and debug statements.
+
+ provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw request sent to provider.
+
+ provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw response received from the provider.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agent_log_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[CreateAgentLogResponse]
+ Successful Response
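+
+ Examples
+ --------
+ A minimal sketch; it assumes the raw client is reached via the
+ wrapping client's `_raw_client` attribute, and the `path` value is
+ illustrative.
+
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ response = client.agents._raw_client.log(
+ path="My Agents/Example Agent",
+ messages=[{"role": "user", "content": "Hello"}],
+ output="Hi there!",
+ )
+ print(response.data)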
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "agents/log",
+ method="POST",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ json={
+ "run_id": run_id,
+ "path": path,
+ "id": id,
+ "output_message": convert_and_respect_annotation_metadata(
+ object_=output_message, annotation=ChatMessageParams, direction="write"
+ ),
+ "prompt_tokens": prompt_tokens,
+ "reasoning_tokens": reasoning_tokens,
+ "output_tokens": output_tokens,
+ "prompt_cost": prompt_cost,
+ "output_cost": output_cost,
+ "finish_reason": finish_reason,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "tool_choice": convert_and_respect_annotation_metadata(
+ object_=tool_choice, annotation=AgentLogRequestToolChoiceParams, direction="write"
+ ),
+ "agent": convert_and_respect_annotation_metadata(
+ object_=agent, annotation=AgentLogRequestAgentParams, direction="write"
+ ),
+ "start_time": start_time,
+ "end_time": end_time,
+ "output": output,
+ "created_at": created_at,
+ "error": error,
+ "provider_latency": provider_latency,
+ "stdout": stdout,
+ "provider_request": provider_request,
+ "provider_response": provider_response,
+ "inputs": inputs,
+ "source": source,
+ "metadata": metadata,
+ "log_status": log_status,
+ "source_datapoint_id": source_datapoint_id,
+ "trace_parent_id": trace_parent_id,
+ "user": user,
+ "environment": agent_log_request_environment,
+ "save": save,
+ "log_id": log_id,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ CreateAgentLogResponse,
+ construct_type(
+ type_=CreateAgentLogResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def update_log(
+ self,
+ id: str,
+ log_id: str,
+ *,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ output_message: typing.Optional[ChatMessageParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ output: typing.Optional[str] = OMIT,
+ error: typing.Optional[str] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[LogResponse]:
+ """
+ Update a Log.
+
+ Update the details of a Log with the given ID.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ log_id : str
+ Unique identifier for the Log.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ List of chat messages that were used as an input to the Agent.
+
+ output_message : typing.Optional[ChatMessageParams]
+ The output message returned by this Agent.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the Agent Log.
+
+ output : typing.Optional[str]
+ The output of the Agent Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`.
+
+ error : typing.Optional[str]
+ The error message of the Agent Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`.
+
+ log_status : typing.Optional[LogStatus]
+ Status of the Agent Log. When an Agent Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[LogResponse]
+ Successful Response
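+
+ Examples
+ --------
+ A minimal sketch; the IDs below are placeholders and the raw client
+ is reached via the wrapping client's `_raw_client` attribute.
+
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ response = client.agents._raw_client.update_log(
+ id="ag_1234",
+ log_id="log_5678",
+ log_status="complete",
+ )
+ print(response.data)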
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/log/{jsonable_encoder(log_id)}",
+ method="PATCH",
+ json={
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "output_message": convert_and_respect_annotation_metadata(
+ object_=output_message, annotation=ChatMessageParams, direction="write"
+ ),
+ "inputs": inputs,
+ "output": output,
+ "error": error,
+ "log_status": log_status,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ LogResponse,
+ construct_type(
+ type_=LogResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ @contextlib.contextmanager
+ def call_stream(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentsCallStreamRequestAgentParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agents_call_stream_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ return_inputs: typing.Optional[bool] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.Iterator[HttpResponse[typing.Iterator[AgentCallStreamResponse]]]:
+ """
+ Call an Agent.
+
+ Calling an Agent calls the model provider before logging
+ the request, responses and metadata to Humanloop.
+
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can pass
+ Agent details in the request body. We will check whether the details
+ correspond to an existing version of the Agent; if they do not, a new
+ version will be created. This is helpful when you are storing or
+ deriving your Agent details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentsCallStreamRequestAgentParams]
+ The agent configuration to use. Two formats are supported:
+ - An `AgentKernelRequest` object containing the agent configuration
+ - A string containing a serialized .agent file
+ A new Agent version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agents_call_stream_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Yields
+ ------
+ typing.Iterator[HttpResponse[typing.Iterator[AgentCallStreamResponse]]]
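+
+ Examples
+ --------
+ A minimal sketch; `call_stream` is a context manager, the `path`
+ value is illustrative, and the raw client is reached via the
+ wrapping client's `_raw_client` attribute.
+
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ with client.agents._raw_client.call_stream(
+ path="My Agents/Example Agent",
+ messages=[{"role": "user", "content": "Hello"}],
+ ) as response:
+ for chunk in response.data:
+ print(chunk)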
+
+ """
+ with self._client_wrapper.httpx_client.stream(
+ "agents/call",
+ method="POST",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ json={
+ "path": path,
+ "id": id,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "tool_choice": convert_and_respect_annotation_metadata(
+ object_=tool_choice, annotation=AgentsCallStreamRequestToolChoiceParams, direction="write"
+ ),
+ "agent": convert_and_respect_annotation_metadata(
+ object_=agent, annotation=AgentsCallStreamRequestAgentParams, direction="write"
+ ),
+ "inputs": inputs,
+ "source": source,
+ "metadata": metadata,
+ "start_time": start_time,
+ "end_time": end_time,
+ "log_status": log_status,
+ "source_datapoint_id": source_datapoint_id,
+ "trace_parent_id": trace_parent_id,
+ "user": user,
+ "environment": agents_call_stream_request_environment,
+ "save": save,
+ "log_id": log_id,
+ "provider_api_keys": convert_and_respect_annotation_metadata(
+ object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+ ),
+ "return_inputs": return_inputs,
+ "include_trace_children": include_trace_children,
+ "stream": True,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ ) as _response:
+
+ def stream() -> HttpResponse[typing.Iterator[AgentCallStreamResponse]]:
+ try:
+ if 200 <= _response.status_code < 300:
+
+ def _iter():
+ _event_source = httpx_sse.EventSource(_response)
+ for _sse in _event_source.iter_sse():
+ if _sse.data is None:
+ return
+ try:
+ # Parse each SSE payload into the typed stream response;
+ # malformed events are skipped.
+ yield typing.cast(
+ AgentCallStreamResponse,
+ construct_type(
+ type_=AgentCallStreamResponse,  # type: ignore
+ object_=json.loads(_sse.data),
+ ),
+ )
+ except Exception:
+ pass
+ return
+
+ return HttpResponse(response=_response, data=_iter())
+ _response.read()
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ yield stream()
+
+ def call(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentsCallRequestAgentParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agents_call_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ return_inputs: typing.Optional[bool] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[AgentCallResponse]:
+ """
+ Call an Agent.
+
+ Calling an Agent calls the model provider before logging
+ the request, responses and metadata to Humanloop.
+
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can pass
+ Agent details in the request body. We will check whether the details
+ correspond to an existing version of the Agent; if they do not, a new
+ version will be created. This is helpful when you are storing or
+ deriving your Agent details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentsCallRequestAgentParams]
+ The agent configuration to use. Two formats are supported:
+ - An `AgentKernelRequest` object containing the agent configuration
+ - A string containing a serialized .agent file
+ A new Agent version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agents_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[AgentCallResponse]
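+
+ Examples
+ --------
+ A minimal sketch; the `path` value is illustrative and the raw client
+ is reached via the wrapping client's `_raw_client` attribute.
+
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ response = client.agents._raw_client.call(
+ path="My Agents/Example Agent",
+ messages=[{"role": "user", "content": "Hello"}],
+ )
+ print(response.data)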
+
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "agents/call",
+ method="POST",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ json={
+ "path": path,
+ "id": id,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "tool_choice": convert_and_respect_annotation_metadata(
+ object_=tool_choice, annotation=AgentsCallRequestToolChoiceParams, direction="write"
+ ),
+ "agent": convert_and_respect_annotation_metadata(
+ object_=agent, annotation=AgentsCallRequestAgentParams, direction="write"
+ ),
+ "inputs": inputs,
+ "source": source,
+ "metadata": metadata,
+ "start_time": start_time,
+ "end_time": end_time,
+ "log_status": log_status,
+ "source_datapoint_id": source_datapoint_id,
+ "trace_parent_id": trace_parent_id,
+ "user": user,
+ "environment": agents_call_request_environment,
+ "save": save,
+ "log_id": log_id,
+ "provider_api_keys": convert_and_respect_annotation_metadata(
+ object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+ ),
+ "return_inputs": return_inputs,
+ "include_trace_children": include_trace_children,
+ "stream": False,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentCallResponse,
+ construct_type(
+ type_=AgentCallResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ @contextlib.contextmanager
+ def continue_stream(
+ self,
+ *,
+ log_id: str,
+ messages: typing.Sequence[ChatMessageParams],
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.Iterator[HttpResponse[typing.Iterator[AgentContinueStreamResponse]]]:
+ """
+ Continue an incomplete Agent call.
+
+ This endpoint allows continuing an existing incomplete Agent call, using the context
+ from the previous interaction. The Agent will resume processing from where it left off.
+
+ The original log must be in an incomplete state to be continued.
+
+ The messages in the request will be appended to the original messages in the log.
+
+ Parameters
+ ----------
+ log_id : str
+ This identifies the Agent Log to continue.
+
+ messages : typing.Sequence[ChatMessageParams]
+ The additional messages with which to continue the Agent Log. Often, these should start with Tool messages containing results for the previous Assistant message's tool calls.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Yields
+ ------
+ typing.Iterator[HttpResponse[typing.Iterator[AgentContinueStreamResponse]]]
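+
+ Examples
+ --------
+ A minimal sketch; the `log_id` and `tool_call_id` values are
+ placeholders for an incomplete Agent Log and its pending tool call.
+
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ with client.agents._raw_client.continue_stream(
+ log_id="log_5678",
+ messages=[{"role": "tool", "content": "42", "tool_call_id": "call_1"}],
+ ) as response:
+ for chunk in response.data:
+ print(chunk)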
+
+ """
+ with self._client_wrapper.httpx_client.stream(
+ "agents/continue",
+ method="POST",
+ json={
+ "log_id": log_id,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "provider_api_keys": convert_and_respect_annotation_metadata(
+ object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+ ),
+ "include_trace_children": include_trace_children,
+ "stream": True,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ ) as _response:
+
+ def stream() -> HttpResponse[typing.Iterator[AgentContinueStreamResponse]]:
+ try:
+ if 200 <= _response.status_code < 300:
+
+ def _iter():
+ _event_source = httpx_sse.EventSource(_response)
+ for _sse in _event_source.iter_sse():
+ if _sse.data is None:
+ return
+ try:
+ # Parse each SSE payload into the typed stream response;
+ # malformed events are skipped.
+ yield typing.cast(
+ AgentContinueStreamResponse,
+ construct_type(
+ type_=AgentContinueStreamResponse,  # type: ignore
+ object_=json.loads(_sse.data),
+ ),
+ )
+ except Exception:
+ pass
+ return
+
+ return HttpResponse(response=_response, data=_iter())
+ _response.read()
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ yield stream()
+
+ def continue_(
+ self,
+ *,
+ log_id: str,
+ messages: typing.Sequence[ChatMessageParams],
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[AgentContinueResponse]:
+ """
+ Continue an incomplete Agent call.
+
+ This endpoint allows continuing an existing incomplete Agent call, using the context
+ from the previous interaction. The Agent will resume processing from where it left off.
+
+ The original log must be in an incomplete state to be continued.
+
+ The messages in the request will be appended to the original messages in the log.
+
+ Parameters
+ ----------
+ log_id : str
+ This identifies the Agent Log to continue.
+
+ messages : typing.Sequence[ChatMessageParams]
+ The additional messages with which to continue the Agent Log. Often, these should start with Tool messages containing results for the previous Assistant message's tool calls.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[AgentContinueResponse]
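+
+ Examples
+ --------
+ A minimal sketch; the IDs are placeholders for an incomplete Agent
+ Log and its pending tool call.
+
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ response = client.agents._raw_client.continue_(
+ log_id="log_5678",
+ messages=[{"role": "tool", "content": "42", "tool_call_id": "call_1"}],
+ )
+ print(response.data)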
+
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "agents/continue",
+ method="POST",
+ json={
+ "log_id": log_id,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "provider_api_keys": convert_and_respect_annotation_metadata(
+ object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+ ),
+ "include_trace_children": include_trace_children,
+ "stream": False,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentContinueResponse,
+ construct_type(
+ type_=AgentContinueResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def list(
+ self,
+ *,
+ page: typing.Optional[int] = None,
+ size: typing.Optional[int] = None,
+ name: typing.Optional[str] = None,
+ user_filter: typing.Optional[str] = None,
+ sort_by: typing.Optional[ProjectSortBy] = None,
+ order: typing.Optional[SortOrder] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[PaginatedDataAgentResponse]:
+ """
+ Get a list of all Agents.
+
+ Parameters
+ ----------
+ page : typing.Optional[int]
+ Page number for pagination.
+
+ size : typing.Optional[int]
+ Page size for pagination. Number of Agents to fetch.
+
+ name : typing.Optional[str]
+ Case-insensitive filter for Agent name.
+
+ user_filter : typing.Optional[str]
+ Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users.
+
+ sort_by : typing.Optional[ProjectSortBy]
+ Field to sort Agents by.
+
+ order : typing.Optional[SortOrder]
+ Direction to sort by.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[PaginatedDataAgentResponse]
+ Successful Response
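+
+ Examples
+ --------
+ A minimal sketch; it assumes the paginated payload exposes the
+ Agents under `records`.
+
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ response = client.agents._raw_client.list(
+ size=10,
+ )
+ for agent in response.data.records:
+ print(agent.path)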
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "agents",
+ method="GET",
+ params={
+ "page": page,
+ "size": size,
+ "name": name,
+ "user_filter": user_filter,
+ "sort_by": sort_by,
+ "order": order,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ PaginatedDataAgentResponse,
+ construct_type(
+ type_=PaginatedDataAgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def upsert(
+ self,
+ *,
+ model: str,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ endpoint: typing.Optional[ModelEndpoints] = OMIT,
+ template: typing.Optional[AgentRequestTemplateParams] = OMIT,
+ template_language: typing.Optional[TemplateLanguage] = OMIT,
+ provider: typing.Optional[ModelProviders] = OMIT,
+ max_tokens: typing.Optional[int] = OMIT,
+ temperature: typing.Optional[float] = OMIT,
+ top_p: typing.Optional[float] = OMIT,
+ stop: typing.Optional[AgentRequestStopParams] = OMIT,
+ presence_penalty: typing.Optional[float] = OMIT,
+ frequency_penalty: typing.Optional[float] = OMIT,
+ other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ seed: typing.Optional[int] = OMIT,
+ response_format: typing.Optional[ResponseFormatParams] = OMIT,
+ reasoning_effort: typing.Optional[AgentRequestReasoningEffortParams] = OMIT,
+ tools: typing.Optional[typing.Sequence[AgentRequestToolsItemParams]] = OMIT,
+ attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ max_iterations: typing.Optional[int] = OMIT,
+ version_name: typing.Optional[str] = OMIT,
+ version_description: typing.Optional[str] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ tags: typing.Optional[typing.Sequence[str]] = OMIT,
+ readme: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[AgentResponse]:
+ """
+ Create an Agent or update it with a new version if it already exists.
+
+ Agents are identified by the `ID` or their `path`. The parameters (i.e. the template, temperature, model etc.) and
+ tools determine the versions of the Agent.
+
+ You can provide `version_name` and `version_description` to identify and describe your versions.
+ Version names must be unique within an Agent - attempting to create a version with a name
+ that already exists will result in a 409 Conflict error.
+
+ Parameters
+ ----------
+ model : str
+ The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ endpoint : typing.Optional[ModelEndpoints]
+ The provider model endpoint used.
+
+ template : typing.Optional[AgentRequestTemplateParams]
+ The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+ For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+ For completion models, provide a prompt template as a string.
+
+ Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+
+ template_language : typing.Optional[TemplateLanguage]
+ The template language to use for rendering the template.
+
+ provider : typing.Optional[ModelProviders]
+ The company providing the underlying model service.
+
+ max_tokens : typing.Optional[int]
+ The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt.
+
+ temperature : typing.Optional[float]
+ What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+
+ top_p : typing.Optional[float]
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+
+ stop : typing.Optional[AgentRequestStopParams]
+ The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+
+ presence_penalty : typing.Optional[float]
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+
+ frequency_penalty : typing.Optional[float]
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+
+ other : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Other parameter values to be passed to the provider call.
+
+ seed : typing.Optional[int]
+ If specified, model will make a best effort to sample deterministically, but it is not guaranteed.
+
+ response_format : typing.Optional[ResponseFormatParams]
+ The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+
+ reasoning_effort : typing.Optional[AgentRequestReasoningEffortParams]
+ Guidance on how many reasoning tokens it should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
+
+ tools : typing.Optional[typing.Sequence[AgentRequestToolsItemParams]]
+
+ attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used.
+
+ max_iterations : typing.Optional[int]
+ The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
+
+ version_name : typing.Optional[str]
+ Unique name for the Agent version. Each Agent can only have one version with a given name.
+
+ version_description : typing.Optional[str]
+ Description of the Version.
+
+ description : typing.Optional[str]
+ Description of the Agent.
+
+ tags : typing.Optional[typing.Sequence[str]]
+ List of tags associated with this Agent.
+
+ readme : typing.Optional[str]
+ Long description of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[AgentResponse]
+ Successful Response
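+
+ Examples
+ --------
+ A minimal sketch; the `path` and template contents are illustrative.
+
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ response = client.agents._raw_client.upsert(
+ path="My Agents/Example Agent",
+ model="gpt-4o",
+ template=[{"role": "system", "content": "You are a helpful assistant."}],
+ max_iterations=3,
+ )
+ print(response.data)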
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "agents",
+ method="POST",
+ json={
+ "path": path,
+ "id": id,
+ "model": model,
+ "endpoint": endpoint,
+ "template": convert_and_respect_annotation_metadata(
+ object_=template, annotation=AgentRequestTemplateParams, direction="write"
+ ),
+ "template_language": template_language,
+ "provider": provider,
+ "max_tokens": max_tokens,
+ "temperature": temperature,
+ "top_p": top_p,
+ "stop": convert_and_respect_annotation_metadata(
+ object_=stop, annotation=AgentRequestStopParams, direction="write"
+ ),
+ "presence_penalty": presence_penalty,
+ "frequency_penalty": frequency_penalty,
+ "other": other,
+ "seed": seed,
+ "response_format": convert_and_respect_annotation_metadata(
+ object_=response_format, annotation=ResponseFormatParams, direction="write"
+ ),
+ "reasoning_effort": convert_and_respect_annotation_metadata(
+ object_=reasoning_effort, annotation=AgentRequestReasoningEffortParams, direction="write"
+ ),
+ "tools": convert_and_respect_annotation_metadata(
+ object_=tools, annotation=typing.Sequence[AgentRequestToolsItemParams], direction="write"
+ ),
+ "attributes": attributes,
+ "max_iterations": max_iterations,
+ "version_name": version_name,
+ "version_description": version_description,
+ "description": description,
+ "tags": tags,
+ "readme": readme,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def delete_agent_version(
+ self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> HttpResponse[None]:
+ """
+ Delete a version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[None]
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return HttpResponse(response=_response, data=None)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def patch_agent_version(
+ self,
+ id: str,
+ version_id: str,
+ *,
+ name: typing.Optional[str] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[AgentResponse]:
+ """
+ Update the name or description of the Agent version.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ name : typing.Optional[str]
+ Name of the version.
+
+ description : typing.Optional[str]
+ Description of the version.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[AgentResponse]
+ Successful Response
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}",
+ method="PATCH",
+ json={
+ "name": name,
+ "description": description,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def get(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[AgentResponse]:
+ """
+ Retrieve the Agent with the given ID.
+
+ By default, the deployed version of the Agent is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[AgentResponse]
+ Successful Response
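+
+ Examples
+ --------
+ A minimal sketch; the Agent ID and Environment name are placeholders.
+
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ response = client.agents._raw_client.get(
+ id="ag_1234",
+ environment="production",
+ )
+ print(response.data)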
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}",
+ method="GET",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> HttpResponse[None]:
+ """
+ Delete the Agent with the given ID.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[None]
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return HttpResponse(response=_response, data=None)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def move(
+ self,
+ id: str,
+ *,
+ path: typing.Optional[str] = OMIT,
+ name: typing.Optional[str] = OMIT,
+ directory_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[AgentResponse]:
+ """
+ Move the Agent to a different path or change the name.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ path : typing.Optional[str]
+ Path of the Agent including the Agent name, which is used as a unique identifier.
+
+ name : typing.Optional[str]
+ Name of the Agent.
+
+ directory_id : typing.Optional[str]
+ Unique identifier for the Directory to move the Agent to. Starts with `dir_`.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[AgentResponse]
+ Successful Response
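+
+ Examples
+ --------
+ A minimal sketch; the Agent ID and new path are placeholders.
+
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ response = client.agents._raw_client.move(
+ id="ag_1234",
+ path="New Folder/Example Agent",
+ )
+ print(response.data)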
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}",
+ method="PATCH",
+ json={
+ "path": path,
+ "name": name,
+ "directory_id": directory_id,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def list_versions(
+ self,
+ id: str,
+ *,
+ evaluator_aggregates: typing.Optional[bool] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[ListAgents]:
+ """
+ Get a list of all the versions of an Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ evaluator_aggregates : typing.Optional[bool]
+ Whether to include Evaluator aggregate results for the versions in the response.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[ListAgents]
+ Successful Response
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/versions",
+ method="GET",
+ params={
+ "evaluator_aggregates": evaluator_aggregates,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ ListAgents,
+ construct_type(
+ type_=ListAgents, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def set_deployment(
+ self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> HttpResponse[AgentResponse]:
+ """
+ Deploy Agent to an Environment.
+
+ Set the deployed version for the specified Environment. This version of the
+ Agent will be used for calls made to the Agent in this Environment.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ environment_id : str
+ Unique identifier for the Environment to deploy the Version to.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[AgentResponse]
+ Successful Response
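+
+ Examples
+ --------
+ A minimal sketch; the Agent, Environment and Version IDs are
+ placeholders.
+
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ response = client.agents._raw_client.set_deployment(
+ id="ag_1234",
+ environment_id="env_5678",
+ version_id="agv_9012",
+ )
+ print(response.data)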
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}",
+ method="POST",
+ params={
+ "version_id": version_id,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def remove_deployment(
+ self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> HttpResponse[None]:
+ """
+ Remove deployed Agent from the Environment.
+
+ Remove the deployed version for the specified Environment. This version of the Agent
+ will no longer be used for calls made to the Agent in this Environment.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ environment_id : str
+ Unique identifier for the Environment to remove the deployment from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[None]
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return HttpResponse(response=_response, data=None)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def list_environments(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> HttpResponse[typing.List[FileEnvironmentResponse]]:
+ """
+ List all Environments and their deployed versions for the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[typing.List[FileEnvironmentResponse]]
+ Successful Response
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/environments",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ typing.List[FileEnvironmentResponse],
+ construct_type(
+ type_=typing.List[FileEnvironmentResponse], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def update_monitoring(
+ self,
+ id: str,
+ *,
+ activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT,
+ deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[AgentResponse]:
+ """
+ Activate and deactivate Evaluators for monitoring the Agent.
+
+ An activated Evaluator will automatically be run on all new Logs
+ within the Agent for monitoring purposes.
+
+ Parameters
+ ----------
+ id : str
+
+ activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]]
+ Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+
+ deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]]
+ Evaluators to deactivate. These will not be run on new Logs.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[AgentResponse]
+ Successful Response
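+
+ Examples
+ --------
+ A minimal sketch; the activate-item shape shown is an assumption inferred from the request type name, as are the accessor and IDs:
+
+ from humanloop import Humanloop
+
+ client = Humanloop(api_key="YOUR_API_KEY")
+ # NOTE: illustrative only; accessor, IDs and item shape are assumptions.
+ response = client.agents.with_raw_response.update_monitoring(
+ id="ag_123abc",
+ activate=[{"evaluator_version_id": "evv_456def"}],
+ )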
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/evaluators",
+ method="POST",
+ json={
+ "activate": convert_and_respect_annotation_metadata(
+ object_=activate,
+ annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams],
+ direction="write",
+ ),
+ "deactivate": convert_and_respect_annotation_metadata(
+ object_=deactivate,
+ annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams],
+ direction="write",
+ ),
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def serialize(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[str]:
+ """
+ Serialize an Agent to the .agent file format.
+
+ Useful for storing the Agent with your code in a version control system,
+ or for editing with an AI tool.
+
+ By default, the deployed version of the Agent is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[str]
+ Successful Response
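+
+ Examples
+ --------
+ A minimal sketch of saving the serialized Agent alongside your code (entry point, accessor, ID and file name are assumptions):
+
+ from humanloop import Humanloop
+
+ client = Humanloop(api_key="YOUR_API_KEY")
+ # NOTE: illustrative only.
+ response = client.agents.with_raw_response.serialize(id="ag_123abc")
+ with open("my_agent.agent", "w") as f:
+ f.write(response.data)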
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/serialize",
+ method="GET",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return HttpResponse(response=_response, data=_response.text)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def deserialize(
+ self, *, agent: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> HttpResponse[AgentKernelRequest]:
+ """
+ Deserialize an Agent from the .agent file format.
+
+ This returns the subset of an Agent's attributes that defines the
+ Agent version (e.g. `model`, `temperature`, etc.).
+
+ Parameters
+ ----------
+ agent : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[AgentKernelRequest]
+ Successful Response
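+
+ Examples
+ --------
+ A minimal sketch of the reverse of `serialize` (entry point, accessor and file name are assumptions):
+
+ from humanloop import Humanloop
+
+ client = Humanloop(api_key="YOUR_API_KEY")
+ # NOTE: illustrative only.
+ with open("my_agent.agent") as f:
+ response = client.agents.with_raw_response.deserialize(agent=f.read())
+ print(response.data.model)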
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "agents/deserialize",
+ method="POST",
+ json={
+ "agent": agent,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentKernelRequest,
+ construct_type(
+ type_=AgentKernelRequest, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncRawAgentsClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def log(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ run_id: typing.Optional[str] = OMIT,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ output_message: typing.Optional[ChatMessageParams] = OMIT,
+ prompt_tokens: typing.Optional[int] = OMIT,
+ reasoning_tokens: typing.Optional[int] = OMIT,
+ output_tokens: typing.Optional[int] = OMIT,
+ prompt_cost: typing.Optional[float] = OMIT,
+ output_cost: typing.Optional[float] = OMIT,
+ finish_reason: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentLogRequestAgentParams] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ output: typing.Optional[str] = OMIT,
+ created_at: typing.Optional[dt.datetime] = OMIT,
+ error: typing.Optional[str] = OMIT,
+ provider_latency: typing.Optional[float] = OMIT,
+ stdout: typing.Optional[str] = OMIT,
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agent_log_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[CreateAgentLogResponse]:
+ """
+ Create an Agent Log.
+
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+ If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete`
+ in order to trigger Evaluators.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ run_id : typing.Optional[str]
+ Unique identifier for the Run to associate the Log to.
+
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ output_message : typing.Optional[ChatMessageParams]
+ The message returned by the provider.
+
+ prompt_tokens : typing.Optional[int]
+ Number of tokens in the prompt used to generate the output.
+
+ reasoning_tokens : typing.Optional[int]
+ Number of reasoning tokens used to generate the output.
+
+ output_tokens : typing.Optional[int]
+ Number of tokens in the output generated by the model.
+
+ prompt_cost : typing.Optional[float]
+ Cost in dollars associated with the tokens in the prompt.
+
+ output_cost : typing.Optional[float]
+ Cost in dollars associated with the tokens in the output.
+
+ finish_reason : typing.Optional[str]
+ Reason the generation finished.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentLogRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Agent.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Agent.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentLogRequestAgentParams]
+ The agent configuration to use. Two formats are supported:
+ - An `AgentKernelRequest` object containing the agent configuration
+ - A string containing a serialized .agent file
+ A new Agent version will be created if the provided details are new.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ output : typing.Optional[str]
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+ created_at : typing.Optional[dt.datetime]
+ User-defined timestamp for when the log was created.
+
+ error : typing.Optional[str]
+ Error message if the log is an error.
+
+ provider_latency : typing.Optional[float]
+ Duration of the logged event in seconds.
+
+ stdout : typing.Optional[str]
+ Captured log and debug statements.
+
+ provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw request sent to provider.
+
+ provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw response received from the provider.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agent_log_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[CreateAgentLogResponse]
+ Successful Response
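+
+ Examples
+ --------
+ A minimal sketch (the async `AsyncHumanloop` entry point, the `with_raw_response` accessor and all values are assumptions):
+
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+ async def main() -> None:
+ # NOTE: illustrative only; accessor and fields are assumptions.
+ response = await client.agents.with_raw_response.log(
+ path="folder/my-agent",
+ messages=[{"role": "user", "content": "Hello"}],
+ output="Hi there!",
+ log_status="complete",
+ )
+ print(response.data.id)
+
+ asyncio.run(main())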
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "agents/log",
+ method="POST",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ json={
+ "run_id": run_id,
+ "path": path,
+ "id": id,
+ "output_message": convert_and_respect_annotation_metadata(
+ object_=output_message, annotation=ChatMessageParams, direction="write"
+ ),
+ "prompt_tokens": prompt_tokens,
+ "reasoning_tokens": reasoning_tokens,
+ "output_tokens": output_tokens,
+ "prompt_cost": prompt_cost,
+ "output_cost": output_cost,
+ "finish_reason": finish_reason,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "tool_choice": convert_and_respect_annotation_metadata(
+ object_=tool_choice, annotation=AgentLogRequestToolChoiceParams, direction="write"
+ ),
+ "agent": convert_and_respect_annotation_metadata(
+ object_=agent, annotation=AgentLogRequestAgentParams, direction="write"
+ ),
+ "start_time": start_time,
+ "end_time": end_time,
+ "output": output,
+ "created_at": created_at,
+ "error": error,
+ "provider_latency": provider_latency,
+ "stdout": stdout,
+ "provider_request": provider_request,
+ "provider_response": provider_response,
+ "inputs": inputs,
+ "source": source,
+ "metadata": metadata,
+ "log_status": log_status,
+ "source_datapoint_id": source_datapoint_id,
+ "trace_parent_id": trace_parent_id,
+ "user": user,
+ "environment": agent_log_request_environment,
+ "save": save,
+ "log_id": log_id,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ CreateAgentLogResponse,
+ construct_type(
+ type_=CreateAgentLogResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def update_log(
+ self,
+ id: str,
+ log_id: str,
+ *,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ output_message: typing.Optional[ChatMessageParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ output: typing.Optional[str] = OMIT,
+ error: typing.Optional[str] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[LogResponse]:
+ """
+ Update a Log.
+
+ Update the details of a Log with the given ID.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ log_id : str
+ Unique identifier for the Log.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ List of chat messages that were used as an input to the Agent.
+
+ output_message : typing.Optional[ChatMessageParams]
+ The output message returned by this Agent.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the Agent Log.
+
+ output : typing.Optional[str]
+ The output of the Agent Log. Provide None to unset the existing `output` value. Provide either this, `output_message` or `error`.
+
+ error : typing.Optional[str]
+ The error message of the Agent Log. Provide None to unset the existing `error` value. Provide either this, `output_message` or `output`.
+
+ log_status : typing.Optional[LogStatus]
+ Status of the Agent Log. When an Agent Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[LogResponse]
+ Successful Response
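+
+ Examples
+ --------
+ A minimal sketch of marking an incomplete Log as complete (entry point, accessor and IDs are assumptions):
+
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+ async def main() -> None:
+ # NOTE: illustrative only.
+ await client.agents.with_raw_response.update_log(
+ id="ag_123abc",
+ log_id="log_456def",
+ log_status="complete",
+ )
+
+ asyncio.run(main())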
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/log/{jsonable_encoder(log_id)}",
+ method="PATCH",
+ json={
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "output_message": convert_and_respect_annotation_metadata(
+ object_=output_message, annotation=ChatMessageParams, direction="write"
+ ),
+ "inputs": inputs,
+ "output": output,
+ "error": error,
+ "log_status": log_status,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ LogResponse,
+ construct_type(
+ type_=LogResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ @contextlib.asynccontextmanager
+ async def call_stream(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentsCallStreamRequestAgentParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agents_call_stream_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ return_inputs: typing.Optional[bool] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AgentCallStreamResponse]]]:
+ """
+ Call an Agent.
+
+ Calling an Agent calls the model provider before logging
+ the request, responses and metadata to Humanloop.
+
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can instead pass in
+ Agent details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Agent details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Agent.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Agent.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentsCallStreamRequestAgentParams]
+ The agent configuration to use. Two formats are supported:
+ - An `AgentKernelRequest` object containing the agent configuration
+ - A string containing a serialized .agent file
+ A new Agent version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agents_call_stream_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Yields
+ ------
+ typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AgentCallStreamResponse]]]
+
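+ Examples
+ --------
+ A minimal sketch of consuming the stream (entry point, accessor and message shape are assumptions):
+
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+ async def main() -> None:
+ # NOTE: illustrative only.
+ async with client.agents.with_raw_response.call_stream(
+ path="folder/my-agent",
+ messages=[{"role": "user", "content": "Hello"}],
+ ) as response:
+ async for chunk in response.data:
+ print(chunk)
+
+ asyncio.run(main())
+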
+ """
+ async with self._client_wrapper.httpx_client.stream(
+ "agents/call",
+ method="POST",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ json={
+ "path": path,
+ "id": id,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "tool_choice": convert_and_respect_annotation_metadata(
+ object_=tool_choice, annotation=AgentsCallStreamRequestToolChoiceParams, direction="write"
+ ),
+ "agent": convert_and_respect_annotation_metadata(
+ object_=agent, annotation=AgentsCallStreamRequestAgentParams, direction="write"
+ ),
+ "inputs": inputs,
+ "source": source,
+ "metadata": metadata,
+ "start_time": start_time,
+ "end_time": end_time,
+ "log_status": log_status,
+ "source_datapoint_id": source_datapoint_id,
+ "trace_parent_id": trace_parent_id,
+ "user": user,
+ "environment": agents_call_stream_request_environment,
+ "save": save,
+ "log_id": log_id,
+ "provider_api_keys": convert_and_respect_annotation_metadata(
+ object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+ ),
+ "return_inputs": return_inputs,
+ "include_trace_children": include_trace_children,
+ "stream": True,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ ) as _response:
+
+ async def stream() -> AsyncHttpResponse[typing.AsyncIterator[AgentCallStreamResponse]]:
+ try:
+ if 200 <= _response.status_code < 300:
+
+ async def _iter():
+ _event_source = httpx_sse.EventSource(_response)
+ async for _sse in _event_source.aiter_sse():
+ if _sse.data is None:
+ return
+ try:
+ # Parse each SSE payload into the typed stream response
+ # (assumes `json` is imported at module level).
+ yield typing.cast(
+ AgentCallStreamResponse,
+ construct_type(
+ type_=AgentCallStreamResponse,  # type: ignore
+ object_=json.loads(_sse.data),
+ ),
+ )
+ except Exception:
+ pass
+ return
+
+ return AsyncHttpResponse(response=_response, data=_iter())
+ await _response.aread()
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ yield await stream()
+
+ async def call(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentsCallRequestAgentParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agents_call_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ return_inputs: typing.Optional[bool] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[AgentCallResponse]:
+ """
+ Call an Agent.
+
+ Calling an Agent calls the model provider before logging
+ the request, responses and metadata to Humanloop.
+
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can instead pass in
+ Agent details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Agent details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Agent.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Agent.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentsCallRequestAgentParams]
+ The agent configuration to use. Two formats are supported:
+ - An `AgentKernelRequest` object containing the agent configuration
+ - A string containing a serialized .agent file
+ A new Agent version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agents_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[AgentCallResponse]
+
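+ Examples
+ --------
+ A minimal non-streaming sketch (entry point, accessor and message shape are assumptions):
+
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+ async def main() -> None:
+ # NOTE: illustrative only.
+ response = await client.agents.with_raw_response.call(
+ path="folder/my-agent",
+ messages=[{"role": "user", "content": "Hello"}],
+ )
+ print(response.data)
+
+ asyncio.run(main())
+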
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "agents/call",
+ method="POST",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ json={
+ "path": path,
+ "id": id,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "tool_choice": convert_and_respect_annotation_metadata(
+ object_=tool_choice, annotation=AgentsCallRequestToolChoiceParams, direction="write"
+ ),
+ "agent": convert_and_respect_annotation_metadata(
+ object_=agent, annotation=AgentsCallRequestAgentParams, direction="write"
+ ),
+ "inputs": inputs,
+ "source": source,
+ "metadata": metadata,
+ "start_time": start_time,
+ "end_time": end_time,
+ "log_status": log_status,
+ "source_datapoint_id": source_datapoint_id,
+ "trace_parent_id": trace_parent_id,
+ "user": user,
+ "environment": agents_call_request_environment,
+ "save": save,
+ "log_id": log_id,
+ "provider_api_keys": convert_and_respect_annotation_metadata(
+ object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+ ),
+ "return_inputs": return_inputs,
+ "include_trace_children": include_trace_children,
+ "stream": False,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentCallResponse,
+ construct_type(
+ type_=AgentCallResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ @contextlib.asynccontextmanager
+ async def continue_stream(
+ self,
+ *,
+ log_id: str,
+ messages: typing.Sequence[ChatMessageParams],
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AgentContinueStreamResponse]]]:
+ """
+ Continue an incomplete Agent call.
+
+ This endpoint allows continuing an existing incomplete Agent call, using the context
+ from the previous interaction. The Agent will resume processing from where it left off.
+
+ The original log must be in an incomplete state to be continued.
+
+ The messages in the request will be appended
+ to the original messages in the log.
+
+ Parameters
+ ----------
+ log_id : str
+ This identifies the Agent Log to continue.
+
+ messages : typing.Sequence[ChatMessageParams]
+ The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Yields
+ ------
+ typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AgentContinueStreamResponse]]]
+
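+ Examples
+ --------
+ A minimal sketch of resuming an incomplete Agent Log with tool results (entry point, accessor, IDs and message shape are assumptions):
+
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+ async def main() -> None:
+ # NOTE: illustrative only.
+ async with client.agents.with_raw_response.continue_stream(
+ log_id="log_456def",
+ messages=[{"role": "tool", "content": "42", "tool_call_id": "call_1"}],
+ ) as response:
+ async for chunk in response.data:
+ print(chunk)
+
+ asyncio.run(main())
+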
+ """
+ async with self._client_wrapper.httpx_client.stream(
+ "agents/continue",
+ method="POST",
+ json={
+ "log_id": log_id,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "provider_api_keys": convert_and_respect_annotation_metadata(
+ object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+ ),
+ "include_trace_children": include_trace_children,
+ "stream": True,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ ) as _response:
+
+ async def stream() -> AsyncHttpResponse[typing.AsyncIterator[AgentContinueStreamResponse]]:
+ try:
+ if 200 <= _response.status_code < 300:
+
+ async def _iter():
+ _event_source = httpx_sse.EventSource(_response)
+ async for _sse in _event_source.aiter_sse():
+ if _sse.data is None:
+ return
+ try:
+ # Parse each SSE payload into the typed stream response
+ # (assumes `json` is imported at module level).
+ yield typing.cast(
+ AgentContinueStreamResponse,
+ construct_type(
+ type_=AgentContinueStreamResponse,  # type: ignore
+ object_=json.loads(_sse.data),
+ ),
+ )
+ except Exception:
+ pass
+ return
+
+ return AsyncHttpResponse(response=_response, data=_iter())
+ await _response.aread()
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ yield await stream()
+
+ async def continue_(
+ self,
+ *,
+ log_id: str,
+ messages: typing.Sequence[ChatMessageParams],
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[AgentContinueResponse]:
+ """
+ Continue an incomplete Agent call.
+
+ This endpoint allows continuing an existing incomplete Agent call, using the context
+ from the previous interaction. The Agent will resume processing from where it left off.
+
+ The original log must be in an incomplete state to be continued.
+
+ The messages in the request will be appended
+ to the original messages in the log.
+
+ Parameters
+ ----------
+ log_id : str
+ This identifies the Agent Log to continue.
+
+ messages : typing.Sequence[ChatMessageParams]
+ The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[AgentContinueResponse]
+
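+ Examples
+ --------
+ A minimal non-streaming sketch (entry point, accessor, IDs and message shape are assumptions):
+
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+ async def main() -> None:
+ # NOTE: illustrative only.
+ response = await client.agents.with_raw_response.continue_(
+ log_id="log_456def",
+ messages=[{"role": "tool", "content": "42", "tool_call_id": "call_1"}],
+ )
+ print(response.data)
+
+ asyncio.run(main())
+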
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "agents/continue",
+ method="POST",
+ json={
+ "log_id": log_id,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "provider_api_keys": convert_and_respect_annotation_metadata(
+ object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+ ),
+ "include_trace_children": include_trace_children,
+ "stream": False,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentContinueResponse,
+ construct_type(
+ type_=AgentContinueResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def list(
+ self,
+ *,
+ page: typing.Optional[int] = None,
+ size: typing.Optional[int] = None,
+ name: typing.Optional[str] = None,
+ user_filter: typing.Optional[str] = None,
+ sort_by: typing.Optional[ProjectSortBy] = None,
+ order: typing.Optional[SortOrder] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[PaginatedDataAgentResponse]:
+ """
+ Get a list of all Agents.
+
+ Parameters
+ ----------
+ page : typing.Optional[int]
+ Page number for pagination.
+
+ size : typing.Optional[int]
+ Page size for pagination. Number of Agents to fetch.
+
+ name : typing.Optional[str]
+ Case-insensitive filter for Agent name.
+
+ user_filter : typing.Optional[str]
+ Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users.
+
+ sort_by : typing.Optional[ProjectSortBy]
+ Field to sort Agents by
+
+ order : typing.Optional[SortOrder]
+ Direction to sort by.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[PaginatedDataAgentResponse]
+ Successful Response
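+
+ Examples
+ --------
+ A minimal pagination sketch (entry point, accessor and the `records` field are assumptions):
+
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+ async def main() -> None:
+ # NOTE: illustrative only.
+ response = await client.agents.with_raw_response.list(page=1, size=10)
+ for agent in response.data.records:
+ print(agent.path)
+
+ asyncio.run(main())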
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "agents",
+ method="GET",
+ params={
+ "page": page,
+ "size": size,
+ "name": name,
+ "user_filter": user_filter,
+ "sort_by": sort_by,
+ "order": order,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ PaginatedDataAgentResponse,
+ construct_type(
+ type_=PaginatedDataAgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def upsert(
+ self,
+ *,
+ model: str,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ endpoint: typing.Optional[ModelEndpoints] = OMIT,
+ template: typing.Optional[AgentRequestTemplateParams] = OMIT,
+ template_language: typing.Optional[TemplateLanguage] = OMIT,
+ provider: typing.Optional[ModelProviders] = OMIT,
+ max_tokens: typing.Optional[int] = OMIT,
+ temperature: typing.Optional[float] = OMIT,
+ top_p: typing.Optional[float] = OMIT,
+ stop: typing.Optional[AgentRequestStopParams] = OMIT,
+ presence_penalty: typing.Optional[float] = OMIT,
+ frequency_penalty: typing.Optional[float] = OMIT,
+ other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ seed: typing.Optional[int] = OMIT,
+ response_format: typing.Optional[ResponseFormatParams] = OMIT,
+ reasoning_effort: typing.Optional[AgentRequestReasoningEffortParams] = OMIT,
+ tools: typing.Optional[typing.Sequence[AgentRequestToolsItemParams]] = OMIT,
+ attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ max_iterations: typing.Optional[int] = OMIT,
+ version_name: typing.Optional[str] = OMIT,
+ version_description: typing.Optional[str] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ tags: typing.Optional[typing.Sequence[str]] = OMIT,
+ readme: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[AgentResponse]:
+ """
+ Create an Agent or update it with a new version if it already exists.
+
+ Agents are identified by the `ID` or their `path`. The parameters (i.e. the template, temperature, model etc.) and
+ tools determine the versions of the Agent.
+
+ You can provide `version_name` and `version_description` to identify and describe your versions.
+ Version names must be unique within an Agent - attempting to create a version with a name
+ that already exists will result in a 409 Conflict error.
+
+ Parameters
+ ----------
+ model : str
+ The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ endpoint : typing.Optional[ModelEndpoints]
+ The provider model endpoint used.
+
+ template : typing.Optional[AgentRequestTemplateParams]
+ The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+ For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+ For completion models, provide a prompt template as a string.
+
+ Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+
+ template_language : typing.Optional[TemplateLanguage]
+ The template language to use for rendering the template.
+
+ provider : typing.Optional[ModelProviders]
+ The company providing the underlying model service.
+
+ max_tokens : typing.Optional[int]
+ The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt
+
+ temperature : typing.Optional[float]
+ What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+
+ top_p : typing.Optional[float]
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+
+ stop : typing.Optional[AgentRequestStopParams]
+ The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+
+ presence_penalty : typing.Optional[float]
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+
+ frequency_penalty : typing.Optional[float]
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+
+ other : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Other parameter values to be passed to the provider call.
+
+ seed : typing.Optional[int]
+ If specified, model will make a best effort to sample deterministically, but it is not guaranteed.
+
+ response_format : typing.Optional[ResponseFormatParams]
+ The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+
+ reasoning_effort : typing.Optional[AgentRequestReasoningEffortParams]
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an `OpenAIReasoningEffort` enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
+
+ tools : typing.Optional[typing.Sequence[AgentRequestToolsItemParams]]
+
+ attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used.
+
+ max_iterations : typing.Optional[int]
+ The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
+
+ version_name : typing.Optional[str]
+ Unique name for the Agent version. Each Agent can only have one version with a given name.
+
+ version_description : typing.Optional[str]
+ Description of the Version.
+
+ description : typing.Optional[str]
+ Description of the Agent.
+
+ tags : typing.Optional[typing.Sequence[str]]
+ List of tags associated with this Agent.
+
+ readme : typing.Optional[str]
+ Long description of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[AgentResponse]
+ Successful Response
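+
+ Examples
+ --------
+ A minimal sketch (entry point, accessor, the chat-template shape and the response fields are assumptions):
+
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+ async def main() -> None:
+ # NOTE: illustrative only.
+ response = await client.agents.with_raw_response.upsert(
+ path="folder/my-agent",
+ model="gpt-4o",
+ template=[{"role": "system", "content": "You are a helpful assistant."}],
+ max_iterations=5,
+ version_name="v1",
+ )
+ print(response.data.id)
+
+ asyncio.run(main())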
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "agents",
+ method="POST",
+ json={
+ "path": path,
+ "id": id,
+ "model": model,
+ "endpoint": endpoint,
+ "template": convert_and_respect_annotation_metadata(
+ object_=template, annotation=AgentRequestTemplateParams, direction="write"
+ ),
+ "template_language": template_language,
+ "provider": provider,
+ "max_tokens": max_tokens,
+ "temperature": temperature,
+ "top_p": top_p,
+ "stop": convert_and_respect_annotation_metadata(
+ object_=stop, annotation=AgentRequestStopParams, direction="write"
+ ),
+ "presence_penalty": presence_penalty,
+ "frequency_penalty": frequency_penalty,
+ "other": other,
+ "seed": seed,
+ "response_format": convert_and_respect_annotation_metadata(
+ object_=response_format, annotation=ResponseFormatParams, direction="write"
+ ),
+ "reasoning_effort": convert_and_respect_annotation_metadata(
+ object_=reasoning_effort, annotation=AgentRequestReasoningEffortParams, direction="write"
+ ),
+ "tools": convert_and_respect_annotation_metadata(
+ object_=tools, annotation=typing.Sequence[AgentRequestToolsItemParams], direction="write"
+ ),
+ "attributes": attributes,
+ "max_iterations": max_iterations,
+ "version_name": version_name,
+ "version_description": version_description,
+ "description": description,
+ "tags": tags,
+ "readme": readme,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def delete_agent_version(
+ self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncHttpResponse[None]:
+ """
+ Delete a version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[None]
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return AsyncHttpResponse(response=_response, data=None)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def patch_agent_version(
+ self,
+ id: str,
+ version_id: str,
+ *,
+ name: typing.Optional[str] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[AgentResponse]:
+ """
+ Update the name or description of the Agent version.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ name : typing.Optional[str]
+ Name of the version.
+
+ description : typing.Optional[str]
+ Description of the version.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[AgentResponse]
+ Successful Response
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}",
+ method="PATCH",
+ json={
+ "name": name,
+ "description": description,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def get(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[AgentResponse]:
+ """
+ Retrieve the Agent with the given ID.
+
+ By default, the deployed version of the Agent is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[AgentResponse]
+ Successful Response
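+
+        Examples
+        --------
+        # Illustrative sketch only: the ID below is a placeholder, and usage is
+        # shown via the top-level AsyncHumanloop client, whose wrapper method
+        # returns the parsed AgentResponse rather than this AsyncHttpResponse.
+        import asyncio
+
+        from humanloop import AsyncHumanloop
+
+        client = AsyncHumanloop(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.agents.get(
+                id="ag_1234",
+            )
+
+
+        asyncio.run(main())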
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}",
+ method="GET",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def delete(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncHttpResponse[None]:
+ """
+ Delete the Agent with the given ID.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[None]
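+
+        Examples
+        --------
+        # Illustrative sketch only: the ID below is a placeholder, and usage is
+        # shown via the top-level AsyncHumanloop client.
+        import asyncio
+
+        from humanloop import AsyncHumanloop
+
+        client = AsyncHumanloop(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.agents.delete(
+                id="ag_1234",
+            )
+
+
+        asyncio.run(main())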
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return AsyncHttpResponse(response=_response, data=None)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def move(
+ self,
+ id: str,
+ *,
+ path: typing.Optional[str] = OMIT,
+ name: typing.Optional[str] = OMIT,
+ directory_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[AgentResponse]:
+ """
+ Move the Agent to a different path or change the name.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ path : typing.Optional[str]
+            Path of the Agent including the Agent name, which is used as a unique identifier.
+
+ name : typing.Optional[str]
+            Name of the Agent.
+
+ directory_id : typing.Optional[str]
+            Unique identifier for the Directory to move Agent to. Starts with `dir_`.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[AgentResponse]
+ Successful Response
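+
+        Examples
+        --------
+        # Illustrative sketch only: the ID and path below are placeholders, and
+        # usage is shown via the top-level AsyncHumanloop client.
+        import asyncio
+
+        from humanloop import AsyncHumanloop
+
+        client = AsyncHumanloop(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.agents.move(
+                id="ag_1234",
+                path="new directory/new name",
+            )
+
+
+        asyncio.run(main())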
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}",
+ method="PATCH",
+ json={
+ "path": path,
+ "name": name,
+ "directory_id": directory_id,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def list_versions(
+ self,
+ id: str,
+ *,
+ evaluator_aggregates: typing.Optional[bool] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[ListAgents]:
+ """
+        Get a list of all the versions of an Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ evaluator_aggregates : typing.Optional[bool]
+            Whether to include Evaluator aggregate results for the versions in the response.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[ListAgents]
+ Successful Response
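+
+        Examples
+        --------
+        # Illustrative sketch only: the ID below is a placeholder, and usage is
+        # shown via the top-level AsyncHumanloop client.
+        import asyncio
+
+        from humanloop import AsyncHumanloop
+
+        client = AsyncHumanloop(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.agents.list_versions(
+                id="ag_1234",
+                evaluator_aggregates=True,
+            )
+
+
+        asyncio.run(main())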
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/versions",
+ method="GET",
+ params={
+ "evaluator_aggregates": evaluator_aggregates,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ ListAgents,
+ construct_type(
+ type_=ListAgents, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def set_deployment(
+ self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncHttpResponse[AgentResponse]:
+ """
+ Deploy Agent to an Environment.
+
+        Set the deployed version for the specified Environment. This version of
+        the Agent will be used for calls made to the Agent in this Environment.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ environment_id : str
+ Unique identifier for the Environment to deploy the Version to.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[AgentResponse]
+ Successful Response
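+
+        Examples
+        --------
+        # Illustrative sketch only: all IDs below are placeholders, and usage is
+        # shown via the top-level AsyncHumanloop client.
+        import asyncio
+
+        from humanloop import AsyncHumanloop
+
+        client = AsyncHumanloop(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.agents.set_deployment(
+                id="ag_1234",
+                environment_id="env_5678",
+                version_id="agv_9012",
+            )
+
+
+        asyncio.run(main())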
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}",
+ method="POST",
+ params={
+ "version_id": version_id,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def remove_deployment(
+ self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncHttpResponse[None]:
+ """
+ Remove deployed Agent from the Environment.
+
+        Remove the deployed version for the specified Environment. This version
+        of the Agent will no longer be used for calls made to the Agent in this
+        Environment.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ environment_id : str
+ Unique identifier for the Environment to remove the deployment from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[None]
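+
+        Examples
+        --------
+        # Illustrative sketch only: the IDs below are placeholders, and usage is
+        # shown via the top-level AsyncHumanloop client.
+        import asyncio
+
+        from humanloop import AsyncHumanloop
+
+        client = AsyncHumanloop(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.agents.remove_deployment(
+                id="ag_1234",
+                environment_id="env_5678",
+            )
+
+
+        asyncio.run(main())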
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return AsyncHttpResponse(response=_response, data=None)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def list_environments(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncHttpResponse[typing.List[FileEnvironmentResponse]]:
+ """
+ List all Environments and their deployed versions for the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[typing.List[FileEnvironmentResponse]]
+ Successful Response
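+
+        Examples
+        --------
+        # Illustrative sketch only: the ID below is a placeholder, and usage is
+        # shown via the top-level AsyncHumanloop client, which returns the parsed
+        # list of FileEnvironmentResponse objects.
+        import asyncio
+
+        from humanloop import AsyncHumanloop
+
+        client = AsyncHumanloop(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.agents.list_environments(
+                id="ag_1234",
+            )
+
+
+        asyncio.run(main())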
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/environments",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ typing.List[FileEnvironmentResponse],
+ construct_type(
+ type_=typing.List[FileEnvironmentResponse], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def update_monitoring(
+ self,
+ id: str,
+ *,
+ activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT,
+ deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[AgentResponse]:
+ """
+ Activate and deactivate Evaluators for monitoring the Agent.
+
+ An activated Evaluator will automatically be run on all new Logs
+ within the Agent for monitoring purposes.
+
+ Parameters
+ ----------
+ id : str
+
+ activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]]
+ Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+
+ deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]]
+ Evaluators to deactivate. These will not be run on new Logs.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[AgentResponse]
+ Successful Response
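+
+        Examples
+        --------
+        # Illustrative sketch only: the IDs are placeholders and the
+        # `evaluator_version_id` key is an assumed shape for the activate item;
+        # usage is shown via the top-level AsyncHumanloop client.
+        import asyncio
+
+        from humanloop import AsyncHumanloop
+
+        client = AsyncHumanloop(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.agents.update_monitoring(
+                id="ag_1234",
+                activate=[{"evaluator_version_id": "evv_5678"}],
+            )
+
+
+        asyncio.run(main())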
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/evaluators",
+ method="POST",
+ json={
+ "activate": convert_and_respect_annotation_metadata(
+ object_=activate,
+ annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams],
+ direction="write",
+ ),
+ "deactivate": convert_and_respect_annotation_metadata(
+ object_=deactivate,
+ annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams],
+ direction="write",
+ ),
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def serialize(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[str]:
+ """
+ Serialize an Agent to the .agent file format.
+
+ Useful for storing the Agent with your code in a version control system,
+ or for editing with an AI tool.
+
+ By default, the deployed version of the Agent is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[str]
+ Successful Response
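+
+        Examples
+        --------
+        # Illustrative sketch only: the ID below is a placeholder, and usage is
+        # shown via the top-level AsyncHumanloop client, which returns the
+        # serialized .agent file contents as a string.
+        import asyncio
+
+        from humanloop import AsyncHumanloop
+
+        client = AsyncHumanloop(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            raw_agent = await client.agents.serialize(
+                id="ag_1234",
+            )
+            print(raw_agent)
+
+
+        asyncio.run(main())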
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/serialize",
+ method="GET",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return _response.text # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def deserialize(
+ self, *, agent: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncHttpResponse[AgentKernelRequest]:
+ """
+ Deserialize an Agent from the .agent file format.
+
+        This returns a subset of the attributes required by an Agent.
+        This subset defines the Agent version (e.g. `model`, `temperature`, etc.).
+
+ Parameters
+ ----------
+ agent : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[AgentKernelRequest]
+ Successful Response
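+
+        Examples
+        --------
+        # Illustrative sketch only: the `agent` string stands in for the contents
+        # of a real .agent file; usage is shown via the top-level AsyncHumanloop
+        # client, which returns the parsed AgentKernelRequest.
+        import asyncio
+
+        from humanloop import AsyncHumanloop
+
+        client = AsyncHumanloop(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.agents.deserialize(
+                agent="...serialized .agent file contents...",
+            )
+
+
+        asyncio.run(main())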
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "agents/deserialize",
+ method="POST",
+ json={
+ "agent": agent,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentKernelRequest,
+ construct_type(
+ type_=AgentKernelRequest, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/humanloop/agents/requests/__init__.py b/src/humanloop/agents/requests/__init__.py
new file mode 100644
index 00000000..06ce37ed
--- /dev/null
+++ b/src/humanloop/agents/requests/__init__.py
@@ -0,0 +1,25 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from .agent_log_request_agent import AgentLogRequestAgentParams
+from .agent_log_request_tool_choice import AgentLogRequestToolChoiceParams
+from .agent_request_reasoning_effort import AgentRequestReasoningEffortParams
+from .agent_request_stop import AgentRequestStopParams
+from .agent_request_template import AgentRequestTemplateParams
+from .agent_request_tools_item import AgentRequestToolsItemParams
+from .agents_call_request_agent import AgentsCallRequestAgentParams
+from .agents_call_request_tool_choice import AgentsCallRequestToolChoiceParams
+from .agents_call_stream_request_agent import AgentsCallStreamRequestAgentParams
+from .agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoiceParams
+
+__all__ = [
+ "AgentLogRequestAgentParams",
+ "AgentLogRequestToolChoiceParams",
+ "AgentRequestReasoningEffortParams",
+ "AgentRequestStopParams",
+ "AgentRequestTemplateParams",
+ "AgentRequestToolsItemParams",
+ "AgentsCallRequestAgentParams",
+ "AgentsCallRequestToolChoiceParams",
+ "AgentsCallStreamRequestAgentParams",
+ "AgentsCallStreamRequestToolChoiceParams",
+]
diff --git a/src/humanloop/agents/requests/agent_log_request_agent.py b/src/humanloop/agents/requests/agent_log_request_agent.py
new file mode 100644
index 00000000..1c6a7987
--- /dev/null
+++ b/src/humanloop/agents/requests/agent_log_request_agent.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.agent_kernel_request import AgentKernelRequestParams
+
+AgentLogRequestAgentParams = typing.Union[AgentKernelRequestParams, str]
diff --git a/src/humanloop/agents/requests/agent_log_request_tool_choice.py b/src/humanloop/agents/requests/agent_log_request_tool_choice.py
new file mode 100644
index 00000000..584112aa
--- /dev/null
+++ b/src/humanloop/agents/requests/agent_log_request_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.tool_choice import ToolChoiceParams
+
+AgentLogRequestToolChoiceParams = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams
+]
diff --git a/src/humanloop/agents/requests/agent_request_reasoning_effort.py b/src/humanloop/agents/requests/agent_request_reasoning_effort.py
new file mode 100644
index 00000000..98a991cd
--- /dev/null
+++ b/src/humanloop/agents/requests/agent_request_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.open_ai_reasoning_effort import OpenAiReasoningEffort
+
+AgentRequestReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/agents/requests/agent_request_stop.py b/src/humanloop/agents/requests/agent_request_stop.py
new file mode 100644
index 00000000..3970451c
--- /dev/null
+++ b/src/humanloop/agents/requests/agent_request_stop.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentRequestStopParams = typing.Union[str, typing.Sequence[str]]
diff --git a/src/humanloop/agents/requests/agent_request_template.py b/src/humanloop/agents/requests/agent_request_template.py
new file mode 100644
index 00000000..c251ce8e
--- /dev/null
+++ b/src/humanloop/agents/requests/agent_request_template.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.chat_message import ChatMessageParams
+
+AgentRequestTemplateParams = typing.Union[str, typing.Sequence[ChatMessageParams]]
diff --git a/src/humanloop/agents/requests/agent_request_tools_item.py b/src/humanloop/agents/requests/agent_request_tools_item.py
new file mode 100644
index 00000000..20cde136
--- /dev/null
+++ b/src/humanloop/agents/requests/agent_request_tools_item.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.agent_linked_file_request import AgentLinkedFileRequestParams
+from ...requests.agent_inline_tool import AgentInlineToolParams
+
+AgentRequestToolsItemParams = typing.Union[AgentLinkedFileRequestParams, AgentInlineToolParams]
diff --git a/src/humanloop/agents/requests/agents_call_request_agent.py b/src/humanloop/agents/requests/agents_call_request_agent.py
new file mode 100644
index 00000000..5c92d02b
--- /dev/null
+++ b/src/humanloop/agents/requests/agents_call_request_agent.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.agent_kernel_request import AgentKernelRequestParams
+
+AgentsCallRequestAgentParams = typing.Union[AgentKernelRequestParams, str]
diff --git a/src/humanloop/agents/requests/agents_call_request_tool_choice.py b/src/humanloop/agents/requests/agents_call_request_tool_choice.py
new file mode 100644
index 00000000..1e468fa0
--- /dev/null
+++ b/src/humanloop/agents/requests/agents_call_request_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.tool_choice import ToolChoiceParams
+
+AgentsCallRequestToolChoiceParams = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams
+]
diff --git a/src/humanloop/agents/requests/agents_call_stream_request_agent.py b/src/humanloop/agents/requests/agents_call_stream_request_agent.py
new file mode 100644
index 00000000..e9018a18
--- /dev/null
+++ b/src/humanloop/agents/requests/agents_call_stream_request_agent.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.agent_kernel_request import AgentKernelRequestParams
+
+AgentsCallStreamRequestAgentParams = typing.Union[AgentKernelRequestParams, str]
diff --git a/src/humanloop/agents/requests/agents_call_stream_request_tool_choice.py b/src/humanloop/agents/requests/agents_call_stream_request_tool_choice.py
new file mode 100644
index 00000000..bd068b6f
--- /dev/null
+++ b/src/humanloop/agents/requests/agents_call_stream_request_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.tool_choice import ToolChoiceParams
+
+AgentsCallStreamRequestToolChoiceParams = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams
+]
diff --git a/src/humanloop/agents/types/__init__.py b/src/humanloop/agents/types/__init__.py
new file mode 100644
index 00000000..9c8a955c
--- /dev/null
+++ b/src/humanloop/agents/types/__init__.py
@@ -0,0 +1,25 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from .agent_log_request_agent import AgentLogRequestAgent
+from .agent_log_request_tool_choice import AgentLogRequestToolChoice
+from .agent_request_reasoning_effort import AgentRequestReasoningEffort
+from .agent_request_stop import AgentRequestStop
+from .agent_request_template import AgentRequestTemplate
+from .agent_request_tools_item import AgentRequestToolsItem
+from .agents_call_request_agent import AgentsCallRequestAgent
+from .agents_call_request_tool_choice import AgentsCallRequestToolChoice
+from .agents_call_stream_request_agent import AgentsCallStreamRequestAgent
+from .agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoice
+
+__all__ = [
+ "AgentLogRequestAgent",
+ "AgentLogRequestToolChoice",
+ "AgentRequestReasoningEffort",
+ "AgentRequestStop",
+ "AgentRequestTemplate",
+ "AgentRequestToolsItem",
+ "AgentsCallRequestAgent",
+ "AgentsCallRequestToolChoice",
+ "AgentsCallStreamRequestAgent",
+ "AgentsCallStreamRequestToolChoice",
+]
diff --git a/src/humanloop/agents/types/agent_log_request_agent.py b/src/humanloop/agents/types/agent_log_request_agent.py
new file mode 100644
index 00000000..011a2b9d
--- /dev/null
+++ b/src/humanloop/agents/types/agent_log_request_agent.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.agent_kernel_request import AgentKernelRequest
+
+AgentLogRequestAgent = typing.Union[AgentKernelRequest, str]
diff --git a/src/humanloop/agents/types/agent_log_request_tool_choice.py b/src/humanloop/agents/types/agent_log_request_tool_choice.py
new file mode 100644
index 00000000..bfb576c2
--- /dev/null
+++ b/src/humanloop/agents/types/agent_log_request_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.tool_choice import ToolChoice
+
+AgentLogRequestToolChoice = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice
+]
diff --git a/src/humanloop/agents/types/agent_request_reasoning_effort.py b/src/humanloop/agents/types/agent_request_reasoning_effort.py
new file mode 100644
index 00000000..b4267202
--- /dev/null
+++ b/src/humanloop/agents/types/agent_request_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.open_ai_reasoning_effort import OpenAiReasoningEffort
+
+AgentRequestReasoningEffort = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/agents/types/agent_request_stop.py b/src/humanloop/agents/types/agent_request_stop.py
new file mode 100644
index 00000000..325a6b2e
--- /dev/null
+++ b/src/humanloop/agents/types/agent_request_stop.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentRequestStop = typing.Union[str, typing.List[str]]
diff --git a/src/humanloop/agents/types/agent_request_template.py b/src/humanloop/agents/types/agent_request_template.py
new file mode 100644
index 00000000..f6474824
--- /dev/null
+++ b/src/humanloop/agents/types/agent_request_template.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.chat_message import ChatMessage
+
+AgentRequestTemplate = typing.Union[str, typing.List[ChatMessage]]
diff --git a/src/humanloop/agents/types/agent_request_tools_item.py b/src/humanloop/agents/types/agent_request_tools_item.py
new file mode 100644
index 00000000..e6c54b88
--- /dev/null
+++ b/src/humanloop/agents/types/agent_request_tools_item.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.agent_linked_file_request import AgentLinkedFileRequest
+from ...types.agent_inline_tool import AgentInlineTool
+
+AgentRequestToolsItem = typing.Union[AgentLinkedFileRequest, AgentInlineTool]
diff --git a/src/humanloop/agents/types/agents_call_request_agent.py b/src/humanloop/agents/types/agents_call_request_agent.py
new file mode 100644
index 00000000..5f663ad3
--- /dev/null
+++ b/src/humanloop/agents/types/agents_call_request_agent.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.agent_kernel_request import AgentKernelRequest
+
+AgentsCallRequestAgent = typing.Union[AgentKernelRequest, str]
diff --git a/src/humanloop/agents/types/agents_call_request_tool_choice.py b/src/humanloop/agents/types/agents_call_request_tool_choice.py
new file mode 100644
index 00000000..6dee5a04
--- /dev/null
+++ b/src/humanloop/agents/types/agents_call_request_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.tool_choice import ToolChoice
+
+AgentsCallRequestToolChoice = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice
+]
diff --git a/src/humanloop/agents/types/agents_call_stream_request_agent.py b/src/humanloop/agents/types/agents_call_stream_request_agent.py
new file mode 100644
index 00000000..4b2654e9
--- /dev/null
+++ b/src/humanloop/agents/types/agents_call_stream_request_agent.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.agent_kernel_request import AgentKernelRequest
+
+AgentsCallStreamRequestAgent = typing.Union[AgentKernelRequest, str]
diff --git a/src/humanloop/agents/types/agents_call_stream_request_tool_choice.py b/src/humanloop/agents/types/agents_call_stream_request_tool_choice.py
new file mode 100644
index 00000000..83d264f0
--- /dev/null
+++ b/src/humanloop/agents/types/agents_call_stream_request_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.tool_choice import ToolChoice
+
+AgentsCallStreamRequestToolChoice = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice
+]
diff --git a/src/humanloop/base_client.py b/src/humanloop/base_client.py
index bf72be6a..a11298b8 100644
--- a/src/humanloop/base_client.py
+++ b/src/humanloop/base_client.py
@@ -11,6 +11,7 @@
from .datasets.client import DatasetsClient
from .evaluators.client import EvaluatorsClient
from .flows.client import FlowsClient
+from .agents.client import AgentsClient
from .directories.client import DirectoriesClient
from .files.client import FilesClient
from .evaluations.client import EvaluationsClient
@@ -21,6 +22,7 @@
from .datasets.client import AsyncDatasetsClient
from .evaluators.client import AsyncEvaluatorsClient
from .flows.client import AsyncFlowsClient
+from .agents.client import AsyncAgentsClient
from .directories.client import AsyncDirectoriesClient
from .files.client import AsyncFilesClient
from .evaluations.client import AsyncEvaluationsClient
@@ -96,6 +98,7 @@ def __init__(
self.datasets = DatasetsClient(client_wrapper=self._client_wrapper)
self.evaluators = EvaluatorsClient(client_wrapper=self._client_wrapper)
self.flows = FlowsClient(client_wrapper=self._client_wrapper)
+ self.agents = AgentsClient(client_wrapper=self._client_wrapper)
self.directories = DirectoriesClient(client_wrapper=self._client_wrapper)
self.files = FilesClient(client_wrapper=self._client_wrapper)
self.evaluations = EvaluationsClient(client_wrapper=self._client_wrapper)
@@ -171,6 +174,7 @@ def __init__(
self.datasets = AsyncDatasetsClient(client_wrapper=self._client_wrapper)
self.evaluators = AsyncEvaluatorsClient(client_wrapper=self._client_wrapper)
self.flows = AsyncFlowsClient(client_wrapper=self._client_wrapper)
+ self.agents = AsyncAgentsClient(client_wrapper=self._client_wrapper)
self.directories = AsyncDirectoriesClient(client_wrapper=self._client_wrapper)
self.files = AsyncFilesClient(client_wrapper=self._client_wrapper)
self.evaluations = AsyncEvaluationsClient(client_wrapper=self._client_wrapper)
diff --git a/src/humanloop/core/client_wrapper.py b/src/humanloop/core/client_wrapper.py
index f25dc2ca..94cf9db0 100644
--- a/src/humanloop/core/client_wrapper.py
+++ b/src/humanloop/core/client_wrapper.py
@@ -14,10 +14,10 @@ def __init__(self, *, api_key: str, base_url: str, timeout: typing.Optional[floa
def get_headers(self) -> typing.Dict[str, str]:
headers: typing.Dict[str, str] = {
- "User-Agent": "humanloop/0.8.35",
+ "User-Agent": "humanloop/0.8.36",
"X-Fern-Language": "Python",
"X-Fern-SDK-Name": "humanloop",
- "X-Fern-SDK-Version": "0.8.35",
+ "X-Fern-SDK-Version": "0.8.36",
}
headers["X-API-KEY"] = self.api_key
return headers
diff --git a/src/humanloop/files/client.py b/src/humanloop/files/client.py
index c07358d0..2d4e1855 100644
--- a/src/humanloop/files/client.py
+++ b/src/humanloop/files/client.py
@@ -7,8 +7,8 @@
from ..types.project_sort_by import ProjectSortBy
from ..types.sort_order import SortOrder
from ..core.request_options import RequestOptions
-from ..types.paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response import (
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse,
+from ..types.paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response import (
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse,
)
from .types.retrieve_by_path_files_retrieve_by_path_post_response import RetrieveByPathFilesRetrieveByPathPostResponse
from ..core.client_wrapper import AsyncClientWrapper
@@ -39,13 +39,15 @@ def list_files(
page: typing.Optional[int] = None,
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
+ directory: typing.Optional[str] = None,
template: typing.Optional[bool] = None,
type: typing.Optional[typing.Union[FileType, typing.Sequence[FileType]]] = None,
environment: typing.Optional[str] = None,
sort_by: typing.Optional[ProjectSortBy] = None,
order: typing.Optional[SortOrder] = None,
+ include_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse:
+ ) -> PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse:
"""
Get a paginated list of files.
@@ -60,6 +62,9 @@ def list_files(
name : typing.Optional[str]
Case-insensitive filter for file name.
+ directory : typing.Optional[str]
+ Case-insensitive filter for directory name.
+
template : typing.Optional[bool]
Filter to include only template files.
@@ -75,12 +80,15 @@ def list_files(
order : typing.Optional[SortOrder]
Direction to sort by.
+ include_content : typing.Optional[bool]
+ Whether to include the serialized file content in the response. Currently only supported for agents and prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse
Successful Response
Examples
@@ -96,11 +104,13 @@ def list_files(
page=page,
size=size,
name=name,
+ directory=directory,
template=template,
type=type,
environment=environment,
sort_by=sort_by,
order=order,
+ include_content=include_content,
request_options=request_options,
)
return response.data
@@ -110,6 +120,7 @@ def retrieve_by_path(
*,
path: str,
environment: typing.Optional[str] = None,
+ include_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> RetrieveByPathFilesRetrieveByPathPostResponse:
"""
@@ -123,6 +134,9 @@ def retrieve_by_path(
environment : typing.Optional[str]
Name of the Environment to retrieve a deployed Version from.
+ include_content : typing.Optional[bool]
+ Whether to include the serialized file content in the response. Currently only supported for agents and prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -143,7 +157,7 @@ def retrieve_by_path(
)
"""
response = self._raw_client.retrieve_by_path(
- path=path, environment=environment, request_options=request_options
+ path=path, environment=environment, include_content=include_content, request_options=request_options
)
return response.data
@@ -169,13 +183,15 @@ async def list_files(
page: typing.Optional[int] = None,
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
+ directory: typing.Optional[str] = None,
template: typing.Optional[bool] = None,
type: typing.Optional[typing.Union[FileType, typing.Sequence[FileType]]] = None,
environment: typing.Optional[str] = None,
sort_by: typing.Optional[ProjectSortBy] = None,
order: typing.Optional[SortOrder] = None,
+ include_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse:
+ ) -> PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse:
"""
Get a paginated list of files.
@@ -190,6 +206,9 @@ async def list_files(
name : typing.Optional[str]
Case-insensitive filter for file name.
+ directory : typing.Optional[str]
+ Case-insensitive filter for directory name.
+
template : typing.Optional[bool]
Filter to include only template files.
@@ -205,12 +224,15 @@ async def list_files(
order : typing.Optional[SortOrder]
Direction to sort by.
+ include_content : typing.Optional[bool]
+ Whether to include the serialized file content in the response. Currently only supported for agents and prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse
Successful Response
Examples
@@ -234,11 +256,13 @@ async def main() -> None:
page=page,
size=size,
name=name,
+ directory=directory,
template=template,
type=type,
environment=environment,
sort_by=sort_by,
order=order,
+ include_content=include_content,
request_options=request_options,
)
return response.data
@@ -248,6 +272,7 @@ async def retrieve_by_path(
*,
path: str,
environment: typing.Optional[str] = None,
+ include_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> RetrieveByPathFilesRetrieveByPathPostResponse:
"""
@@ -261,6 +286,9 @@ async def retrieve_by_path(
environment : typing.Optional[str]
Name of the Environment to retrieve a deployed Version from.
+ include_content : typing.Optional[bool]
+ Whether to include the serialized file content in the response. Currently only supported for agents and prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -289,6 +317,6 @@ async def main() -> None:
asyncio.run(main())
"""
response = await self._raw_client.retrieve_by_path(
- path=path, environment=environment, request_options=request_options
+ path=path, environment=environment, include_content=include_content, request_options=request_options
)
return response.data
diff --git a/src/humanloop/files/raw_client.py b/src/humanloop/files/raw_client.py
index 19f52cf2..1a30a892 100644
--- a/src/humanloop/files/raw_client.py
+++ b/src/humanloop/files/raw_client.py
@@ -7,8 +7,8 @@
from ..types.sort_order import SortOrder
from ..core.request_options import RequestOptions
from ..core.http_response import HttpResponse
-from ..types.paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response import (
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse,
+from ..types.paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response import (
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse,
)
from ..core.unchecked_base_model import construct_type
from ..errors.unprocessable_entity_error import UnprocessableEntityError
@@ -33,13 +33,17 @@ def list_files(
page: typing.Optional[int] = None,
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
+ directory: typing.Optional[str] = None,
template: typing.Optional[bool] = None,
type: typing.Optional[typing.Union[FileType, typing.Sequence[FileType]]] = None,
environment: typing.Optional[str] = None,
sort_by: typing.Optional[ProjectSortBy] = None,
order: typing.Optional[SortOrder] = None,
+ include_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> HttpResponse[PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse]:
+ ) -> HttpResponse[
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse
+ ]:
"""
Get a paginated list of files.
@@ -54,6 +58,9 @@ def list_files(
name : typing.Optional[str]
Case-insensitive filter for file name.
+ directory : typing.Optional[str]
+ Case-insensitive filter for directory name.
+
template : typing.Optional[bool]
Filter to include only template files.
@@ -69,12 +76,15 @@ def list_files(
order : typing.Optional[SortOrder]
Direction to sort by.
+ include_content : typing.Optional[bool]
+ Whether to include the serialized file content in the response. Currently only supported for agents and prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- HttpResponse[PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse]
+ HttpResponse[PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse]
Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
@@ -84,20 +94,22 @@ def list_files(
"page": page,
"size": size,
"name": name,
+ "directory": directory,
"template": template,
"type": type,
"environment": environment,
"sort_by": sort_by,
"order": order,
+ "include_content": include_content,
},
request_options=request_options,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse,
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse,
construct_type(
- type_=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse, # type: ignore
+ type_=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse, # type: ignore
object_=_response.json(),
),
)
@@ -122,6 +134,7 @@ def retrieve_by_path(
*,
path: str,
environment: typing.Optional[str] = None,
+ include_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[RetrieveByPathFilesRetrieveByPathPostResponse]:
"""
@@ -135,6 +148,9 @@ def retrieve_by_path(
environment : typing.Optional[str]
Name of the Environment to retrieve a deployed Version from.
+ include_content : typing.Optional[bool]
+ Whether to include the serialized file content in the response. Currently only supported for agents and prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -148,6 +164,7 @@ def retrieve_by_path(
method="POST",
params={
"environment": environment,
+ "include_content": include_content,
},
json={
"path": path,
@@ -194,13 +211,17 @@ async def list_files(
page: typing.Optional[int] = None,
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
+ directory: typing.Optional[str] = None,
template: typing.Optional[bool] = None,
type: typing.Optional[typing.Union[FileType, typing.Sequence[FileType]]] = None,
environment: typing.Optional[str] = None,
sort_by: typing.Optional[ProjectSortBy] = None,
order: typing.Optional[SortOrder] = None,
+ include_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> AsyncHttpResponse[PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse]:
+ ) -> AsyncHttpResponse[
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse
+ ]:
"""
Get a paginated list of files.
@@ -215,6 +236,9 @@ async def list_files(
name : typing.Optional[str]
Case-insensitive filter for file name.
+ directory : typing.Optional[str]
+ Case-insensitive filter for directory name.
+
template : typing.Optional[bool]
Filter to include only template files.
@@ -230,12 +254,15 @@ async def list_files(
order : typing.Optional[SortOrder]
Direction to sort by.
+ include_content : typing.Optional[bool]
+ Whether to include the serialized file content in the response. Currently only supported for agents and prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- AsyncHttpResponse[PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse]
+ AsyncHttpResponse[PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse]
Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
@@ -245,20 +272,22 @@ async def list_files(
"page": page,
"size": size,
"name": name,
+ "directory": directory,
"template": template,
"type": type,
"environment": environment,
"sort_by": sort_by,
"order": order,
+ "include_content": include_content,
},
request_options=request_options,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse,
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse,
construct_type(
- type_=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse, # type: ignore
+ type_=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse, # type: ignore
object_=_response.json(),
),
)
@@ -283,6 +312,7 @@ async def retrieve_by_path(
*,
path: str,
environment: typing.Optional[str] = None,
+ include_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[RetrieveByPathFilesRetrieveByPathPostResponse]:
"""
@@ -296,6 +326,9 @@ async def retrieve_by_path(
environment : typing.Optional[str]
Name of the Environment to retrieve a deployed Version from.
+ include_content : typing.Optional[bool]
+ Whether to include the serialized file content in the response. Currently only supported for agents and prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -309,6 +342,7 @@ async def retrieve_by_path(
method="POST",
params={
"environment": environment,
+ "include_content": include_content,
},
json={
"path": path,
diff --git a/src/humanloop/files/requests/retrieve_by_path_files_retrieve_by_path_post_response.py b/src/humanloop/files/requests/retrieve_by_path_files_retrieve_by_path_post_response.py
index c1618edb..8c070ab3 100644
--- a/src/humanloop/files/requests/retrieve_by_path_files_retrieve_by_path_post_response.py
+++ b/src/humanloop/files/requests/retrieve_by_path_files_retrieve_by_path_post_response.py
@@ -6,7 +6,13 @@
from ...requests.dataset_response import DatasetResponseParams
from ...requests.evaluator_response import EvaluatorResponseParams
from ...requests.flow_response import FlowResponseParams
+from ...requests.agent_response import AgentResponseParams
RetrieveByPathFilesRetrieveByPathPostResponseParams = typing.Union[
- PromptResponseParams, ToolResponseParams, DatasetResponseParams, EvaluatorResponseParams, FlowResponseParams
+ PromptResponseParams,
+ ToolResponseParams,
+ DatasetResponseParams,
+ EvaluatorResponseParams,
+ FlowResponseParams,
+ AgentResponseParams,
]
diff --git a/src/humanloop/files/types/retrieve_by_path_files_retrieve_by_path_post_response.py b/src/humanloop/files/types/retrieve_by_path_files_retrieve_by_path_post_response.py
index 48415fc9..46ea271a 100644
--- a/src/humanloop/files/types/retrieve_by_path_files_retrieve_by_path_post_response.py
+++ b/src/humanloop/files/types/retrieve_by_path_files_retrieve_by_path_post_response.py
@@ -6,7 +6,8 @@
from ...types.dataset_response import DatasetResponse
from ...types.evaluator_response import EvaluatorResponse
from ...types.flow_response import FlowResponse
+from ...types.agent_response import AgentResponse
RetrieveByPathFilesRetrieveByPathPostResponse = typing.Union[
- PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse
+ PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse, AgentResponse
]
diff --git a/src/humanloop/flows/client.py b/src/humanloop/flows/client.py
index a11776fc..bcb9491c 100644
--- a/src/humanloop/flows/client.py
+++ b/src/humanloop/flows/client.py
@@ -214,10 +214,10 @@ def log(
output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.",
log_status="incomplete",
start_time=datetime.datetime.fromisoformat(
- "2024-07-08 19:40:35+00:00",
+ "2024-07-08 21:40:35+00:00",
),
end_time=datetime.datetime.fromisoformat(
- "2024-07-08 19:40:39+00:00",
+ "2024-07-08 21:40:39+00:00",
),
)
"""
@@ -1128,10 +1128,10 @@ async def main() -> None:
output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.",
log_status="incomplete",
start_time=datetime.datetime.fromisoformat(
- "2024-07-08 19:40:35+00:00",
+ "2024-07-08 21:40:35+00:00",
),
end_time=datetime.datetime.fromisoformat(
- "2024-07-08 19:40:39+00:00",
+ "2024-07-08 21:40:39+00:00",
),
)
diff --git a/src/humanloop/logs/client.py b/src/humanloop/logs/client.py
index 17007c1b..b16d1f6b 100644
--- a/src/humanloop/logs/client.py
+++ b/src/humanloop/logs/client.py
@@ -99,7 +99,7 @@ def list(
If provided, limit the response to a random subset of logs from the filtered results. (This will be an approximate sample, not a strict limit.)
include_trace_children : typing.Optional[bool]
- If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow Logs.
+ If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow or Agent Logs.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -338,7 +338,7 @@ async def list(
If provided, limit the response to a random subset of logs from the filtered results. (This will be an approximate sample, not a strict limit.)
include_trace_children : typing.Optional[bool]
- If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow Logs.
+ If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow or Agent Logs.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
diff --git a/src/humanloop/prompts/__init__.py b/src/humanloop/prompts/__init__.py
index c1147ff2..557dcc5c 100644
--- a/src/humanloop/prompts/__init__.py
+++ b/src/humanloop/prompts/__init__.py
@@ -1,33 +1,49 @@
# This file was auto-generated by Fern from our API Definition.
from .types import (
+ PromptLogRequestPrompt,
PromptLogRequestToolChoice,
PromptLogUpdateRequestToolChoice,
+ PromptRequestReasoningEffort,
PromptRequestStop,
PromptRequestTemplate,
+ PromptsCallRequestPrompt,
PromptsCallRequestToolChoice,
+ PromptsCallStreamRequestPrompt,
PromptsCallStreamRequestToolChoice,
)
from .requests import (
+ PromptLogRequestPromptParams,
PromptLogRequestToolChoiceParams,
PromptLogUpdateRequestToolChoiceParams,
+ PromptRequestReasoningEffortParams,
PromptRequestStopParams,
PromptRequestTemplateParams,
+ PromptsCallRequestPromptParams,
PromptsCallRequestToolChoiceParams,
+ PromptsCallStreamRequestPromptParams,
PromptsCallStreamRequestToolChoiceParams,
)
__all__ = [
+ "PromptLogRequestPrompt",
+ "PromptLogRequestPromptParams",
"PromptLogRequestToolChoice",
"PromptLogRequestToolChoiceParams",
"PromptLogUpdateRequestToolChoice",
"PromptLogUpdateRequestToolChoiceParams",
+ "PromptRequestReasoningEffort",
+ "PromptRequestReasoningEffortParams",
"PromptRequestStop",
"PromptRequestStopParams",
"PromptRequestTemplate",
"PromptRequestTemplateParams",
+ "PromptsCallRequestPrompt",
+ "PromptsCallRequestPromptParams",
"PromptsCallRequestToolChoice",
"PromptsCallRequestToolChoiceParams",
+ "PromptsCallStreamRequestPrompt",
+ "PromptsCallStreamRequestPromptParams",
"PromptsCallStreamRequestToolChoice",
"PromptsCallStreamRequestToolChoiceParams",
]
diff --git a/src/humanloop/prompts/client.py b/src/humanloop/prompts/client.py
index e2fff4c3..865c033f 100644
--- a/src/humanloop/prompts/client.py
+++ b/src/humanloop/prompts/client.py
@@ -5,7 +5,7 @@
from .raw_client import RawPromptsClient
from ..requests.chat_message import ChatMessageParams
from .requests.prompt_log_request_tool_choice import PromptLogRequestToolChoiceParams
-from ..requests.prompt_kernel_request import PromptKernelRequestParams
+from .requests.prompt_log_request_prompt import PromptLogRequestPromptParams
import datetime as dt
from ..types.log_status import LogStatus
from ..core.request_options import RequestOptions
@@ -13,9 +13,11 @@
from .requests.prompt_log_update_request_tool_choice import PromptLogUpdateRequestToolChoiceParams
from ..types.log_response import LogResponse
from .requests.prompts_call_stream_request_tool_choice import PromptsCallStreamRequestToolChoiceParams
+from .requests.prompts_call_stream_request_prompt import PromptsCallStreamRequestPromptParams
from ..requests.provider_api_keys import ProviderApiKeysParams
from ..types.prompt_call_stream_response import PromptCallStreamResponse
from .requests.prompts_call_request_tool_choice import PromptsCallRequestToolChoiceParams
+from .requests.prompts_call_request_prompt import PromptsCallRequestPromptParams
from ..types.prompt_call_response import PromptCallResponse
from ..types.project_sort_by import ProjectSortBy
from ..types.sort_order import SortOrder
@@ -33,7 +35,7 @@
from ..types.model_providers import ModelProviders
from .requests.prompt_request_stop import PromptRequestStopParams
from ..requests.response_format import ResponseFormatParams
-from ..types.reasoning_effort import ReasoningEffort
+from .requests.prompt_request_reasoning_effort import PromptRequestReasoningEffortParams
from ..requests.tool_function import ToolFunctionParams
from ..types.populate_template_response import PopulateTemplateResponse
from ..types.list_prompts import ListPrompts
@@ -44,6 +46,7 @@
from ..requests.evaluator_activation_deactivation_request_deactivate_item import (
EvaluatorActivationDeactivationRequestDeactivateItemParams,
)
+from ..types.prompt_kernel_request import PromptKernelRequest
from ..core.client_wrapper import AsyncClientWrapper
from .raw_client import AsyncRawPromptsClient
from ..core.pagination import AsyncPager
@@ -84,7 +87,7 @@ def log(
finish_reason: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptLogRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptLogRequestPromptParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -165,8 +168,11 @@ def log(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptLogRequestPromptParams]
+ The prompt configuration to use. Two formats are supported:
+ - A `'PromptKernelRequest'` object containing the prompt configuration
+ - A string containing a serialized .prompt file
+ A new Prompt version will be created if the provided details are new.
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -256,7 +262,7 @@ def log(
messages=[{"role": "user", "content": "What really happened at Roswell?"}],
inputs={"person": "Trump"},
created_at=datetime.datetime.fromisoformat(
- "2024-07-18 21:29:35.178000+00:00",
+ "2024-07-18 23:29:35.178000+00:00",
),
provider_latency=6.5931549072265625,
output_message={
@@ -479,7 +485,7 @@ def call_stream(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallStreamRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallStreamRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -537,8 +543,11 @@ def call_stream(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
+ The prompt configuration to use. Two formats are supported:
+ - A `'PromptKernelRequest'` object containing the prompt configuration
+ - A string containing a serialized .prompt file
+ A new Prompt version will be created if the provided details are new.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -648,7 +657,7 @@ def call(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -706,8 +715,11 @@ def call(
- `'required'` means the model must call one or more of the provided tools.
            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallRequestPromptParams]
+ The prompt configuration to use. Two formats are supported:
+ - A `'PromptKernelRequest'` object containing the prompt configuration
+ - A string containing a serialized .prompt file
+ A new Prompt version will be created if the provided details are new.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -962,7 +974,7 @@ def upsert(
other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
seed: typing.Optional[int] = OMIT,
response_format: typing.Optional[ResponseFormatParams] = OMIT,
- reasoning_effort: typing.Optional[ReasoningEffort] = OMIT,
+ reasoning_effort: typing.Optional[PromptRequestReasoningEffortParams] = OMIT,
tools: typing.Optional[typing.Sequence[ToolFunctionParams]] = OMIT,
linked_tools: typing.Optional[typing.Sequence[str]] = OMIT,
attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -1037,8 +1049,8 @@ def upsert(
response_format : typing.Optional[ResponseFormatParams]
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
- reasoning_effort : typing.Optional[ReasoningEffort]
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+ reasoning_effort : typing.Optional[PromptRequestReasoningEffortParams]
+            Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an `OpenAiReasoningEffort` enum value. Anthropic reasoning models expect an integer specifying the maximum reasoning-token budget.
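+            For example (illustrative values): `reasoning_effort="medium"` for an OpenAI model, or `reasoning_effort=1024` for an Anthropic model.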
tools : typing.Optional[typing.Sequence[ToolFunctionParams]]
The tool specification that the model can choose to call if Tool calling is supported.
@@ -1599,6 +1611,93 @@ def update_monitoring(
)
return response.data
+ def serialize(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> str:
+ """
+ Serialize a Prompt to the .prompt file format.
+
+ Useful for storing the Prompt with your code in a version control system,
+ or for editing with an AI tool.
+
+ By default, the deployed version of the Prompt is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Prompt.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Prompt.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Prompt to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ str
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.prompts.serialize(
+ id="id",
+ )
+ """
+ response = self._raw_client.serialize(
+ id, version_id=version_id, environment=environment, request_options=request_options
+ )
+ return response.data
+
+ def deserialize(
+ self, *, prompt: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> PromptKernelRequest:
+ """
+ Deserialize a Prompt from the .prompt file format.
+
+ This returns a subset of the attributes required by a Prompt.
+        This subset defines the Prompt version (e.g. `model`, `temperature`, etc.).
+
+ Parameters
+ ----------
+ prompt : str
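+            The raw contents of a serialized .prompt file.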
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ PromptKernelRequest
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.prompts.deserialize(
+ prompt="prompt",
+ )
+ """
+ response = self._raw_client.deserialize(prompt=prompt, request_options=request_options)
+ return response.data
+
class AsyncPromptsClient:
def __init__(self, *, client_wrapper: AsyncClientWrapper):
@@ -1632,7 +1731,7 @@ async def log(
finish_reason: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptLogRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptLogRequestPromptParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -1713,8 +1812,11 @@ async def log(
- `'required'` means the model must call one or more of the provided tools.
            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptLogRequestPromptParams]
+ The prompt configuration to use. Two formats are supported:
+ - A `'PromptKernelRequest'` object containing the prompt configuration
+ - A string containing a serialized .prompt file
+ A new Prompt version will be created if the provided details are new.
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -1810,7 +1912,7 @@ async def main() -> None:
],
inputs={"person": "Trump"},
created_at=datetime.datetime.fromisoformat(
- "2024-07-18 21:29:35.178000+00:00",
+ "2024-07-18 23:29:35.178000+00:00",
),
provider_latency=6.5931549072265625,
output_message={
@@ -2044,7 +2146,7 @@ async def call_stream(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallStreamRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallStreamRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2102,8 +2204,11 @@ async def call_stream(
- `'required'` means the model must call one or more of the provided tools.
            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
+ The prompt configuration to use. Two formats are supported:
+ - A `'PromptKernelRequest'` object containing the prompt configuration
+ - A string containing a serialized .prompt file
+ A new Prompt version will be created if the provided details are new.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -2222,7 +2327,7 @@ async def call(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2280,8 +2385,11 @@ async def call(
- `'required'` means the model must call one or more of the provided tools.
            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallRequestPromptParams]
+ The prompt configuration to use. Two formats are supported:
+ - A `'PromptKernelRequest'` object containing the prompt configuration
+ - A string containing a serialized .prompt file
+ A new Prompt version will be created if the provided details are new.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -2552,7 +2660,7 @@ async def upsert(
other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
seed: typing.Optional[int] = OMIT,
response_format: typing.Optional[ResponseFormatParams] = OMIT,
- reasoning_effort: typing.Optional[ReasoningEffort] = OMIT,
+ reasoning_effort: typing.Optional[PromptRequestReasoningEffortParams] = OMIT,
tools: typing.Optional[typing.Sequence[ToolFunctionParams]] = OMIT,
linked_tools: typing.Optional[typing.Sequence[str]] = OMIT,
attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2627,8 +2735,8 @@ async def upsert(
response_format : typing.Optional[ResponseFormatParams]
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
- reasoning_effort : typing.Optional[ReasoningEffort]
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+ reasoning_effort : typing.Optional[PromptRequestReasoningEffortParams]
+            Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an `OpenAiReasoningEffort` enum value. Anthropic reasoning models expect an integer specifying the maximum reasoning-token budget.
tools : typing.Optional[typing.Sequence[ToolFunctionParams]]
The tool specification that the model can choose to call if Tool calling is supported.
@@ -3284,3 +3392,106 @@ async def main() -> None:
id, activate=activate, deactivate=deactivate, request_options=request_options
)
return response.data
+
+ async def serialize(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> str:
+ """
+ Serialize a Prompt to the .prompt file format.
+
+ Useful for storing the Prompt with your code in a version control system,
+ or for editing with an AI tool.
+
+ By default, the deployed version of the Prompt is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Prompt.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Prompt.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Prompt to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ str
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.prompts.serialize(
+ id="id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.serialize(
+ id, version_id=version_id, environment=environment, request_options=request_options
+ )
+ return response.data
+
+ async def deserialize(
+ self, *, prompt: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> PromptKernelRequest:
+ """
+ Deserialize a Prompt from the .prompt file format.
+
+ This returns a subset of the attributes required by a Prompt.
+        This subset defines the Prompt version (e.g. `model`, `temperature`, etc.).
+
+ Parameters
+ ----------
+ prompt : str
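+            The raw contents of a serialized .prompt file.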
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ PromptKernelRequest
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.prompts.deserialize(
+ prompt="prompt",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.deserialize(prompt=prompt, request_options=request_options)
+ return response.data
diff --git a/src/humanloop/prompts/raw_client.py b/src/humanloop/prompts/raw_client.py
index b5334c82..f809f1b1 100644
--- a/src/humanloop/prompts/raw_client.py
+++ b/src/humanloop/prompts/raw_client.py
@@ -4,7 +4,7 @@
from ..core.client_wrapper import SyncClientWrapper
from ..requests.chat_message import ChatMessageParams
from .requests.prompt_log_request_tool_choice import PromptLogRequestToolChoiceParams
-from ..requests.prompt_kernel_request import PromptKernelRequestParams
+from .requests.prompt_log_request_prompt import PromptLogRequestPromptParams
import datetime as dt
from ..types.log_status import LogStatus
from ..core.request_options import RequestOptions
@@ -20,11 +20,13 @@
from ..types.log_response import LogResponse
from ..core.jsonable_encoder import jsonable_encoder
from .requests.prompts_call_stream_request_tool_choice import PromptsCallStreamRequestToolChoiceParams
+from .requests.prompts_call_stream_request_prompt import PromptsCallStreamRequestPromptParams
from ..requests.provider_api_keys import ProviderApiKeysParams
from ..types.prompt_call_stream_response import PromptCallStreamResponse
import httpx_sse
import contextlib
from .requests.prompts_call_request_tool_choice import PromptsCallRequestToolChoiceParams
+from .requests.prompts_call_request_prompt import PromptsCallRequestPromptParams
from ..types.prompt_call_response import PromptCallResponse
from ..types.model_endpoints import ModelEndpoints
from .requests.prompt_request_template import PromptRequestTemplateParams
@@ -32,7 +34,7 @@
from ..types.model_providers import ModelProviders
from .requests.prompt_request_stop import PromptRequestStopParams
from ..requests.response_format import ResponseFormatParams
-from ..types.reasoning_effort import ReasoningEffort
+from .requests.prompt_request_reasoning_effort import PromptRequestReasoningEffortParams
from ..requests.tool_function import ToolFunctionParams
from ..types.prompt_response import PromptResponse
from ..types.populate_template_response import PopulateTemplateResponse
@@ -44,6 +46,7 @@
from ..requests.evaluator_activation_deactivation_request_deactivate_item import (
EvaluatorActivationDeactivationRequestDeactivateItemParams,
)
+from ..types.prompt_kernel_request import PromptKernelRequest
from ..core.client_wrapper import AsyncClientWrapper
from ..core.http_response import AsyncHttpResponse
@@ -72,7 +75,7 @@ def log(
finish_reason: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptLogRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptLogRequestPromptParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -153,8 +156,11 @@ def log(
- `'required'` means the model must call one or more of the provided tools.
            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptLogRequestPromptParams]
+ The prompt configuration to use. Two formats are supported:
+ - A `'PromptKernelRequest'` object containing the prompt configuration
+ - A string containing a serialized .prompt file
+ A new Prompt version will be created if the provided details are new.
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -248,7 +254,7 @@ def log(
object_=tool_choice, annotation=PromptLogRequestToolChoiceParams, direction="write"
),
"prompt": convert_and_respect_annotation_metadata(
- object_=prompt, annotation=PromptKernelRequestParams, direction="write"
+ object_=prompt, annotation=PromptLogRequestPromptParams, direction="write"
),
"start_time": start_time,
"end_time": end_time,
@@ -495,7 +501,7 @@ def call_stream(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallStreamRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallStreamRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -553,8 +559,11 @@ def call_stream(
- `'required'` means the model must call one or more of the provided tools.
            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
+ The prompt configuration to use. Two formats are supported:
+ - A `'PromptKernelRequest'` object containing the prompt configuration
+ - A string containing a serialized .prompt file
+ A new Prompt version will be created if the provided details are new.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -632,7 +641,7 @@ def call_stream(
object_=tool_choice, annotation=PromptsCallStreamRequestToolChoiceParams, direction="write"
),
"prompt": convert_and_respect_annotation_metadata(
- object_=prompt, annotation=PromptKernelRequestParams, direction="write"
+ object_=prompt, annotation=PromptsCallStreamRequestPromptParams, direction="write"
),
"inputs": inputs,
"source": source,
@@ -705,7 +714,7 @@ def call(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -763,8 +772,11 @@ def call(
- `'required'` means the model must call one or more of the provided tools.
            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallRequestPromptParams]
+ The prompt configuration to use. Two formats are supported:
+ - A `'PromptKernelRequest'` object containing the prompt configuration
+ - A string containing a serialized .prompt file
+ A new Prompt version will be created if the provided details are new.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -842,7 +854,7 @@ def call(
object_=tool_choice, annotation=PromptsCallRequestToolChoiceParams, direction="write"
),
"prompt": convert_and_respect_annotation_metadata(
- object_=prompt, annotation=PromptKernelRequestParams, direction="write"
+ object_=prompt, annotation=PromptsCallRequestPromptParams, direction="write"
),
"inputs": inputs,
"source": source,
@@ -915,7 +927,7 @@ def upsert(
other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
seed: typing.Optional[int] = OMIT,
response_format: typing.Optional[ResponseFormatParams] = OMIT,
- reasoning_effort: typing.Optional[ReasoningEffort] = OMIT,
+ reasoning_effort: typing.Optional[PromptRequestReasoningEffortParams] = OMIT,
tools: typing.Optional[typing.Sequence[ToolFunctionParams]] = OMIT,
linked_tools: typing.Optional[typing.Sequence[str]] = OMIT,
attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -990,8 +1002,8 @@ def upsert(
response_format : typing.Optional[ResponseFormatParams]
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
- reasoning_effort : typing.Optional[ReasoningEffort]
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+ reasoning_effort : typing.Optional[PromptRequestReasoningEffortParams]
+            Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an `OpenAiReasoningEffort` enum value. Anthropic reasoning models expect an integer specifying the maximum reasoning-token budget.
tools : typing.Optional[typing.Sequence[ToolFunctionParams]]
The tool specification that the model can choose to call if Tool calling is supported.
@@ -1051,7 +1063,9 @@ def upsert(
"response_format": convert_and_respect_annotation_metadata(
object_=response_format, annotation=ResponseFormatParams, direction="write"
),
- "reasoning_effort": reasoning_effort,
+ "reasoning_effort": convert_and_respect_annotation_metadata(
+ object_=reasoning_effort, annotation=PromptRequestReasoningEffortParams, direction="write"
+ ),
"tools": convert_and_respect_annotation_metadata(
object_=tools, annotation=typing.Sequence[ToolFunctionParams], direction="write"
),
@@ -1744,6 +1758,127 @@ def update_monitoring(
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
+ def serialize(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[str]:
+ """
+ Serialize a Prompt to the .prompt file format.
+
+ Useful for storing the Prompt with your code in a version control system,
+ or for editing with an AI tool.
+
+ By default, the deployed version of the Prompt is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Prompt.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Prompt.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Prompt to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[str]
+ Successful Response
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"prompts/{jsonable_encoder(id)}/serialize",
+ method="GET",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+                return HttpResponse(response=_response, data=_response.text)  # wrap the raw text body so callers can use .data
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def deserialize(
+ self, *, prompt: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> HttpResponse[PromptKernelRequest]:
+ """
+ Deserialize a Prompt from the .prompt file format.
+
+ This returns a subset of the attributes required by a Prompt.
+        This subset defines the Prompt version (e.g. `model`, `temperature`, etc.).
+
+ Parameters
+ ----------
+ prompt : str
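+            The raw contents of a serialized .prompt file.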
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[PromptKernelRequest]
+ Successful Response
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "prompts/deserialize",
+ method="POST",
+ json={
+ "prompt": prompt,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ PromptKernelRequest,
+ construct_type(
+ type_=PromptKernelRequest, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
class AsyncRawPromptsClient:
def __init__(self, *, client_wrapper: AsyncClientWrapper):
@@ -1766,7 +1901,7 @@ async def log(
finish_reason: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptLogRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptLogRequestPromptParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -1847,8 +1982,11 @@ async def log(
- `'required'` means the model must call one or more of the provided tools.
            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptLogRequestPromptParams]
+ The prompt configuration to use. Two formats are supported:
+ - A `'PromptKernelRequest'` object containing the prompt configuration
+ - A string containing a serialized .prompt file
+ A new Prompt version will be created if the provided details are new.
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -1942,7 +2080,7 @@ async def log(
object_=tool_choice, annotation=PromptLogRequestToolChoiceParams, direction="write"
),
"prompt": convert_and_respect_annotation_metadata(
- object_=prompt, annotation=PromptKernelRequestParams, direction="write"
+ object_=prompt, annotation=PromptLogRequestPromptParams, direction="write"
),
"start_time": start_time,
"end_time": end_time,
@@ -2189,7 +2327,7 @@ async def call_stream(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallStreamRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallStreamRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2247,8 +2385,11 @@ async def call_stream(
- `'required'` means the model must call one or more of the provided tools.
            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
+ The prompt configuration to use. Two formats are supported:
+ - A `'PromptKernelRequest'` object containing the prompt configuration
+ - A string containing a serialized .prompt file
+ A new Prompt version will be created if the provided details are new.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -2326,7 +2467,7 @@ async def call_stream(
object_=tool_choice, annotation=PromptsCallStreamRequestToolChoiceParams, direction="write"
),
"prompt": convert_and_respect_annotation_metadata(
- object_=prompt, annotation=PromptKernelRequestParams, direction="write"
+ object_=prompt, annotation=PromptsCallStreamRequestPromptParams, direction="write"
),
"inputs": inputs,
"source": source,
@@ -2399,7 +2540,7 @@ async def call(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2457,8 +2598,11 @@ async def call(
- `'required'` means the model must call one or more of the provided tools.
            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallRequestPromptParams]
+ The prompt configuration to use. Two formats are supported:
+ - A `'PromptKernelRequest'` object containing the prompt configuration
+ - A string containing a serialized .prompt file
+ A new Prompt version will be created if the provided details are new.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -2536,7 +2680,7 @@ async def call(
object_=tool_choice, annotation=PromptsCallRequestToolChoiceParams, direction="write"
),
"prompt": convert_and_respect_annotation_metadata(
- object_=prompt, annotation=PromptKernelRequestParams, direction="write"
+ object_=prompt, annotation=PromptsCallRequestPromptParams, direction="write"
),
"inputs": inputs,
"source": source,
@@ -2609,7 +2753,7 @@ async def upsert(
other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
seed: typing.Optional[int] = OMIT,
response_format: typing.Optional[ResponseFormatParams] = OMIT,
- reasoning_effort: typing.Optional[ReasoningEffort] = OMIT,
+ reasoning_effort: typing.Optional[PromptRequestReasoningEffortParams] = OMIT,
tools: typing.Optional[typing.Sequence[ToolFunctionParams]] = OMIT,
linked_tools: typing.Optional[typing.Sequence[str]] = OMIT,
attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2684,8 +2828,8 @@ async def upsert(
response_format : typing.Optional[ResponseFormatParams]
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
- reasoning_effort : typing.Optional[ReasoningEffort]
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+ reasoning_effort : typing.Optional[PromptRequestReasoningEffortParams]
+            Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an `OpenAiReasoningEffort` enum value. Anthropic reasoning models expect an integer specifying the maximum reasoning-token budget.
tools : typing.Optional[typing.Sequence[ToolFunctionParams]]
The tool specification that the model can choose to call if Tool calling is supported.
@@ -2745,7 +2889,9 @@ async def upsert(
"response_format": convert_and_respect_annotation_metadata(
object_=response_format, annotation=ResponseFormatParams, direction="write"
),
- "reasoning_effort": reasoning_effort,
+ "reasoning_effort": convert_and_respect_annotation_metadata(
+ object_=reasoning_effort, annotation=PromptRequestReasoningEffortParams, direction="write"
+ ),
"tools": convert_and_respect_annotation_metadata(
object_=tools, annotation=typing.Sequence[ToolFunctionParams], direction="write"
),
@@ -3439,3 +3585,124 @@ async def update_monitoring(
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def serialize(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[str]:
+ """
+ Serialize a Prompt to the .prompt file format.
+
+ Useful for storing the Prompt with your code in a version control system,
+ or for editing with an AI tool.
+
+ By default, the deployed version of the Prompt is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Prompt.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Prompt.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Prompt to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[str]
+ Successful Response
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"prompts/{jsonable_encoder(id)}/serialize",
+ method="GET",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+                return AsyncHttpResponse(response=_response, data=_response.text)  # wrap the raw text body so callers can use .data
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def deserialize(
+ self, *, prompt: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncHttpResponse[PromptKernelRequest]:
+ """
+ Deserialize a Prompt from the .prompt file format.
+
+ This returns a subset of the attributes required by a Prompt.
+        This subset defines the Prompt version (e.g. `model`, `temperature`, etc.).
+
+ Parameters
+ ----------
+ prompt : str
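+            The raw contents of a serialized .prompt file.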
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[PromptKernelRequest]
+ Successful Response
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "prompts/deserialize",
+ method="POST",
+ json={
+ "prompt": prompt,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ PromptKernelRequest,
+ construct_type(
+ type_=PromptKernelRequest, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/humanloop/prompts/requests/__init__.py b/src/humanloop/prompts/requests/__init__.py
index c5119552..ae1cfb6a 100644
--- a/src/humanloop/prompts/requests/__init__.py
+++ b/src/humanloop/prompts/requests/__init__.py
@@ -1,17 +1,25 @@
# This file was auto-generated by Fern from our API Definition.
+from .prompt_log_request_prompt import PromptLogRequestPromptParams
from .prompt_log_request_tool_choice import PromptLogRequestToolChoiceParams
from .prompt_log_update_request_tool_choice import PromptLogUpdateRequestToolChoiceParams
+from .prompt_request_reasoning_effort import PromptRequestReasoningEffortParams
from .prompt_request_stop import PromptRequestStopParams
from .prompt_request_template import PromptRequestTemplateParams
+from .prompts_call_request_prompt import PromptsCallRequestPromptParams
from .prompts_call_request_tool_choice import PromptsCallRequestToolChoiceParams
+from .prompts_call_stream_request_prompt import PromptsCallStreamRequestPromptParams
from .prompts_call_stream_request_tool_choice import PromptsCallStreamRequestToolChoiceParams
__all__ = [
+ "PromptLogRequestPromptParams",
"PromptLogRequestToolChoiceParams",
"PromptLogUpdateRequestToolChoiceParams",
+ "PromptRequestReasoningEffortParams",
"PromptRequestStopParams",
"PromptRequestTemplateParams",
+ "PromptsCallRequestPromptParams",
"PromptsCallRequestToolChoiceParams",
+ "PromptsCallStreamRequestPromptParams",
"PromptsCallStreamRequestToolChoiceParams",
]
diff --git a/src/humanloop/prompts/requests/prompt_log_request_prompt.py b/src/humanloop/prompts/requests/prompt_log_request_prompt.py
new file mode 100644
index 00000000..8473bb42
--- /dev/null
+++ b/src/humanloop/prompts/requests/prompt_log_request_prompt.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.prompt_kernel_request import PromptKernelRequestParams
+
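+# Union of inline Prompt details (PromptKernelRequestParams) or the raw contents of a serialized .prompt file.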
+PromptLogRequestPromptParams = typing.Union[PromptKernelRequestParams, str]
diff --git a/src/humanloop/prompts/requests/prompt_request_reasoning_effort.py b/src/humanloop/prompts/requests/prompt_request_reasoning_effort.py
new file mode 100644
index 00000000..080a107e
--- /dev/null
+++ b/src/humanloop/prompts/requests/prompt_request_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.open_ai_reasoning_effort import OpenAiReasoningEffort
+
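+# Either an OpenAiReasoningEffort enum value (OpenAI reasoning models) or an int maximum reasoning-token budget (Anthropic reasoning models).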
+PromptRequestReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/prompts/requests/prompts_call_request_prompt.py b/src/humanloop/prompts/requests/prompts_call_request_prompt.py
new file mode 100644
index 00000000..7a236235
--- /dev/null
+++ b/src/humanloop/prompts/requests/prompts_call_request_prompt.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.prompt_kernel_request import PromptKernelRequestParams
+
+PromptsCallRequestPromptParams = typing.Union[PromptKernelRequestParams, str]
diff --git a/src/humanloop/prompts/requests/prompts_call_stream_request_prompt.py b/src/humanloop/prompts/requests/prompts_call_stream_request_prompt.py
new file mode 100644
index 00000000..9524425b
--- /dev/null
+++ b/src/humanloop/prompts/requests/prompts_call_stream_request_prompt.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.prompt_kernel_request import PromptKernelRequestParams
+
+PromptsCallStreamRequestPromptParams = typing.Union[PromptKernelRequestParams, str]
diff --git a/src/humanloop/prompts/types/__init__.py b/src/humanloop/prompts/types/__init__.py
index 644cf6b5..40326bce 100644
--- a/src/humanloop/prompts/types/__init__.py
+++ b/src/humanloop/prompts/types/__init__.py
@@ -1,17 +1,25 @@
# This file was auto-generated by Fern from our API Definition.
+from .prompt_log_request_prompt import PromptLogRequestPrompt
from .prompt_log_request_tool_choice import PromptLogRequestToolChoice
from .prompt_log_update_request_tool_choice import PromptLogUpdateRequestToolChoice
+from .prompt_request_reasoning_effort import PromptRequestReasoningEffort
from .prompt_request_stop import PromptRequestStop
from .prompt_request_template import PromptRequestTemplate
+from .prompts_call_request_prompt import PromptsCallRequestPrompt
from .prompts_call_request_tool_choice import PromptsCallRequestToolChoice
+from .prompts_call_stream_request_prompt import PromptsCallStreamRequestPrompt
from .prompts_call_stream_request_tool_choice import PromptsCallStreamRequestToolChoice
__all__ = [
+ "PromptLogRequestPrompt",
"PromptLogRequestToolChoice",
"PromptLogUpdateRequestToolChoice",
+ "PromptRequestReasoningEffort",
"PromptRequestStop",
"PromptRequestTemplate",
+ "PromptsCallRequestPrompt",
"PromptsCallRequestToolChoice",
+ "PromptsCallStreamRequestPrompt",
"PromptsCallStreamRequestToolChoice",
]
diff --git a/src/humanloop/prompts/types/prompt_log_request_prompt.py b/src/humanloop/prompts/types/prompt_log_request_prompt.py
new file mode 100644
index 00000000..4a0791dc
--- /dev/null
+++ b/src/humanloop/prompts/types/prompt_log_request_prompt.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.prompt_kernel_request import PromptKernelRequest
+
+PromptLogRequestPrompt = typing.Union[PromptKernelRequest, str]
diff --git a/src/humanloop/prompts/types/prompt_request_reasoning_effort.py b/src/humanloop/prompts/types/prompt_request_reasoning_effort.py
new file mode 100644
index 00000000..33f35288
--- /dev/null
+++ b/src/humanloop/prompts/types/prompt_request_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.open_ai_reasoning_effort import OpenAiReasoningEffort
+
+PromptRequestReasoningEffort = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/prompts/types/prompts_call_request_prompt.py b/src/humanloop/prompts/types/prompts_call_request_prompt.py
new file mode 100644
index 00000000..78a9f5a1
--- /dev/null
+++ b/src/humanloop/prompts/types/prompts_call_request_prompt.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.prompt_kernel_request import PromptKernelRequest
+
+PromptsCallRequestPrompt = typing.Union[PromptKernelRequest, str]
diff --git a/src/humanloop/prompts/types/prompts_call_stream_request_prompt.py b/src/humanloop/prompts/types/prompts_call_stream_request_prompt.py
new file mode 100644
index 00000000..71376823
--- /dev/null
+++ b/src/humanloop/prompts/types/prompts_call_stream_request_prompt.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.prompt_kernel_request import PromptKernelRequest
+
+PromptsCallStreamRequestPrompt = typing.Union[PromptKernelRequest, str]
diff --git a/src/humanloop/requests/__init__.py b/src/humanloop/requests/__init__.py
index bd9458ba..ba9f74af 100644
--- a/src/humanloop/requests/__init__.py
+++ b/src/humanloop/requests/__init__.py
@@ -1,11 +1,40 @@
# This file was auto-generated by Fern from our API Definition.
+from .agent_call_response import AgentCallResponseParams
+from .agent_call_response_tool_choice import AgentCallResponseToolChoiceParams
+from .agent_call_stream_response import AgentCallStreamResponseParams
+from .agent_call_stream_response_payload import AgentCallStreamResponsePayloadParams
from .agent_config_response import AgentConfigResponseParams
+from .agent_continue_response import AgentContinueResponseParams
+from .agent_continue_response_tool_choice import AgentContinueResponseToolChoiceParams
+from .agent_continue_stream_response import AgentContinueStreamResponseParams
+from .agent_continue_stream_response_payload import AgentContinueStreamResponsePayloadParams
+from .agent_inline_tool import AgentInlineToolParams
+from .agent_kernel_request import AgentKernelRequestParams
+from .agent_kernel_request_reasoning_effort import AgentKernelRequestReasoningEffortParams
+from .agent_kernel_request_stop import AgentKernelRequestStopParams
+from .agent_kernel_request_template import AgentKernelRequestTemplateParams
+from .agent_kernel_request_tools_item import AgentKernelRequestToolsItemParams
+from .agent_linked_file_request import AgentLinkedFileRequestParams
+from .agent_linked_file_response import AgentLinkedFileResponseParams
+from .agent_linked_file_response_file import AgentLinkedFileResponseFileParams
+from .agent_log_response import AgentLogResponseParams
+from .agent_log_response_tool_choice import AgentLogResponseToolChoiceParams
+from .agent_log_stream_response import AgentLogStreamResponseParams
+from .agent_response import AgentResponseParams
+from .agent_response_reasoning_effort import AgentResponseReasoningEffortParams
+from .agent_response_stop import AgentResponseStopParams
+from .agent_response_template import AgentResponseTemplateParams
+from .agent_response_tools_item import AgentResponseToolsItemParams
+from .anthropic_redacted_thinking_content import AnthropicRedactedThinkingContentParams
+from .anthropic_thinking_content import AnthropicThinkingContentParams
from .boolean_evaluator_stats_response import BooleanEvaluatorStatsResponseParams
from .chat_message import ChatMessageParams
from .chat_message_content import ChatMessageContentParams
from .chat_message_content_item import ChatMessageContentItemParams
+from .chat_message_thinking_item import ChatMessageThinkingItemParams
from .code_evaluator_request import CodeEvaluatorRequestParams
+from .create_agent_log_response import CreateAgentLogResponseParams
from .create_datapoint_request import CreateDatapointRequestParams
from .create_datapoint_request_target_value import CreateDatapointRequestTargetValueParams
from .create_evaluator_log_response import CreateEvaluatorLogResponseParams
@@ -51,6 +80,7 @@
from .external_evaluator_request import ExternalEvaluatorRequestParams
from .file_environment_response import FileEnvironmentResponseParams
from .file_environment_response_file import FileEnvironmentResponseFileParams
+from .file_environment_variable_request import FileEnvironmentVariableRequestParams
from .file_id import FileIdParams
from .file_path import FilePathParams
from .file_request import FileRequestParams
@@ -64,7 +94,9 @@
from .image_chat_content import ImageChatContentParams
from .image_url import ImageUrlParams
from .input_response import InputResponseParams
+from .linked_file_request import LinkedFileRequestParams
from .linked_tool_response import LinkedToolResponseParams
+from .list_agents import ListAgentsParams
from .list_datasets import ListDatasetsParams
from .list_evaluators import ListEvaluatorsParams
from .list_flows import ListFlowsParams
@@ -72,28 +104,31 @@
from .list_tools import ListToolsParams
from .llm_evaluator_request import LlmEvaluatorRequestParams
from .log_response import LogResponseParams
+from .log_stream_response import LogStreamResponseParams
from .monitoring_evaluator_environment_request import MonitoringEvaluatorEnvironmentRequestParams
from .monitoring_evaluator_response import MonitoringEvaluatorResponseParams
from .monitoring_evaluator_version_request import MonitoringEvaluatorVersionRequestParams
from .numeric_evaluator_stats_response import NumericEvaluatorStatsResponseParams
from .overall_stats import OverallStatsParams
+from .paginated_data_agent_response import PaginatedDataAgentResponseParams
from .paginated_data_evaluation_log_response import PaginatedDataEvaluationLogResponseParams
from .paginated_data_evaluator_response import PaginatedDataEvaluatorResponseParams
from .paginated_data_flow_response import PaginatedDataFlowResponseParams
from .paginated_data_log_response import PaginatedDataLogResponseParams
from .paginated_data_prompt_response import PaginatedDataPromptResponseParams
from .paginated_data_tool_response import PaginatedDataToolResponseParams
-from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response import (
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseParams,
+from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response import (
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseParams,
)
-from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item import (
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams,
+from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item import (
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams,
)
from .paginated_datapoint_response import PaginatedDatapointResponseParams
from .paginated_dataset_response import PaginatedDatasetResponseParams
from .paginated_evaluation_response import PaginatedEvaluationResponseParams
from .populate_template_response import PopulateTemplateResponseParams
from .populate_template_response_populated_template import PopulateTemplateResponsePopulatedTemplateParams
+from .populate_template_response_reasoning_effort import PopulateTemplateResponseReasoningEffortParams
from .populate_template_response_stop import PopulateTemplateResponseStopParams
from .populate_template_response_template import PopulateTemplateResponseTemplateParams
from .prompt_call_log_response import PromptCallLogResponseParams
@@ -101,11 +136,13 @@
from .prompt_call_response_tool_choice import PromptCallResponseToolChoiceParams
from .prompt_call_stream_response import PromptCallStreamResponseParams
from .prompt_kernel_request import PromptKernelRequestParams
+from .prompt_kernel_request_reasoning_effort import PromptKernelRequestReasoningEffortParams
from .prompt_kernel_request_stop import PromptKernelRequestStopParams
from .prompt_kernel_request_template import PromptKernelRequestTemplateParams
from .prompt_log_response import PromptLogResponseParams
from .prompt_log_response_tool_choice import PromptLogResponseToolChoiceParams
from .prompt_response import PromptResponseParams
+from .prompt_response_reasoning_effort import PromptResponseReasoningEffortParams
from .prompt_response_stop import PromptResponseStopParams
from .prompt_response_template import PromptResponseTemplateParams
from .provider_api_keys import ProviderApiKeysParams
@@ -117,6 +154,7 @@
from .text_chat_content import TextChatContentParams
from .text_evaluator_stats_response import TextEvaluatorStatsResponseParams
from .tool_call import ToolCallParams
+from .tool_call_response import ToolCallResponseParams
from .tool_choice import ToolChoiceParams
from .tool_function import ToolFunctionParams
from .tool_kernel_request import ToolKernelRequestParams
@@ -135,12 +173,41 @@
from .version_stats_response_evaluator_version_stats_item import VersionStatsResponseEvaluatorVersionStatsItemParams
__all__ = [
+ "AgentCallResponseParams",
+ "AgentCallResponseToolChoiceParams",
+ "AgentCallStreamResponseParams",
+ "AgentCallStreamResponsePayloadParams",
"AgentConfigResponseParams",
+ "AgentContinueResponseParams",
+ "AgentContinueResponseToolChoiceParams",
+ "AgentContinueStreamResponseParams",
+ "AgentContinueStreamResponsePayloadParams",
+ "AgentInlineToolParams",
+ "AgentKernelRequestParams",
+ "AgentKernelRequestReasoningEffortParams",
+ "AgentKernelRequestStopParams",
+ "AgentKernelRequestTemplateParams",
+ "AgentKernelRequestToolsItemParams",
+ "AgentLinkedFileRequestParams",
+ "AgentLinkedFileResponseFileParams",
+ "AgentLinkedFileResponseParams",
+ "AgentLogResponseParams",
+ "AgentLogResponseToolChoiceParams",
+ "AgentLogStreamResponseParams",
+ "AgentResponseParams",
+ "AgentResponseReasoningEffortParams",
+ "AgentResponseStopParams",
+ "AgentResponseTemplateParams",
+ "AgentResponseToolsItemParams",
+ "AnthropicRedactedThinkingContentParams",
+ "AnthropicThinkingContentParams",
"BooleanEvaluatorStatsResponseParams",
"ChatMessageContentItemParams",
"ChatMessageContentParams",
"ChatMessageParams",
+ "ChatMessageThinkingItemParams",
"CodeEvaluatorRequestParams",
+ "CreateAgentLogResponseParams",
"CreateDatapointRequestParams",
"CreateDatapointRequestTargetValueParams",
"CreateEvaluatorLogResponseParams",
@@ -180,6 +247,7 @@
"ExternalEvaluatorRequestParams",
"FileEnvironmentResponseFileParams",
"FileEnvironmentResponseParams",
+ "FileEnvironmentVariableRequestParams",
"FileIdParams",
"FilePathParams",
"FileRequestParams",
@@ -193,7 +261,9 @@
"ImageChatContentParams",
"ImageUrlParams",
"InputResponseParams",
+ "LinkedFileRequestParams",
"LinkedToolResponseParams",
+ "ListAgentsParams",
"ListDatasetsParams",
"ListEvaluatorsParams",
"ListFlowsParams",
@@ -201,24 +271,27 @@
"ListToolsParams",
"LlmEvaluatorRequestParams",
"LogResponseParams",
+ "LogStreamResponseParams",
"MonitoringEvaluatorEnvironmentRequestParams",
"MonitoringEvaluatorResponseParams",
"MonitoringEvaluatorVersionRequestParams",
"NumericEvaluatorStatsResponseParams",
"OverallStatsParams",
+ "PaginatedDataAgentResponseParams",
"PaginatedDataEvaluationLogResponseParams",
"PaginatedDataEvaluatorResponseParams",
"PaginatedDataFlowResponseParams",
"PaginatedDataLogResponseParams",
"PaginatedDataPromptResponseParams",
"PaginatedDataToolResponseParams",
- "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseParams",
- "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams",
+ "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseParams",
+ "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams",
"PaginatedDatapointResponseParams",
"PaginatedDatasetResponseParams",
"PaginatedEvaluationResponseParams",
"PopulateTemplateResponseParams",
"PopulateTemplateResponsePopulatedTemplateParams",
+ "PopulateTemplateResponseReasoningEffortParams",
"PopulateTemplateResponseStopParams",
"PopulateTemplateResponseTemplateParams",
"PromptCallLogResponseParams",
@@ -226,11 +299,13 @@
"PromptCallResponseToolChoiceParams",
"PromptCallStreamResponseParams",
"PromptKernelRequestParams",
+ "PromptKernelRequestReasoningEffortParams",
"PromptKernelRequestStopParams",
"PromptKernelRequestTemplateParams",
"PromptLogResponseParams",
"PromptLogResponseToolChoiceParams",
"PromptResponseParams",
+ "PromptResponseReasoningEffortParams",
"PromptResponseStopParams",
"PromptResponseTemplateParams",
"ProviderApiKeysParams",
@@ -242,6 +317,7 @@
"TextChatContentParams",
"TextEvaluatorStatsResponseParams",
"ToolCallParams",
+ "ToolCallResponseParams",
"ToolChoiceParams",
"ToolFunctionParams",
"ToolKernelRequestParams",
diff --git a/src/humanloop/requests/agent_call_response.py b/src/humanloop/requests/agent_call_response.py
new file mode 100644
index 00000000..ffc925ec
--- /dev/null
+++ b/src/humanloop/requests/agent_call_response.py
@@ -0,0 +1,202 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from .chat_message import ChatMessageParams
+import typing
+from .agent_call_response_tool_choice import AgentCallResponseToolChoiceParams
+from .agent_response import AgentResponseParams
+import datetime as dt
+from ..types.log_status import LogStatus
+from .evaluator_log_response import EvaluatorLogResponseParams
+from .log_response import LogResponseParams
+
+
+class AgentCallResponseParams(typing_extensions.TypedDict):
+ """
+    Response model for an Agent call.
+ """
+
+ output_message: typing_extensions.NotRequired[ChatMessageParams]
+ """
+ The message returned by the provider.
+ """
+
+ prompt_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of tokens in the prompt used to generate the output.
+ """
+
+ reasoning_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of reasoning tokens used to generate the output.
+ """
+
+ output_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of tokens in the output generated by the model.
+ """
+
+ prompt_cost: typing_extensions.NotRequired[float]
+ """
+ Cost in dollars associated with the tokens in the prompt.
+ """
+
+ output_cost: typing_extensions.NotRequired[float]
+ """
+ Cost in dollars associated with the tokens in the output.
+ """
+
+ finish_reason: typing_extensions.NotRequired[str]
+ """
+ Reason the generation finished.
+ """
+
+ messages: typing_extensions.NotRequired[typing.Sequence[ChatMessageParams]]
+ """
+ The messages passed to the provider chat endpoint.
+ """
+
+ tool_choice: typing_extensions.NotRequired[AgentCallResponseToolChoiceParams]
+ """
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+ """
+
+ agent: AgentResponseParams
+ """
+ Agent that generated the Log.
+ """
+
+ start_time: typing_extensions.NotRequired[dt.datetime]
+ """
+ When the logged event started.
+ """
+
+ end_time: typing_extensions.NotRequired[dt.datetime]
+ """
+ When the logged event ended.
+ """
+
+ output: typing_extensions.NotRequired[str]
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing_extensions.NotRequired[dt.datetime]
+ """
+ User defined timestamp for when the log was created.
+ """
+
+ error: typing_extensions.NotRequired[str]
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing_extensions.NotRequired[float]
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing_extensions.NotRequired[str]
+ """
+ Captured log and debug statements.
+ """
+
+ provider_request: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Raw request sent to provider.
+ """
+
+ provider_response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Raw response received from the provider.
+ """
+
+ inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The inputs passed to the prompt template.
+ """
+
+ source: typing_extensions.NotRequired[str]
+ """
+ Identifies where the model was called from.
+ """
+
+ metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Any additional metadata to record.
+ """
+
+ log_status: typing_extensions.NotRequired[LogStatus]
+ """
+ Status of the Agent Log. If `incomplete`, the Agent turn was suspended due to a tool call and can be continued by calling /agents/continue with responses to the Agent's last message (which should contain tool calls). See the `previous_agent_message` field for easy access to the Agent's last message.
+ """
+
+ source_datapoint_id: typing_extensions.NotRequired[str]
+ """
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs with Evaluations. If provided, Humanloop will automatically associate this Log with Evaluations that require a Log for this Datapoint-Version pair.
+ """
+
+ trace_parent_id: typing_extensions.NotRequired[str]
+ """
+ The ID of the parent Log to nest this Log under in a Trace.
+ """
+
+ batches: typing_extensions.NotRequired[typing.Sequence[str]]
+ """
+ Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations
+ """
+
+ user: typing_extensions.NotRequired[str]
+ """
+ End-user ID related to the Log.
+ """
+
+ environment: typing_extensions.NotRequired[str]
+ """
+ The name of the Environment the Log is associated with.
+ """
+
+ save: typing_extensions.NotRequired[bool]
+ """
+ Whether the request/response payloads will be stored on Humanloop.
+ """
+
+ log_id: typing_extensions.NotRequired[str]
+ """
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ """
+
+ id: str
+ """
+ Unique identifier for the Log.
+ """
+
+ evaluator_logs: typing.Sequence[EvaluatorLogResponseParams]
+ """
+ List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
+ """
+
+ trace_flow_id: typing_extensions.NotRequired[str]
+ """
+ Identifier for the Flow that the Trace belongs to.
+ """
+
+ trace_id: typing_extensions.NotRequired[str]
+ """
+ Identifier for the Trace that the Log belongs to.
+ """
+
+ trace_children: typing_extensions.NotRequired[typing.Sequence[LogResponseParams]]
+ """
+ Logs nested under this Log in the Trace.
+ """
+
+ previous_agent_message: typing_extensions.NotRequired[ChatMessageParams]
+ """
+ The Agent's last message, which should contain tool calls. Only populated if the Log is incomplete due to a suspended Agent turn with tool calls. This is useful for continuing the Agent call by calling /agents/continue.
+ """
diff --git a/src/humanloop/requests/agent_call_response_tool_choice.py b/src/humanloop/requests/agent_call_response_tool_choice.py
new file mode 100644
index 00000000..6cc9f9c4
--- /dev/null
+++ b/src/humanloop/requests/agent_call_response_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .tool_choice import ToolChoiceParams
+
+AgentCallResponseToolChoiceParams = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams
+]
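For reference, the four shapes this union admits. The forced-function form follows the structure described in the `tool_choice` docstrings above; the function name is illustrative:

# The four admissible tool_choice values. "get_weather" is illustrative only.
tool_choice_none = "none"          # never call a tool; just generate a message
tool_choice_auto = "auto"          # model decides whether to call tools
tool_choice_required = "required"  # model must call at least one tool
tool_choice_forced = {"type": "function", "function": {"name": "get_weather"}}
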
diff --git a/src/humanloop/requests/agent_call_stream_response.py b/src/humanloop/requests/agent_call_stream_response.py
new file mode 100644
index 00000000..9555925d
--- /dev/null
+++ b/src/humanloop/requests/agent_call_stream_response.py
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing_extensions
+from .agent_call_stream_response_payload import AgentCallStreamResponsePayloadParams
+from ..types.event_type import EventType
+import datetime as dt
+
+
+class AgentCallStreamResponseParams(typing_extensions.TypedDict):
+ """
+ Response model for calling an Agent in streaming mode.
+ """
+
+ log_id: str
+ message: str
+ payload: typing_extensions.NotRequired[AgentCallStreamResponsePayloadParams]
+ type: EventType
+ created_at: dt.datetime
diff --git a/src/humanloop/requests/agent_call_stream_response_payload.py b/src/humanloop/requests/agent_call_stream_response_payload.py
new file mode 100644
index 00000000..0e08a6f3
--- /dev/null
+++ b/src/humanloop/requests/agent_call_stream_response_payload.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .log_stream_response import LogStreamResponseParams
+from .log_response import LogResponseParams
+from .tool_call import ToolCallParams
+
+AgentCallStreamResponsePayloadParams = typing.Union[LogStreamResponseParams, LogResponseParams, ToolCallParams]
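The three payload members share no explicit tag, so a stream consumer has to dispatch structurally. A rough sketch using field names defined elsewhere in this patch; treating a `function` key as the mark of a tool call is an assumption based on the usual OpenAI-style shape:

# Rough structural dispatch over the stream payload union. "evaluator_logs"
# is a required field on the full log response shapes in this patch;
# "function" on a payload is assumed to indicate a ToolCallParams value.
from typing import Any, Dict


def classify_payload(payload: Dict[str, Any]) -> str:
    if "function" in payload:
        return "tool_call"
    if "evaluator_logs" in payload:
        return "log"          # complete LogResponseParams
    return "log_chunk"        # incremental LogStreamResponseParams
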
diff --git a/src/humanloop/requests/agent_continue_response.py b/src/humanloop/requests/agent_continue_response.py
new file mode 100644
index 00000000..8300667b
--- /dev/null
+++ b/src/humanloop/requests/agent_continue_response.py
@@ -0,0 +1,202 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing_extensions
+from .chat_message import ChatMessageParams
+import typing
+from .agent_continue_response_tool_choice import AgentContinueResponseToolChoiceParams
+from .agent_response import AgentResponseParams
+import datetime as dt
+from ..types.log_status import LogStatus
+from .evaluator_log_response import EvaluatorLogResponseParams
+from .log_response import LogResponseParams
+
+
+class AgentContinueResponseParams(typing_extensions.TypedDict):
+ """
+ Response model for continuing an Agent call.
+ """
+
+ output_message: typing_extensions.NotRequired[ChatMessageParams]
+ """
+ The message returned by the provider.
+ """
+
+ prompt_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of tokens in the prompt used to generate the output.
+ """
+
+ reasoning_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of reasoning tokens used to generate the output.
+ """
+
+ output_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of tokens in the output generated by the model.
+ """
+
+ prompt_cost: typing_extensions.NotRequired[float]
+ """
+ Cost in dollars associated with the tokens in the prompt.
+ """
+
+ output_cost: typing_extensions.NotRequired[float]
+ """
+ Cost in dollars associated with the tokens in the output.
+ """
+
+ finish_reason: typing_extensions.NotRequired[str]
+ """
+ Reason the generation finished.
+ """
+
+ messages: typing_extensions.NotRequired[typing.Sequence[ChatMessageParams]]
+ """
+ The messages passed to the provider chat endpoint.
+ """
+
+ tool_choice: typing_extensions.NotRequired[AgentContinueResponseToolChoiceParams]
+ """
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+ """
+
+ agent: AgentResponseParams
+ """
+ Agent that generated the Log.
+ """
+
+ start_time: typing_extensions.NotRequired[dt.datetime]
+ """
+ When the logged event started.
+ """
+
+ end_time: typing_extensions.NotRequired[dt.datetime]
+ """
+ When the logged event ended.
+ """
+
+ output: typing_extensions.NotRequired[str]
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing_extensions.NotRequired[dt.datetime]
+ """
+ User defined timestamp for when the log was created.
+ """
+
+ error: typing_extensions.NotRequired[str]
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing_extensions.NotRequired[float]
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing_extensions.NotRequired[str]
+ """
+ Captured log and debug statements.
+ """
+
+ provider_request: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Raw request sent to provider.
+ """
+
+ provider_response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Raw response received from the provider.
+ """
+
+ inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The inputs passed to the prompt template.
+ """
+
+ source: typing_extensions.NotRequired[str]
+ """
+ Identifies where the model was called from.
+ """
+
+ metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Any additional metadata to record.
+ """
+
+ log_status: typing_extensions.NotRequired[LogStatus]
+ """
+ Status of the Agent Log. If `incomplete`, the Agent turn was suspended due to a tool call and can be continued by calling /agents/continue with responses to the Agent's last message (which should contain tool calls). See the `previous_agent_message` field for easy access to the Agent's last message.
+ """
+
+ source_datapoint_id: typing_extensions.NotRequired[str]
+ """
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs with Evaluations. If provided, Humanloop will automatically associate this Log with Evaluations that require a Log for this Datapoint-Version pair.
+ """
+
+ trace_parent_id: typing_extensions.NotRequired[str]
+ """
+ The ID of the parent Log to nest this Log under in a Trace.
+ """
+
+ batches: typing_extensions.NotRequired[typing.Sequence[str]]
+ """
+ Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations
+ """
+
+ user: typing_extensions.NotRequired[str]
+ """
+ End-user ID related to the Log.
+ """
+
+ environment: typing_extensions.NotRequired[str]
+ """
+ The name of the Environment the Log is associated with.
+ """
+
+ save: typing_extensions.NotRequired[bool]
+ """
+ Whether the request/response payloads will be stored on Humanloop.
+ """
+
+ log_id: typing_extensions.NotRequired[str]
+ """
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ """
+
+ id: str
+ """
+ Unique identifier for the Log.
+ """
+
+ evaluator_logs: typing.Sequence[EvaluatorLogResponseParams]
+ """
+ List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
+ """
+
+ trace_flow_id: typing_extensions.NotRequired[str]
+ """
+ Identifier for the Flow that the Trace belongs to.
+ """
+
+ trace_id: typing_extensions.NotRequired[str]
+ """
+ Identifier for the Trace that the Log belongs to.
+ """
+
+ trace_children: typing_extensions.NotRequired[typing.Sequence[LogResponseParams]]
+ """
+ Logs nested under this Log in the Trace.
+ """
+
+ previous_agent_message: typing_extensions.NotRequired[ChatMessageParams]
+ """
+ The Agent's last message, which should contain tool calls. Only populated if the Log is incomplete due to a suspended Agent turn with tool calls. This is useful for continuing the Agent call by calling /agents/continue.
+ """
diff --git a/src/humanloop/requests/agent_continue_response_tool_choice.py b/src/humanloop/requests/agent_continue_response_tool_choice.py
new file mode 100644
index 00000000..24b044cc
--- /dev/null
+++ b/src/humanloop/requests/agent_continue_response_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .tool_choice import ToolChoiceParams
+
+AgentContinueResponseToolChoiceParams = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams
+]
diff --git a/src/humanloop/requests/agent_continue_stream_response.py b/src/humanloop/requests/agent_continue_stream_response.py
new file mode 100644
index 00000000..1038e000
--- /dev/null
+++ b/src/humanloop/requests/agent_continue_stream_response.py
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing_extensions
+from .agent_continue_stream_response_payload import AgentContinueStreamResponsePayloadParams
+from ..types.event_type import EventType
+import datetime as dt
+
+
+class AgentContinueStreamResponseParams(typing_extensions.TypedDict):
+ """
+ Response model for continuing an Agent call in streaming mode.
+ """
+
+ log_id: str
+ message: str
+ payload: typing_extensions.NotRequired[AgentContinueStreamResponsePayloadParams]
+ type: EventType
+ created_at: dt.datetime
diff --git a/src/humanloop/requests/agent_continue_stream_response_payload.py b/src/humanloop/requests/agent_continue_stream_response_payload.py
new file mode 100644
index 00000000..ddd74c10
--- /dev/null
+++ b/src/humanloop/requests/agent_continue_stream_response_payload.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .log_stream_response import LogStreamResponseParams
+from .log_response import LogResponseParams
+from .tool_call import ToolCallParams
+
+AgentContinueStreamResponsePayloadParams = typing.Union[LogStreamResponseParams, LogResponseParams, ToolCallParams]
diff --git a/src/humanloop/requests/agent_inline_tool.py b/src/humanloop/requests/agent_inline_tool.py
new file mode 100644
index 00000000..31f9401a
--- /dev/null
+++ b/src/humanloop/requests/agent_inline_tool.py
@@ -0,0 +1,13 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing
+from .tool_function import ToolFunctionParams
+import typing_extensions
+from ..types.on_agent_call_enum import OnAgentCallEnum
+
+
+class AgentInlineToolParams(typing_extensions.TypedDict):
+ type: typing.Literal["inline"]
+ json_schema: ToolFunctionParams
+ on_agent_call: typing_extensions.NotRequired[OnAgentCallEnum]
diff --git a/src/humanloop/requests/agent_kernel_request.py b/src/humanloop/requests/agent_kernel_request.py
new file mode 100644
index 00000000..0ca76571
--- /dev/null
+++ b/src/humanloop/requests/agent_kernel_request.py
@@ -0,0 +1,112 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing_extensions
+from ..types.model_endpoints import ModelEndpoints
+from .agent_kernel_request_template import AgentKernelRequestTemplateParams
+from ..types.template_language import TemplateLanguage
+from ..types.model_providers import ModelProviders
+from .agent_kernel_request_stop import AgentKernelRequestStopParams
+import typing
+from .response_format import ResponseFormatParams
+from .agent_kernel_request_reasoning_effort import AgentKernelRequestReasoningEffortParams
+from .agent_kernel_request_tools_item import AgentKernelRequestToolsItemParams
+
+
+class AgentKernelRequestParams(typing_extensions.TypedDict):
+ """
+ Base class used by both PromptKernelRequest and AgentKernelRequest.
+
+ Contains the Prompt-related fields shared by both.
+ """
+
+ model: str
+ """
+ The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+ """
+
+ endpoint: typing_extensions.NotRequired[ModelEndpoints]
+ """
+ The provider model endpoint used.
+ """
+
+ template: typing_extensions.NotRequired[AgentKernelRequestTemplateParams]
+ """
+ The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+ For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+ For completion models, provide a prompt template as a string.
+
+ Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+ """
+
+ template_language: typing_extensions.NotRequired[TemplateLanguage]
+ """
+ The template language to use for rendering the template.
+ """
+
+ provider: typing_extensions.NotRequired[ModelProviders]
+ """
+ The company providing the underlying model service.
+ """
+
+ max_tokens: typing_extensions.NotRequired[int]
+ """
+ The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt
+ """
+
+ temperature: typing_extensions.NotRequired[float]
+ """
+ What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+ """
+
+ top_p: typing_extensions.NotRequired[float]
+ """
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+ """
+
+ stop: typing_extensions.NotRequired[AgentKernelRequestStopParams]
+ """
+ The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+ """
+
+ presence_penalty: typing_extensions.NotRequired[float]
+ """
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+ """
+
+ frequency_penalty: typing_extensions.NotRequired[float]
+ """
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+ """
+
+ other: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Other parameter values to be passed to the provider call.
+ """
+
+ seed: typing_extensions.NotRequired[int]
+ """
+ If specified, the model will make a best effort to sample deterministically, but it is not guaranteed.
+ """
+
+ response_format: typing_extensions.NotRequired[ResponseFormatParams]
+ """
+ The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+ """
+
+ reasoning_effort: typing_extensions.NotRequired[AgentKernelRequestReasoningEffortParams]
+ """
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which specifies the maximum token budget.
+ """
+
+ tools: typing_extensions.NotRequired[typing.Sequence[AgentKernelRequestToolsItemParams]]
+ attributes: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used.
+ """
+
+ max_iterations: typing_extensions.NotRequired[int]
+ """
+ The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
+ """
diff --git a/src/humanloop/requests/agent_kernel_request_reasoning_effort.py b/src/humanloop/requests/agent_kernel_request_reasoning_effort.py
new file mode 100644
index 00000000..ea32bc11
--- /dev/null
+++ b/src/humanloop/requests/agent_kernel_request_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ..types.open_ai_reasoning_effort import OpenAiReasoningEffort
+
+AgentKernelRequestReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int]
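Putting the kernel fields together, a sketch of a plausible payload. The model name, template text, stop sequence, and effort value are illustrative; per the union just above, an Anthropic reasoning model would take an integer token budget instead, and the effort string assumes the usual OpenAI low/medium/high scale:

# Illustrative AgentKernelRequestParams payload. All concrete values here are
# examples, not defaults. Template input variables use the double curly
# bracket syntax described in the `template` docstring.
kernel = {
    "model": "o3-mini",
    "template": [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "{{question}}"},
    ],
    "reasoning_effort": "medium",  # OpenAI effort value; Anthropic takes an int budget
    "stop": ["\n\n"],
    "max_iterations": 5,
}
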
diff --git a/src/humanloop/requests/agent_kernel_request_stop.py b/src/humanloop/requests/agent_kernel_request_stop.py
new file mode 100644
index 00000000..eae95d35
--- /dev/null
+++ b/src/humanloop/requests/agent_kernel_request_stop.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentKernelRequestStopParams = typing.Union[str, typing.Sequence[str]]
diff --git a/src/humanloop/requests/agent_kernel_request_template.py b/src/humanloop/requests/agent_kernel_request_template.py
new file mode 100644
index 00000000..7261667d
--- /dev/null
+++ b/src/humanloop/requests/agent_kernel_request_template.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .chat_message import ChatMessageParams
+
+AgentKernelRequestTemplateParams = typing.Union[str, typing.Sequence[ChatMessageParams]]
diff --git a/src/humanloop/requests/agent_kernel_request_tools_item.py b/src/humanloop/requests/agent_kernel_request_tools_item.py
new file mode 100644
index 00000000..27b63984
--- /dev/null
+++ b/src/humanloop/requests/agent_kernel_request_tools_item.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .agent_linked_file_request import AgentLinkedFileRequestParams
+from .agent_inline_tool import AgentInlineToolParams
+
+AgentKernelRequestToolsItemParams = typing.Union[AgentLinkedFileRequestParams, AgentInlineToolParams]
diff --git a/src/humanloop/requests/agent_linked_file_request.py b/src/humanloop/requests/agent_linked_file_request.py
new file mode 100644
index 00000000..18fc2274
--- /dev/null
+++ b/src/humanloop/requests/agent_linked_file_request.py
@@ -0,0 +1,13 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing
+from .linked_file_request import LinkedFileRequestParams
+import typing_extensions
+from ..types.on_agent_call_enum import OnAgentCallEnum
+
+
+class AgentLinkedFileRequestParams(typing_extensions.TypedDict):
+ type: typing.Literal["file"]
+ link: LinkedFileRequestParams
+ on_agent_call: typing_extensions.NotRequired[OnAgentCallEnum]
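The two tool shapes an Agent version can carry, side by side. The file ID and schema body are illustrative; `on_agent_call` is omitted because its enum values are not shown in this patch:

# The two members of AgentKernelRequestToolsItemParams. The file ID and the
# schema body are illustrative; ToolFunctionParams is assumed to follow the
# usual name/description/parameters function-definition shape.
linked_tool = {
    "type": "file",
    "link": {"file_id": "fl_123"},  # LinkedFileRequestParams; env/version optional
}

inline_tool = {
    "type": "inline",
    "json_schema": {
        "name": "get_weather",
        "description": "Look up the current weather for a city.",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}
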
diff --git a/src/humanloop/requests/agent_linked_file_response.py b/src/humanloop/requests/agent_linked_file_response.py
new file mode 100644
index 00000000..8a690a77
--- /dev/null
+++ b/src/humanloop/requests/agent_linked_file_response.py
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+import typing_extensions
+import typing
+from .linked_file_request import LinkedFileRequestParams
+import typing_extensions
+from ..types.on_agent_call_enum import OnAgentCallEnum
+import typing
+
+if typing.TYPE_CHECKING:
+ from .agent_linked_file_response_file import AgentLinkedFileResponseFileParams
+
+
+class AgentLinkedFileResponseParams(typing_extensions.TypedDict):
+ type: typing.Literal["file"]
+ link: LinkedFileRequestParams
+ on_agent_call: typing_extensions.NotRequired[OnAgentCallEnum]
+ file: typing_extensions.NotRequired["AgentLinkedFileResponseFileParams"]
diff --git a/src/humanloop/requests/agent_linked_file_response_file.py b/src/humanloop/requests/agent_linked_file_response_file.py
new file mode 100644
index 00000000..bb328de2
--- /dev/null
+++ b/src/humanloop/requests/agent_linked_file_response_file.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+import typing
+from .dataset_response import DatasetResponseParams
+import typing
+
+if typing.TYPE_CHECKING:
+ from .prompt_response import PromptResponseParams
+ from .tool_response import ToolResponseParams
+ from .evaluator_response import EvaluatorResponseParams
+ from .flow_response import FlowResponseParams
+ from .agent_response import AgentResponseParams
+AgentLinkedFileResponseFileParams = typing.Union[
+ "PromptResponseParams",
+ "ToolResponseParams",
+ DatasetResponseParams,
+ "EvaluatorResponseParams",
+ "FlowResponseParams",
+ "AgentResponseParams",
+]
diff --git a/src/humanloop/requests/agent_log_response.py b/src/humanloop/requests/agent_log_response.py
new file mode 100644
index 00000000..0cb24b8a
--- /dev/null
+++ b/src/humanloop/requests/agent_log_response.py
@@ -0,0 +1,201 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+import typing_extensions
+import typing_extensions
+from .chat_message import ChatMessageParams
+import typing
+from .agent_log_response_tool_choice import AgentLogResponseToolChoiceParams
+from .agent_response import AgentResponseParams
+import datetime as dt
+from ..types.log_status import LogStatus
+import typing
+
+if typing.TYPE_CHECKING:
+ from .evaluator_log_response import EvaluatorLogResponseParams
+ from .log_response import LogResponseParams
+
+
+class AgentLogResponseParams(typing_extensions.TypedDict):
+ """
+ Response model for an Agent Log.
+ """
+
+ output_message: typing_extensions.NotRequired[ChatMessageParams]
+ """
+ The message returned by the provider.
+ """
+
+ prompt_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of tokens in the prompt used to generate the output.
+ """
+
+ reasoning_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of reasoning tokens used to generate the output.
+ """
+
+ output_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of tokens in the output generated by the model.
+ """
+
+ prompt_cost: typing_extensions.NotRequired[float]
+ """
+ Cost in dollars associated with the tokens in the prompt.
+ """
+
+ output_cost: typing_extensions.NotRequired[float]
+ """
+ Cost in dollars associated with the tokens in the output.
+ """
+
+ finish_reason: typing_extensions.NotRequired[str]
+ """
+ Reason the generation finished.
+ """
+
+ messages: typing_extensions.NotRequired[typing.Sequence[ChatMessageParams]]
+ """
+ The messages passed to the provider chat endpoint.
+ """
+
+ tool_choice: typing_extensions.NotRequired[AgentLogResponseToolChoiceParams]
+ """
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+ """
+
+ agent: AgentResponseParams
+ """
+ Agent that generated the Log.
+ """
+
+ start_time: typing_extensions.NotRequired[dt.datetime]
+ """
+ When the logged event started.
+ """
+
+ end_time: typing_extensions.NotRequired[dt.datetime]
+ """
+ When the logged event ended.
+ """
+
+ output: typing_extensions.NotRequired[str]
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing_extensions.NotRequired[dt.datetime]
+ """
+ User defined timestamp for when the log was created.
+ """
+
+ error: typing_extensions.NotRequired[str]
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing_extensions.NotRequired[float]
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing_extensions.NotRequired[str]
+ """
+ Captured log and debug statements.
+ """
+
+ provider_request: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Raw request sent to provider.
+ """
+
+ provider_response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Raw response received from the provider.
+ """
+
+ inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The inputs passed to the prompt template.
+ """
+
+ source: typing_extensions.NotRequired[str]
+ """
+ Identifies where the model was called from.
+ """
+
+ metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Any additional metadata to record.
+ """
+
+ log_status: typing_extensions.NotRequired[LogStatus]
+ """
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+ """
+
+ source_datapoint_id: typing_extensions.NotRequired[str]
+ """
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs with Evaluations. If provided, Humanloop will automatically associate this Log with Evaluations that require a Log for this Datapoint-Version pair.
+ """
+
+ trace_parent_id: typing_extensions.NotRequired[str]
+ """
+ The ID of the parent Log to nest this Log under in a Trace.
+ """
+
+ batches: typing_extensions.NotRequired[typing.Sequence[str]]
+ """
+ Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations
+ """
+
+ user: typing_extensions.NotRequired[str]
+ """
+ End-user ID related to the Log.
+ """
+
+ environment: typing_extensions.NotRequired[str]
+ """
+ The name of the Environment the Log is associated with.
+ """
+
+ save: typing_extensions.NotRequired[bool]
+ """
+ Whether the request/response payloads will be stored on Humanloop.
+ """
+
+ log_id: typing_extensions.NotRequired[str]
+ """
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ """
+
+ id: str
+ """
+ Unique identifier for the Log.
+ """
+
+ evaluator_logs: typing.Sequence["EvaluatorLogResponseParams"]
+ """
+ List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
+ """
+
+ trace_flow_id: typing_extensions.NotRequired[str]
+ """
+ Identifier for the Flow that the Trace belongs to.
+ """
+
+ trace_id: typing_extensions.NotRequired[str]
+ """
+ Identifier for the Trace that the Log belongs to.
+ """
+
+ trace_children: typing_extensions.NotRequired[typing.Sequence["LogResponseParams"]]
+ """
+ Logs nested under this Log in the Trace.
+ """
diff --git a/src/humanloop/requests/agent_log_response_tool_choice.py b/src/humanloop/requests/agent_log_response_tool_choice.py
new file mode 100644
index 00000000..e239a69c
--- /dev/null
+++ b/src/humanloop/requests/agent_log_response_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .tool_choice import ToolChoiceParams
+
+AgentLogResponseToolChoiceParams = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams
+]
diff --git a/src/humanloop/requests/agent_log_stream_response.py b/src/humanloop/requests/agent_log_stream_response.py
new file mode 100644
index 00000000..710d55cf
--- /dev/null
+++ b/src/humanloop/requests/agent_log_stream_response.py
@@ -0,0 +1,87 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing_extensions
+import datetime as dt
+from .chat_message import ChatMessageParams
+
+
+class AgentLogStreamResponseParams(typing_extensions.TypedDict):
+ """
+ Prompt-specific log output shared between PromptLogRequest and PromptCallLogResponse.
+ """
+
+ output: typing_extensions.NotRequired[str]
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing_extensions.NotRequired[dt.datetime]
+ """
+ User defined timestamp for when the log was created.
+ """
+
+ error: typing_extensions.NotRequired[str]
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing_extensions.NotRequired[float]
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing_extensions.NotRequired[str]
+ """
+ Captured log and debug statements.
+ """
+
+ output_message: typing_extensions.NotRequired[ChatMessageParams]
+ """
+ The message returned by the provider.
+ """
+
+ prompt_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of tokens in the prompt used to generate the output.
+ """
+
+ reasoning_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of reasoning tokens used to generate the output.
+ """
+
+ output_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of tokens in the output generated by the model.
+ """
+
+ prompt_cost: typing_extensions.NotRequired[float]
+ """
+ Cost in dollars associated with the tokens in the prompt.
+ """
+
+ output_cost: typing_extensions.NotRequired[float]
+ """
+ Cost in dollars associated with the tokens in the output.
+ """
+
+ finish_reason: typing_extensions.NotRequired[str]
+ """
+ Reason the generation finished.
+ """
+
+ id: str
+ """
+ ID of the log.
+ """
+
+ agent_id: str
+ """
+ ID of the Agent the log belongs to.
+ """
+
+ version_id: str
+ """
+ ID of the specific version of the Agent.
+ """
diff --git a/src/humanloop/requests/agent_response.py b/src/humanloop/requests/agent_response.py
new file mode 100644
index 00000000..f482728d
--- /dev/null
+++ b/src/humanloop/requests/agent_response.py
@@ -0,0 +1,242 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+import typing_extensions
+import typing_extensions
+from ..types.model_endpoints import ModelEndpoints
+from .agent_response_template import AgentResponseTemplateParams
+from ..types.template_language import TemplateLanguage
+from ..types.model_providers import ModelProviders
+from .agent_response_stop import AgentResponseStopParams
+import typing
+from .response_format import ResponseFormatParams
+from .agent_response_reasoning_effort import AgentResponseReasoningEffortParams
+from .environment_response import EnvironmentResponseParams
+import datetime as dt
+from ..types.user_response import UserResponse
+from ..types.version_status import VersionStatus
+from .input_response import InputResponseParams
+from .evaluator_aggregate import EvaluatorAggregateParams
+import typing
+
+if typing.TYPE_CHECKING:
+ from .agent_response_tools_item import AgentResponseToolsItemParams
+ from .monitoring_evaluator_response import MonitoringEvaluatorResponseParams
+
+
+class AgentResponseParams(typing_extensions.TypedDict):
+ """
+ Base type that all File Responses should inherit from.
+
+ Attributes defined here are common to all File Responses and should be overridden
+ in the inheriting classes with documentation and appropriate Field definitions.
+ """
+
+ path: str
+ """
+ Path of the Agent, including the name, which is used as a unique identifier.
+ """
+
+ id: str
+ """
+ Unique identifier for the Agent.
+ """
+
+ directory_id: typing_extensions.NotRequired[str]
+ """
+ ID of the directory that the file is in on Humanloop.
+ """
+
+ model: str
+ """
+ The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+ """
+
+ endpoint: typing_extensions.NotRequired[ModelEndpoints]
+ """
+ The provider model endpoint used.
+ """
+
+ template: typing_extensions.NotRequired[AgentResponseTemplateParams]
+ """
+ The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+ For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+ For completion models, provide a prompt template as a string.
+
+ Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+ """
+
+ template_language: typing_extensions.NotRequired[TemplateLanguage]
+ """
+ The template language to use for rendering the template.
+ """
+
+ provider: typing_extensions.NotRequired[ModelProviders]
+ """
+ The company providing the underlying model service.
+ """
+
+ max_tokens: typing_extensions.NotRequired[int]
+ """
+ The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt
+ """
+
+ temperature: typing_extensions.NotRequired[float]
+ """
+ What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+ """
+
+ top_p: typing_extensions.NotRequired[float]
+ """
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+ """
+
+ stop: typing_extensions.NotRequired[AgentResponseStopParams]
+ """
+ The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+ """
+
+ presence_penalty: typing_extensions.NotRequired[float]
+ """
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+ """
+
+ frequency_penalty: typing_extensions.NotRequired[float]
+ """
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+ """
+
+ other: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Other parameter values to be passed to the provider call.
+ """
+
+ seed: typing_extensions.NotRequired[int]
+ """
+ If specified, the model will make a best effort to sample deterministically, but it is not guaranteed.
+ """
+
+ response_format: typing_extensions.NotRequired[ResponseFormatParams]
+ """
+ The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+ """
+
+ reasoning_effort: typing_extensions.NotRequired[AgentResponseReasoningEffortParams]
+ """
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which specifies the maximum token budget.
+ """
+
+ tools: typing.Sequence["AgentResponseToolsItemParams"]
+ """
+ List of tools that the Agent can call. These can be linked files or inline tools.
+ """
+
+ attributes: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used.
+ """
+
+ max_iterations: typing_extensions.NotRequired[int]
+ """
+ The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
+ """
+
+ version_name: typing_extensions.NotRequired[str]
+ """
+ Unique name for the Agent version. Version names must be unique for a given Agent.
+ """
+
+ version_description: typing_extensions.NotRequired[str]
+ """
+ Description of the version, e.g., the changes made in this version.
+ """
+
+ description: typing_extensions.NotRequired[str]
+ """
+ Description of the Agent.
+ """
+
+ tags: typing_extensions.NotRequired[typing.Sequence[str]]
+ """
+ List of tags associated with the file.
+ """
+
+ readme: typing_extensions.NotRequired[str]
+ """
+ Long description of the file.
+ """
+
+ name: str
+ """
+ Name of the Agent.
+ """
+
+ schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The JSON schema for the File.
+ """
+
+ version_id: str
+ """
+ Unique identifier for the specific Agent Version. If no query params are provided, the default deployed Agent Version is returned.
+ """
+
+ type: typing_extensions.NotRequired[typing.Literal["agent"]]
+ environments: typing_extensions.NotRequired[typing.Sequence[EnvironmentResponseParams]]
+ """
+ The list of environments the Agent Version is deployed to.
+ """
+
+ created_at: dt.datetime
+ updated_at: dt.datetime
+ created_by: typing_extensions.NotRequired[UserResponse]
+ """
+ The user who created the Agent.
+ """
+
+ committed_by: typing_extensions.NotRequired[UserResponse]
+ """
+ The user who committed the Agent Version.
+ """
+
+ committed_at: typing_extensions.NotRequired[dt.datetime]
+ """
+ The date and time the Agent Version was committed.
+ """
+
+ status: VersionStatus
+ """
+ The status of the Agent Version.
+ """
+
+ last_used_at: dt.datetime
+ version_logs_count: int
+ """
+ The number of logs that have been generated for this Agent Version
+ """
+
+ total_logs_count: int
+ """
+ The number of logs that have been generated across all Agent Versions
+ """
+
+ inputs: typing.Sequence[InputResponseParams]
+ """
+ Inputs associated with the Agent. Inputs correspond to any of the variables used within the Agent template.
+ """
+
+ evaluators: typing_extensions.NotRequired[typing.Sequence["MonitoringEvaluatorResponseParams"]]
+ """
+ Evaluators that have been attached to this Agent that are used for monitoring logs.
+ """
+
+ evaluator_aggregates: typing_extensions.NotRequired[typing.Sequence[EvaluatorAggregateParams]]
+ """
+ Aggregation of Evaluator results for the Agent Version.
+ """
+
+ content: typing_extensions.NotRequired[str]
+ """
+ The serialized kernel for the Agent. Corresponds to the .agent file.
+ """
diff --git a/src/humanloop/requests/agent_response_reasoning_effort.py b/src/humanloop/requests/agent_response_reasoning_effort.py
new file mode 100644
index 00000000..de1b969f
--- /dev/null
+++ b/src/humanloop/requests/agent_response_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ..types.open_ai_reasoning_effort import OpenAiReasoningEffort
+
+AgentResponseReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/requests/agent_response_stop.py b/src/humanloop/requests/agent_response_stop.py
new file mode 100644
index 00000000..a395ee73
--- /dev/null
+++ b/src/humanloop/requests/agent_response_stop.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentResponseStopParams = typing.Union[str, typing.Sequence[str]]
diff --git a/src/humanloop/requests/agent_response_template.py b/src/humanloop/requests/agent_response_template.py
new file mode 100644
index 00000000..94be65f1
--- /dev/null
+++ b/src/humanloop/requests/agent_response_template.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .chat_message import ChatMessageParams
+
+AgentResponseTemplateParams = typing.Union[str, typing.Sequence[ChatMessageParams]]
diff --git a/src/humanloop/requests/agent_response_tools_item.py b/src/humanloop/requests/agent_response_tools_item.py
new file mode 100644
index 00000000..5181579b
--- /dev/null
+++ b/src/humanloop/requests/agent_response_tools_item.py
@@ -0,0 +1,10 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+import typing
+from .agent_inline_tool import AgentInlineToolParams
+import typing
+
+if typing.TYPE_CHECKING:
+ from .agent_linked_file_response import AgentLinkedFileResponseParams
+AgentResponseToolsItemParams = typing.Union["AgentLinkedFileResponseParams", AgentInlineToolParams]
diff --git a/src/humanloop/requests/anthropic_redacted_thinking_content.py b/src/humanloop/requests/anthropic_redacted_thinking_content.py
new file mode 100644
index 00000000..3b328f7f
--- /dev/null
+++ b/src/humanloop/requests/anthropic_redacted_thinking_content.py
@@ -0,0 +1,12 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing
+
+
+class AnthropicRedactedThinkingContentParams(typing_extensions.TypedDict):
+ type: typing.Literal["redacted_thinking"]
+ data: str
+ """
+ Thinking block that Anthropic redacted for safety reasons. The user is expected to pass the block back to Anthropic.
+ """
diff --git a/src/humanloop/requests/anthropic_thinking_content.py b/src/humanloop/requests/anthropic_thinking_content.py
new file mode 100644
index 00000000..34f6f99f
--- /dev/null
+++ b/src/humanloop/requests/anthropic_thinking_content.py
@@ -0,0 +1,17 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing
+
+
+class AnthropicThinkingContentParams(typing_extensions.TypedDict):
+ type: typing.Literal["thinking"]
+ thinking: str
+ """
+ Model's chain-of-thought for providing the response.
+ """
+
+ signature: str
+ """
+ Cryptographic signature that verifies the thinking block was generated by Anthropic.
+ """
diff --git a/src/humanloop/requests/chat_message.py b/src/humanloop/requests/chat_message.py
index cab8466d..6011653a 100644
--- a/src/humanloop/requests/chat_message.py
+++ b/src/humanloop/requests/chat_message.py
@@ -6,6 +6,7 @@
from ..types.chat_role import ChatRole
import typing
from .tool_call import ToolCallParams
+from .chat_message_thinking_item import ChatMessageThinkingItemParams
class ChatMessageParams(typing_extensions.TypedDict):
@@ -33,3 +34,8 @@ class ChatMessageParams(typing_extensions.TypedDict):
"""
A list of tool calls requested by the assistant.
"""
+
+ thinking: typing_extensions.NotRequired[typing.Sequence[ChatMessageThinkingItemParams]]
+ """
+ Model's chain-of-thought for providing the response. Present on assistant messages if model supports it.
+ """
diff --git a/src/humanloop/requests/chat_message_thinking_item.py b/src/humanloop/requests/chat_message_thinking_item.py
new file mode 100644
index 00000000..0691f4d8
--- /dev/null
+++ b/src/humanloop/requests/chat_message_thinking_item.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .anthropic_thinking_content import AnthropicThinkingContentParams
+from .anthropic_redacted_thinking_content import AnthropicRedactedThinkingContentParams
+
+ChatMessageThinkingItemParams = typing.Union[AnthropicThinkingContentParams, AnthropicRedactedThinkingContentParams]
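An assistant message using the new `thinking` field might look as follows; the content, signature, and redacted data values are placeholders:

# Illustrative assistant ChatMessageParams carrying Anthropic thinking blocks.
# The signature and redacted data strings are placeholders supplied by the
# provider, not values a caller would construct.
assistant_message = {
    "role": "assistant",
    "content": "The answer is 42.",
    "thinking": [
        {
            "type": "thinking",
            "thinking": "Work through the question step by step...",
            "signature": "sig_abc123",
        },
        {
            "type": "redacted_thinking",
            "data": "opaque-redacted-block",  # pass back to Anthropic unchanged
        },
    ],
}
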
diff --git a/src/humanloop/requests/create_agent_log_response.py b/src/humanloop/requests/create_agent_log_response.py
new file mode 100644
index 00000000..b1715517
--- /dev/null
+++ b/src/humanloop/requests/create_agent_log_response.py
@@ -0,0 +1,31 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing_extensions
+from ..types.log_status import LogStatus
+
+
+class CreateAgentLogResponseParams(typing_extensions.TypedDict):
+ """
+ Response for an Agent Log.
+ """
+
+ id: str
+ """
+ Unique identifier for the Log.
+ """
+
+ agent_id: str
+ """
+ Unique identifier for the Agent.
+ """
+
+ version_id: str
+ """
+ Unique identifier for the Agent Version.
+ """
+
+ log_status: typing_extensions.NotRequired[LogStatus]
+ """
+ Status of the Agent Log. When an Agent Log is marked as `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs.
+ """
diff --git a/src/humanloop/requests/dataset_response.py b/src/humanloop/requests/dataset_response.py
index 1d59ed4b..1cffd2b2 100644
--- a/src/humanloop/requests/dataset_response.py
+++ b/src/humanloop/requests/dataset_response.py
@@ -42,6 +42,11 @@ class DatasetResponseParams(typing_extensions.TypedDict):
Description of the Dataset.
"""
+ schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The JSON schema for the File.
+ """
+
readme: typing_extensions.NotRequired[str]
"""
Long description of the file.
diff --git a/src/humanloop/requests/directory_with_parents_and_children_response_files_item.py b/src/humanloop/requests/directory_with_parents_and_children_response_files_item.py
index f101bf15..db9370b9 100644
--- a/src/humanloop/requests/directory_with_parents_and_children_response_files_item.py
+++ b/src/humanloop/requests/directory_with_parents_and_children_response_files_item.py
@@ -6,7 +6,13 @@
from .evaluator_response import EvaluatorResponseParams
from .dataset_response import DatasetResponseParams
from .flow_response import FlowResponseParams
+from .agent_response import AgentResponseParams
DirectoryWithParentsAndChildrenResponseFilesItemParams = typing.Union[
- PromptResponseParams, ToolResponseParams, EvaluatorResponseParams, DatasetResponseParams, FlowResponseParams
+ PromptResponseParams,
+ ToolResponseParams,
+ EvaluatorResponseParams,
+ DatasetResponseParams,
+ FlowResponseParams,
+ AgentResponseParams,
]
diff --git a/src/humanloop/requests/evaluator_response.py b/src/humanloop/requests/evaluator_response.py
index 908eeb2d..1ff836fb 100644
--- a/src/humanloop/requests/evaluator_response.py
+++ b/src/humanloop/requests/evaluator_response.py
@@ -57,6 +57,11 @@ class EvaluatorResponseParams(typing_extensions.TypedDict):
Description of the Evaluator.
"""
+ schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The JSON schema for the File.
+ """
+
readme: typing_extensions.NotRequired[str]
"""
Long description of the file.
diff --git a/src/humanloop/requests/file_environment_response_file.py b/src/humanloop/requests/file_environment_response_file.py
index 4ac6b0c3..04c0b51d 100644
--- a/src/humanloop/requests/file_environment_response_file.py
+++ b/src/humanloop/requests/file_environment_response_file.py
@@ -6,7 +6,13 @@
from .dataset_response import DatasetResponseParams
from .evaluator_response import EvaluatorResponseParams
from .flow_response import FlowResponseParams
+from .agent_response import AgentResponseParams
FileEnvironmentResponseFileParams = typing.Union[
- PromptResponseParams, ToolResponseParams, DatasetResponseParams, EvaluatorResponseParams, FlowResponseParams
+ PromptResponseParams,
+ ToolResponseParams,
+ DatasetResponseParams,
+ EvaluatorResponseParams,
+ FlowResponseParams,
+ AgentResponseParams,
]
diff --git a/src/humanloop/requests/file_environment_variable_request.py b/src/humanloop/requests/file_environment_variable_request.py
new file mode 100644
index 00000000..bb70bda4
--- /dev/null
+++ b/src/humanloop/requests/file_environment_variable_request.py
@@ -0,0 +1,15 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+
+
+class FileEnvironmentVariableRequestParams(typing_extensions.TypedDict):
+ name: str
+ """
+ Name of the environment variable.
+ """
+
+ value: str
+ """
+ Value of the environment variable.
+ """
diff --git a/src/humanloop/requests/flow_response.py b/src/humanloop/requests/flow_response.py
index 18a26d10..eebc9fd7 100644
--- a/src/humanloop/requests/flow_response.py
+++ b/src/humanloop/requests/flow_response.py
@@ -59,6 +59,11 @@ class FlowResponseParams(typing_extensions.TypedDict):
Description of the Flow.
"""
+ schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The JSON schema for the File.
+ """
+
readme: typing_extensions.NotRequired[str]
"""
Long description of the file.
diff --git a/src/humanloop/requests/linked_file_request.py b/src/humanloop/requests/linked_file_request.py
new file mode 100644
index 00000000..2bbba19c
--- /dev/null
+++ b/src/humanloop/requests/linked_file_request.py
@@ -0,0 +1,10 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing_extensions
+
+
+class LinkedFileRequestParams(typing_extensions.TypedDict):
+ file_id: str
+ environment_id: typing_extensions.NotRequired[str]
+ version_id: typing_extensions.NotRequired[str]
diff --git a/src/humanloop/requests/list_agents.py b/src/humanloop/requests/list_agents.py
new file mode 100644
index 00000000..4a72f1db
--- /dev/null
+++ b/src/humanloop/requests/list_agents.py
@@ -0,0 +1,12 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing
+from .agent_response import AgentResponseParams
+
+
+class ListAgentsParams(typing_extensions.TypedDict):
+ records: typing.Sequence[AgentResponseParams]
+ """
+ The list of Agents.
+ """
diff --git a/src/humanloop/requests/log_response.py b/src/humanloop/requests/log_response.py
index 15a4cff6..cb3ce212 100644
--- a/src/humanloop/requests/log_response.py
+++ b/src/humanloop/requests/log_response.py
@@ -9,6 +9,11 @@
from .tool_log_response import ToolLogResponseParams
from .evaluator_log_response import EvaluatorLogResponseParams
from .flow_log_response import FlowLogResponseParams
+ from .agent_log_response import AgentLogResponseParams
LogResponseParams = typing.Union[
- "PromptLogResponseParams", "ToolLogResponseParams", "EvaluatorLogResponseParams", "FlowLogResponseParams"
+ "PromptLogResponseParams",
+ "ToolLogResponseParams",
+ "EvaluatorLogResponseParams",
+ "FlowLogResponseParams",
+ "AgentLogResponseParams",
]
diff --git a/src/humanloop/requests/log_stream_response.py b/src/humanloop/requests/log_stream_response.py
new file mode 100644
index 00000000..e142e7fb
--- /dev/null
+++ b/src/humanloop/requests/log_stream_response.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .prompt_call_stream_response import PromptCallStreamResponseParams
+from .agent_log_stream_response import AgentLogStreamResponseParams
+
+LogStreamResponseParams = typing.Union[PromptCallStreamResponseParams, AgentLogStreamResponseParams]
diff --git a/src/humanloop/requests/paginated_data_agent_response.py b/src/humanloop/requests/paginated_data_agent_response.py
new file mode 100644
index 00000000..c8d67533
--- /dev/null
+++ b/src/humanloop/requests/paginated_data_agent_response.py
@@ -0,0 +1,12 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing
+from .agent_response import AgentResponseParams
+
+
+class PaginatedDataAgentResponseParams(typing_extensions.TypedDict):
+ records: typing.Sequence[AgentResponseParams]
+ page: int
+ size: int
+ total: int
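The page/size/total triple is enough to drive iteration without a separate cursor. A small sketch, assuming 1-based page numbering:

# Whether another page of Agents exists, given the fields on
# PaginatedDataAgentResponseParams. Assumes pages are numbered from 1.
def has_next_page(page: int, size: int, total: int) -> bool:
    return page * size < total


assert has_next_page(page=1, size=10, total=25)      # pages 2 and 3 remain
assert not has_next_page(page=3, size=10, total=25)  # 3 * 10 >= 25, done
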
diff --git a/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py b/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py
similarity index 65%
rename from src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py
rename to src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py
index cf8bc4bf..0e7adb64 100644
--- a/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py
+++ b/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py
@@ -2,16 +2,16 @@
import typing_extensions
import typing
-from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item import (
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams,
+from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item import (
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams,
)
-class PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseParams(
+class PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseParams(
typing_extensions.TypedDict
):
records: typing.Sequence[
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams
]
page: int
size: int
diff --git a/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py b/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py
similarity index 58%
rename from src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py
rename to src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py
index 1ba74108..b43a5521 100644
--- a/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py
+++ b/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py
@@ -6,9 +6,13 @@
from .dataset_response import DatasetResponseParams
from .evaluator_response import EvaluatorResponseParams
from .flow_response import FlowResponseParams
+from .agent_response import AgentResponseParams
-PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams = (
- typing.Union[
- PromptResponseParams, ToolResponseParams, DatasetResponseParams, EvaluatorResponseParams, FlowResponseParams
- ]
-)
+PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams = typing.Union[
+ PromptResponseParams,
+ ToolResponseParams,
+ DatasetResponseParams,
+ EvaluatorResponseParams,
+ FlowResponseParams,
+ AgentResponseParams,
+]
diff --git a/src/humanloop/requests/populate_template_response.py b/src/humanloop/requests/populate_template_response.py
index 190341b0..40b62295 100644
--- a/src/humanloop/requests/populate_template_response.py
+++ b/src/humanloop/requests/populate_template_response.py
@@ -9,7 +9,7 @@
from .populate_template_response_stop import PopulateTemplateResponseStopParams
import typing
from .response_format import ResponseFormatParams
-from ..types.reasoning_effort import ReasoningEffort
+from .populate_template_response_reasoning_effort import PopulateTemplateResponseReasoningEffortParams
from .tool_function import ToolFunctionParams
from .linked_tool_response import LinkedToolResponseParams
from .environment_response import EnvironmentResponseParams
@@ -119,9 +119,9 @@ class PopulateTemplateResponseParams(typing_extensions.TypedDict):
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
"""
- reasoning_effort: typing_extensions.NotRequired[ReasoningEffort]
+ reasoning_effort: typing_extensions.NotRequired[PopulateTemplateResponseReasoningEffortParams]
"""
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which specifies the maximum token budget.
"""
tools: typing_extensions.NotRequired[typing.Sequence[ToolFunctionParams]]
@@ -169,6 +169,11 @@ class PopulateTemplateResponseParams(typing_extensions.TypedDict):
Name of the Prompt.
"""
+ schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The JSON schema for the Prompt.
+ """
+
version_id: str
"""
Unique identifier for the specific Prompt Version. If no query params provided, the default deployed Prompt Version is returned.
@@ -213,6 +218,11 @@ class PopulateTemplateResponseParams(typing_extensions.TypedDict):
Aggregation of Evaluator results for the Prompt Version.
"""
+ content: typing_extensions.NotRequired[str]
+ """
+ The serialized kernel for the Prompt. Corresponds to the .prompt file.
+ """
+
populated_template: typing_extensions.NotRequired[PopulateTemplateResponsePopulatedTemplateParams]
"""
The template populated with the input values you provided in the request. Returns None if no template exists.
diff --git a/src/humanloop/requests/populate_template_response_reasoning_effort.py b/src/humanloop/requests/populate_template_response_reasoning_effort.py
new file mode 100644
index 00000000..6b1dd46a
--- /dev/null
+++ b/src/humanloop/requests/populate_template_response_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ..types.open_ai_reasoning_effort import OpenAiReasoningEffort
+
+PopulateTemplateResponseReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int]
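The union alias above (and its twins for the Prompt kernel and response types below) is what widens `reasoning_effort` from the old OpenAI-only enum. A hedged sketch of both accepted forms via `prompts.call`; the model names, paths, and messages are illustrative, not from this patch:

    from humanloop import Humanloop

    client = Humanloop(api_key="YOUR_API_KEY")

    # OpenAI reasoning models take an OpenAIReasoningEffort enum value...
    client.prompts.call(
        path="examples/openai-reasoner",
        prompt={"model": "o3-mini", "reasoning_effort": "medium"},
        messages=[{"role": "user", "content": "Work through this step by step."}],
    )

    # ...while Anthropic reasoning models take an integer maximum token budget.
    client.prompts.call(
        path="examples/anthropic-reasoner",
        prompt={"model": "claude-3-7-sonnet", "reasoning_effort": 2048},
        messages=[{"role": "user", "content": "Work through this step by step."}],
    )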
diff --git a/src/humanloop/requests/prompt_kernel_request.py b/src/humanloop/requests/prompt_kernel_request.py
index 61355166..1e4f56de 100644
--- a/src/humanloop/requests/prompt_kernel_request.py
+++ b/src/humanloop/requests/prompt_kernel_request.py
@@ -9,11 +9,17 @@
from .prompt_kernel_request_stop import PromptKernelRequestStopParams
import typing
from .response_format import ResponseFormatParams
-from ..types.reasoning_effort import ReasoningEffort
+from .prompt_kernel_request_reasoning_effort import PromptKernelRequestReasoningEffortParams
from .tool_function import ToolFunctionParams
class PromptKernelRequestParams(typing_extensions.TypedDict):
+ """
+ Base class used by both PromptKernelRequest and AgentKernelRequest.
+
+ Contains the Prompt-related fields common to both.
+ """
+
model: str
"""
The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
@@ -89,9 +95,9 @@ class PromptKernelRequestParams(typing_extensions.TypedDict):
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
"""
- reasoning_effort: typing_extensions.NotRequired[ReasoningEffort]
+ reasoning_effort: typing_extensions.NotRequired[PromptKernelRequestReasoningEffortParams]
"""
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which specifies the maximum token budget.
"""
tools: typing_extensions.NotRequired[typing.Sequence[ToolFunctionParams]]
diff --git a/src/humanloop/requests/prompt_kernel_request_reasoning_effort.py b/src/humanloop/requests/prompt_kernel_request_reasoning_effort.py
new file mode 100644
index 00000000..0c3d194b
--- /dev/null
+++ b/src/humanloop/requests/prompt_kernel_request_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ..types.open_ai_reasoning_effort import OpenAiReasoningEffort
+
+PromptKernelRequestReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/requests/prompt_response.py b/src/humanloop/requests/prompt_response.py
index 912866c5..05b4a71e 100644
--- a/src/humanloop/requests/prompt_response.py
+++ b/src/humanloop/requests/prompt_response.py
@@ -10,7 +10,7 @@
from .prompt_response_stop import PromptResponseStopParams
import typing
from .response_format import ResponseFormatParams
-from ..types.reasoning_effort import ReasoningEffort
+from .prompt_response_reasoning_effort import PromptResponseReasoningEffortParams
from .tool_function import ToolFunctionParams
from .linked_tool_response import LinkedToolResponseParams
from .environment_response import EnvironmentResponseParams
@@ -122,9 +122,9 @@ class PromptResponseParams(typing_extensions.TypedDict):
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
"""
- reasoning_effort: typing_extensions.NotRequired[ReasoningEffort]
+ reasoning_effort: typing_extensions.NotRequired[PromptResponseReasoningEffortParams]
"""
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which specifies the maximum token budget.
"""
tools: typing_extensions.NotRequired[typing.Sequence[ToolFunctionParams]]
@@ -172,6 +172,11 @@ class PromptResponseParams(typing_extensions.TypedDict):
Name of the Prompt.
"""
+ schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The JSON schema for the Prompt.
+ """
+
version_id: str
"""
Unique identifier for the specific Prompt Version. If no query params provided, the default deployed Prompt Version is returned.
@@ -215,3 +220,8 @@ class PromptResponseParams(typing_extensions.TypedDict):
"""
Aggregation of Evaluator results for the Prompt Version.
"""
+
+ content: typing_extensions.NotRequired[str]
+ """
+ The serialized kernel for the Prompt. Corresponds to the .prompt file.
+ """
diff --git a/src/humanloop/requests/prompt_response_reasoning_effort.py b/src/humanloop/requests/prompt_response_reasoning_effort.py
new file mode 100644
index 00000000..4d019051
--- /dev/null
+++ b/src/humanloop/requests/prompt_response_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ..types.open_ai_reasoning_effort import OpenAiReasoningEffort
+
+PromptResponseReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/requests/run_version_response.py b/src/humanloop/requests/run_version_response.py
index 879ea25c..569d0d76 100644
--- a/src/humanloop/requests/run_version_response.py
+++ b/src/humanloop/requests/run_version_response.py
@@ -5,7 +5,8 @@
from .tool_response import ToolResponseParams
from .evaluator_response import EvaluatorResponseParams
from .flow_response import FlowResponseParams
+from .agent_response import AgentResponseParams
RunVersionResponseParams = typing.Union[
- PromptResponseParams, ToolResponseParams, EvaluatorResponseParams, FlowResponseParams
+ PromptResponseParams, ToolResponseParams, EvaluatorResponseParams, FlowResponseParams, AgentResponseParams
]
diff --git a/src/humanloop/requests/tool_call_response.py b/src/humanloop/requests/tool_call_response.py
new file mode 100644
index 00000000..1c92b28f
--- /dev/null
+++ b/src/humanloop/requests/tool_call_response.py
@@ -0,0 +1,145 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import datetime as dt
+from .tool_response import ToolResponseParams
+import typing
+from ..types.log_status import LogStatus
+from .evaluator_log_response import EvaluatorLogResponseParams
+from .log_response import LogResponseParams
+
+
+class ToolCallResponseParams(typing_extensions.TypedDict):
+ """
+ Response model for a Tool call.
+ """
+
+ start_time: typing_extensions.NotRequired[dt.datetime]
+ """
+ When the logged event started.
+ """
+
+ end_time: typing_extensions.NotRequired[dt.datetime]
+ """
+ When the logged event ended.
+ """
+
+ tool: ToolResponseParams
+ """
+ Tool used to generate the Log.
+ """
+
+ output: typing_extensions.NotRequired[str]
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing_extensions.NotRequired[dt.datetime]
+ """
+ User defined timestamp for when the log was created.
+ """
+
+ error: typing_extensions.NotRequired[str]
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing_extensions.NotRequired[float]
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing_extensions.NotRequired[str]
+ """
+ Captured log and debug statements.
+ """
+
+ provider_request: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Raw request sent to provider.
+ """
+
+ provider_response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Raw response received from the provider.
+ """
+
+ inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The inputs passed to the prompt template.
+ """
+
+ source: typing_extensions.NotRequired[str]
+ """
+ Identifies where the model was called from.
+ """
+
+ metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Any additional metadata to record.
+ """
+
+ log_status: typing_extensions.NotRequired[LogStatus]
+ """
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+ """
+
+ source_datapoint_id: typing_extensions.NotRequired[str]
+ """
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ """
+
+ trace_parent_id: typing_extensions.NotRequired[str]
+ """
+ The ID of the parent Log to nest this Log under in a Trace.
+ """
+
+ batches: typing_extensions.NotRequired[typing.Sequence[str]]
+ """
+ Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations
+ """
+
+ user: typing_extensions.NotRequired[str]
+ """
+ End-user ID related to the Log.
+ """
+
+ environment: typing_extensions.NotRequired[str]
+ """
+ The name of the Environment the Log is associated to.
+ """
+
+ save: typing_extensions.NotRequired[bool]
+ """
+ Whether the request/response payloads will be stored on Humanloop.
+ """
+
+ log_id: typing_extensions.NotRequired[str]
+ """
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ """
+
+ id: str
+ """
+ ID of the log.
+ """
+
+ evaluator_logs: typing.Sequence[EvaluatorLogResponseParams]
+ """
+ List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
+ """
+
+ trace_flow_id: typing_extensions.NotRequired[str]
+ """
+ Identifier for the Flow that the Trace belongs to.
+ """
+
+ trace_id: typing_extensions.NotRequired[str]
+ """
+ ID of the Trace containing the Tool Call Log.
+ """
+
+ trace_children: typing_extensions.NotRequired[typing.Sequence[LogResponseParams]]
+ """
+ Logs nested under this Log in the Trace.
+ """
diff --git a/src/humanloop/requests/tool_log_response.py b/src/humanloop/requests/tool_log_response.py
index bac9dbbb..1aa0daea 100644
--- a/src/humanloop/requests/tool_log_response.py
+++ b/src/humanloop/requests/tool_log_response.py
@@ -7,6 +7,7 @@
import typing
from ..types.log_status import LogStatus
from .tool_response import ToolResponseParams
+from .chat_message import ChatMessageParams
import typing
if typing.TYPE_CHECKING:
@@ -148,3 +149,8 @@ class ToolLogResponseParams(typing_extensions.TypedDict):
"""
Tool used to generate the Log.
"""
+
+ output_message: typing_extensions.NotRequired[ChatMessageParams]
+ """
+ The message returned by the Tool.
+ """
diff --git a/src/humanloop/requests/version_deployment_response_file.py b/src/humanloop/requests/version_deployment_response_file.py
index 8a16af00..9659cb49 100644
--- a/src/humanloop/requests/version_deployment_response_file.py
+++ b/src/humanloop/requests/version_deployment_response_file.py
@@ -10,6 +10,12 @@
from .tool_response import ToolResponseParams
from .evaluator_response import EvaluatorResponseParams
from .flow_response import FlowResponseParams
+ from .agent_response import AgentResponseParams
VersionDeploymentResponseFileParams = typing.Union[
- "PromptResponseParams", "ToolResponseParams", DatasetResponseParams, "EvaluatorResponseParams", "FlowResponseParams"
+ "PromptResponseParams",
+ "ToolResponseParams",
+ DatasetResponseParams,
+ "EvaluatorResponseParams",
+ "FlowResponseParams",
+ "AgentResponseParams",
]
diff --git a/src/humanloop/requests/version_id_response_version.py b/src/humanloop/requests/version_id_response_version.py
index 50ecf7bc..9c317679 100644
--- a/src/humanloop/requests/version_id_response_version.py
+++ b/src/humanloop/requests/version_id_response_version.py
@@ -10,6 +10,12 @@
from .tool_response import ToolResponseParams
from .evaluator_response import EvaluatorResponseParams
from .flow_response import FlowResponseParams
+ from .agent_response import AgentResponseParams
VersionIdResponseVersionParams = typing.Union[
- "PromptResponseParams", "ToolResponseParams", DatasetResponseParams, "EvaluatorResponseParams", "FlowResponseParams"
+ "PromptResponseParams",
+ "ToolResponseParams",
+ DatasetResponseParams,
+ "EvaluatorResponseParams",
+ "FlowResponseParams",
+ "AgentResponseParams",
]
diff --git a/src/humanloop/tools/client.py b/src/humanloop/tools/client.py
index 16d75bd7..ea6b14a2 100644
--- a/src/humanloop/tools/client.py
+++ b/src/humanloop/tools/client.py
@@ -3,10 +3,11 @@
import typing
from ..core.client_wrapper import SyncClientWrapper
from .raw_client import RawToolsClient
+from ..requests.tool_kernel_request import ToolKernelRequestParams
import datetime as dt
from ..types.log_status import LogStatus
-from ..requests.tool_kernel_request import ToolKernelRequestParams
from ..core.request_options import RequestOptions
+from ..types.tool_call_response import ToolCallResponse
from ..types.create_tool_log_response import CreateToolLogResponse
from ..types.log_response import LogResponse
from ..types.project_sort_by import ProjectSortBy
@@ -29,6 +30,8 @@
from ..requests.evaluator_activation_deactivation_request_deactivate_item import (
EvaluatorActivationDeactivationRequestDeactivateItemParams,
)
+from ..types.file_environment_variable_request import FileEnvironmentVariableRequest
+from ..requests.file_environment_variable_request import FileEnvironmentVariableRequestParams
from ..core.client_wrapper import AsyncClientWrapper
from .raw_client import AsyncRawToolsClient
from ..core.pagination import AsyncPager
@@ -52,6 +55,133 @@ def with_raw_response(self) -> RawToolsClient:
"""
return self._raw_client
+ def call(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ tool: typing.Optional[ToolKernelRequestParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ tool_call_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> ToolCallResponse:
+ """
+ Call a Tool.
+
+ Calling a Tool with inputs runs the tool's source code and logs the result and metadata to Humanloop.
+
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Tool. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can pass in
+ Tool details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Tool. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Tool details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Tool to call.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to call.
+
+ path : typing.Optional[str]
+ Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Tool.
+
+ tool : typing.Optional[ToolKernelRequestParams]
+ Details of your Tool. A new Tool version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ tool_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ ToolCallResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.tools.call()
+ """
+ response = self._raw_client.call(
+ version_id=version_id,
+ environment=environment,
+ path=path,
+ id=id,
+ tool=tool,
+ inputs=inputs,
+ source=source,
+ metadata=metadata,
+ start_time=start_time,
+ end_time=end_time,
+ log_status=log_status,
+ source_datapoint_id=source_datapoint_id,
+ trace_parent_id=trace_parent_id,
+ user=user,
+ tool_call_request_environment=tool_call_request_environment,
+ save=save,
+ log_id=log_id,
+ request_options=request_options,
+ )
+ return response.data
+
def log(
self,
*,
@@ -59,6 +189,7 @@ def log(
environment: typing.Optional[str] = None,
path: typing.Optional[str] = OMIT,
id: typing.Optional[str] = OMIT,
+ tool: typing.Optional[ToolKernelRequestParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -78,7 +209,6 @@ def log(
tool_log_request_environment: typing.Optional[str] = OMIT,
save: typing.Optional[bool] = OMIT,
log_id: typing.Optional[str] = OMIT,
- tool: typing.Optional[ToolKernelRequestParams] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> CreateToolLogResponse:
"""
@@ -106,6 +236,9 @@ def log(
id : typing.Optional[str]
ID for an existing Tool.
+ tool : typing.Optional[ToolKernelRequestParams]
+ Details of your Tool. A new Tool version will be created if the provided details are new.
+
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -163,9 +296,6 @@ def log(
log_id : typing.Optional[str]
This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- tool : typing.Optional[ToolKernelRequestParams]
- Details of your Tool. A new Tool version will be created if the provided details are new.
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -206,6 +336,7 @@ def log(
environment=environment,
path=path,
id=id,
+ tool=tool,
start_time=start_time,
end_time=end_time,
output=output,
@@ -225,7 +356,6 @@ def log(
tool_log_request_environment=tool_log_request_environment,
save=save,
log_id=log_id,
- tool=tool,
request_options=request_options,
)
return response.data
@@ -966,6 +1096,112 @@ def update_monitoring(
)
return response.data
+ def get_environment_variables(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> typing.List[FileEnvironmentVariableRequest]:
+ """
+ Parameters
+ ----------
+ id : str
+ Unique identifier for File.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.List[FileEnvironmentVariableRequest]
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.tools.get_environment_variables(
+ id="id",
+ )
+ """
+ response = self._raw_client.get_environment_variables(id, request_options=request_options)
+ return response.data
+
+ def add_environment_variable(
+ self,
+ id: str,
+ *,
+ request: typing.Sequence[FileEnvironmentVariableRequestParams],
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.List[FileEnvironmentVariableRequest]:
+ """
+ Add an environment variable to a Tool.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Tool.
+
+ request : typing.Sequence[FileEnvironmentVariableRequestParams]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.List[FileEnvironmentVariableRequest]
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.tools.add_environment_variable(
+ id="id",
+ request=[{"name": "name", "value": "value"}],
+ )
+ """
+ response = self._raw_client.add_environment_variable(id, request=request, request_options=request_options)
+ return response.data
+
+ def delete_environment_variable(
+ self, id: str, name: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> typing.List[FileEnvironmentVariableRequest]:
+ """
+ Parameters
+ ----------
+ id : str
+ Unique identifier for File.
+
+ name : str
+ Name of the Environment Variable to delete.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.List[FileEnvironmentVariableRequest]
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.tools.delete_environment_variable(
+ id="id",
+ name="name",
+ )
+ """
+ response = self._raw_client.delete_environment_variable(id, name, request_options=request_options)
+ return response.data
+
class AsyncToolsClient:
def __init__(self, *, client_wrapper: AsyncClientWrapper):
@@ -982,6 +1218,141 @@ def with_raw_response(self) -> AsyncRawToolsClient:
"""
return self._raw_client
+ async def call(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ tool: typing.Optional[ToolKernelRequestParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ tool_call_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> ToolCallResponse:
+ """
+ Call a Tool.
+
+ Calling a Tool with inputs runs the tool's source code and logs the result and metadata to Humanloop.
+
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Tool. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can pass in
+ Tool details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Tool. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Tool details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Tool to call.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to call.
+
+ path : typing.Optional[str]
+ Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Tool.
+
+ tool : typing.Optional[ToolKernelRequestParams]
+ Details of your Tool. A new Tool version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ tool_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ ToolCallResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.tools.call()
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.call(
+ version_id=version_id,
+ environment=environment,
+ path=path,
+ id=id,
+ tool=tool,
+ inputs=inputs,
+ source=source,
+ metadata=metadata,
+ start_time=start_time,
+ end_time=end_time,
+ log_status=log_status,
+ source_datapoint_id=source_datapoint_id,
+ trace_parent_id=trace_parent_id,
+ user=user,
+ tool_call_request_environment=tool_call_request_environment,
+ save=save,
+ log_id=log_id,
+ request_options=request_options,
+ )
+ return response.data
+
async def log(
self,
*,
@@ -989,6 +1360,7 @@ async def log(
environment: typing.Optional[str] = None,
path: typing.Optional[str] = OMIT,
id: typing.Optional[str] = OMIT,
+ tool: typing.Optional[ToolKernelRequestParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -1008,7 +1380,6 @@ async def log(
tool_log_request_environment: typing.Optional[str] = OMIT,
save: typing.Optional[bool] = OMIT,
log_id: typing.Optional[str] = OMIT,
- tool: typing.Optional[ToolKernelRequestParams] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> CreateToolLogResponse:
"""
@@ -1036,6 +1407,9 @@ async def log(
id : typing.Optional[str]
ID for an existing Tool.
+ tool : typing.Optional[ToolKernelRequestParams]
+ Details of your Tool. A new Tool version will be created if the provided details are new.
+
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -1093,9 +1467,6 @@ async def log(
log_id : typing.Optional[str]
This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- tool : typing.Optional[ToolKernelRequestParams]
- Details of your Tool. A new Tool version will be created if the provided details are new.
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -1144,6 +1515,7 @@ async def main() -> None:
environment=environment,
path=path,
id=id,
+ tool=tool,
start_time=start_time,
end_time=end_time,
output=output,
@@ -1163,7 +1535,6 @@ async def main() -> None:
tool_log_request_environment=tool_log_request_environment,
save=save,
log_id=log_id,
- tool=tool,
request_options=request_options,
)
return response.data
@@ -2010,3 +2381,133 @@ async def main() -> None:
id, activate=activate, deactivate=deactivate, request_options=request_options
)
return response.data
+
+ async def get_environment_variables(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> typing.List[FileEnvironmentVariableRequest]:
+ """
+ Parameters
+ ----------
+ id : str
+ Unique identifier for File.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.List[FileEnvironmentVariableRequest]
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.tools.get_environment_variables(
+ id="id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.get_environment_variables(id, request_options=request_options)
+ return response.data
+
+ async def add_environment_variable(
+ self,
+ id: str,
+ *,
+ request: typing.Sequence[FileEnvironmentVariableRequestParams],
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.List[FileEnvironmentVariableRequest]:
+ """
+ Add an environment variable to a Tool.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Tool.
+
+ request : typing.Sequence[FileEnvironmentVariableRequestParams]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.List[FileEnvironmentVariableRequest]
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.tools.add_environment_variable(
+ id="id",
+ request=[{"name": "name", "value": "value"}],
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.add_environment_variable(id, request=request, request_options=request_options)
+ return response.data
+
+ async def delete_environment_variable(
+ self, id: str, name: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> typing.List[FileEnvironmentVariableRequest]:
+ """
+ Parameters
+ ----------
+ id : str
+ Unique identifier for File.
+
+ name : str
+ Name of the Environment Variable to delete.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.List[FileEnvironmentVariableRequest]
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.tools.delete_environment_variable(
+ id="id",
+ name="name",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.delete_environment_variable(id, name, request_options=request_options)
+ return response.data
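Taken together, the three new methods give Tools a small CRUD surface for environment variables. A combined sketch using the signatures above; the Tool ID and variable values are placeholders:

    client.tools.add_environment_variable(
        id="tl_1234567890",
        request=[{"name": "OPENWEATHERMAP_API_KEY", "value": "sk-..."}],
    )

    for variable in client.tools.get_environment_variables(id="tl_1234567890"):
        print(variable.name)

    client.tools.delete_environment_variable(
        id="tl_1234567890",
        name="OPENWEATHERMAP_API_KEY",
    )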
diff --git a/src/humanloop/tools/raw_client.py b/src/humanloop/tools/raw_client.py
index 4a1f29e9..b412b771 100644
--- a/src/humanloop/tools/raw_client.py
+++ b/src/humanloop/tools/raw_client.py
@@ -2,18 +2,19 @@
import typing
from ..core.client_wrapper import SyncClientWrapper
+from ..requests.tool_kernel_request import ToolKernelRequestParams
import datetime as dt
from ..types.log_status import LogStatus
-from ..requests.tool_kernel_request import ToolKernelRequestParams
from ..core.request_options import RequestOptions
from ..core.http_response import HttpResponse
-from ..types.create_tool_log_response import CreateToolLogResponse
+from ..types.tool_call_response import ToolCallResponse
from ..core.serialization import convert_and_respect_annotation_metadata
from ..core.unchecked_base_model import construct_type
from ..errors.unprocessable_entity_error import UnprocessableEntityError
from ..types.http_validation_error import HttpValidationError
from json.decoder import JSONDecodeError
from ..core.api_error import ApiError
+from ..types.create_tool_log_response import CreateToolLogResponse
from ..types.log_response import LogResponse
from ..core.jsonable_encoder import jsonable_encoder
from ..requests.tool_function import ToolFunctionParams
@@ -27,6 +28,8 @@
from ..requests.evaluator_activation_deactivation_request_deactivate_item import (
EvaluatorActivationDeactivationRequestDeactivateItemParams,
)
+from ..types.file_environment_variable_request import FileEnvironmentVariableRequest
+from ..requests.file_environment_variable_request import FileEnvironmentVariableRequestParams
from ..core.client_wrapper import AsyncClientWrapper
from ..core.http_response import AsyncHttpResponse
@@ -38,6 +41,159 @@ class RawToolsClient:
def __init__(self, *, client_wrapper: SyncClientWrapper):
self._client_wrapper = client_wrapper
+ def call(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ tool: typing.Optional[ToolKernelRequestParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ tool_call_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[ToolCallResponse]:
+ """
+ Call a Tool.
+
+ Calling a Tool with inputs runs the tool's source code and logs the result and metadata to Humanloop.
+
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Tool. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can pass in
+ Tool details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Tool. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Tool details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Tool to call.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to call.
+
+ path : typing.Optional[str]
+ Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Tool.
+
+ tool : typing.Optional[ToolKernelRequestParams]
+ Details of your Tool. A new Tool version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ tool_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[ToolCallResponse]
+ Successful Response
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "tools/call",
+ method="POST",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ json={
+ "path": path,
+ "id": id,
+ "tool": convert_and_respect_annotation_metadata(
+ object_=tool, annotation=ToolKernelRequestParams, direction="write"
+ ),
+ "inputs": inputs,
+ "source": source,
+ "metadata": metadata,
+ "start_time": start_time,
+ "end_time": end_time,
+ "log_status": log_status,
+ "source_datapoint_id": source_datapoint_id,
+ "trace_parent_id": trace_parent_id,
+ "user": user,
+ "environment": tool_call_request_environment,
+ "save": save,
+ "log_id": log_id,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ ToolCallResponse,
+ construct_type(
+ type_=ToolCallResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
def log(
self,
*,
@@ -45,6 +201,7 @@ def log(
environment: typing.Optional[str] = None,
path: typing.Optional[str] = OMIT,
id: typing.Optional[str] = OMIT,
+ tool: typing.Optional[ToolKernelRequestParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -64,7 +221,6 @@ def log(
tool_log_request_environment: typing.Optional[str] = OMIT,
save: typing.Optional[bool] = OMIT,
log_id: typing.Optional[str] = OMIT,
- tool: typing.Optional[ToolKernelRequestParams] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[CreateToolLogResponse]:
"""
@@ -92,6 +248,9 @@ def log(
id : typing.Optional[str]
ID for an existing Tool.
+ tool : typing.Optional[ToolKernelRequestParams]
+ Details of your Tool. A new Tool version will be created if the provided details are new.
+
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -149,9 +308,6 @@ def log(
log_id : typing.Optional[str]
This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- tool : typing.Optional[ToolKernelRequestParams]
- Details of your Tool. A new Tool version will be created if the provided details are new.
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -170,6 +326,9 @@ def log(
json={
"path": path,
"id": id,
+ "tool": convert_and_respect_annotation_metadata(
+ object_=tool, annotation=ToolKernelRequestParams, direction="write"
+ ),
"start_time": start_time,
"end_time": end_time,
"output": output,
@@ -189,9 +348,6 @@ def log(
"environment": tool_log_request_environment,
"save": save,
"log_id": log_id,
- "tool": convert_and_respect_annotation_metadata(
- object_=tool, annotation=ToolKernelRequestParams, direction="write"
- ),
},
headers={
"content-type": "application/json",
@@ -1038,75 +1194,387 @@ def update_monitoring(
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
+ def get_environment_variables(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> HttpResponse[typing.List[FileEnvironmentVariableRequest]]:
+ """
+ Parameters
+ ----------
+ id : str
+ Unique identifier for File.
-class AsyncRawToolsClient:
- def __init__(self, *, client_wrapper: AsyncClientWrapper):
- self._client_wrapper = client_wrapper
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- async def log(
+ Returns
+ -------
+ HttpResponse[typing.List[FileEnvironmentVariableRequest]]
+ Successful Response
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"tools/{jsonable_encoder(id)}/environment-variables",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ typing.List[FileEnvironmentVariableRequest],
+ construct_type(
+ type_=typing.List[FileEnvironmentVariableRequest], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def add_environment_variable(
self,
+ id: str,
*,
- version_id: typing.Optional[str] = None,
- environment: typing.Optional[str] = None,
- path: typing.Optional[str] = OMIT,
- id: typing.Optional[str] = OMIT,
- start_time: typing.Optional[dt.datetime] = OMIT,
- end_time: typing.Optional[dt.datetime] = OMIT,
- output: typing.Optional[str] = OMIT,
- created_at: typing.Optional[dt.datetime] = OMIT,
- error: typing.Optional[str] = OMIT,
- provider_latency: typing.Optional[float] = OMIT,
- stdout: typing.Optional[str] = OMIT,
- provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- source: typing.Optional[str] = OMIT,
- metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
- source_datapoint_id: typing.Optional[str] = OMIT,
- trace_parent_id: typing.Optional[str] = OMIT,
- user: typing.Optional[str] = OMIT,
- tool_log_request_environment: typing.Optional[str] = OMIT,
- save: typing.Optional[bool] = OMIT,
- log_id: typing.Optional[str] = OMIT,
- tool: typing.Optional[ToolKernelRequestParams] = OMIT,
+ request: typing.Sequence[FileEnvironmentVariableRequestParams],
request_options: typing.Optional[RequestOptions] = None,
- ) -> AsyncHttpResponse[CreateToolLogResponse]:
+ ) -> HttpResponse[typing.List[FileEnvironmentVariableRequest]]:
"""
- Log to a Tool.
-
- You can use query parameters `version_id`, or `environment`, to target
- an existing version of the Tool. Otherwise the default deployed version will be chosen.
-
- Instead of targeting an existing version explicitly, you can instead pass in
- Tool details in the request body. In this case, we will check if the details correspond
- to an existing version of the Tool, if not we will create a new version. This is helpful
- in the case where you are storing or deriving your Tool details in code.
+ Add an environment variable to a Tool.
Parameters
----------
- version_id : typing.Optional[str]
- A specific Version ID of the Tool to log to.
-
- environment : typing.Optional[str]
- Name of the Environment identifying a deployed version to log to.
-
- path : typing.Optional[str]
- Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
-
- id : typing.Optional[str]
- ID for an existing Tool.
-
- start_time : typing.Optional[dt.datetime]
- When the logged event started.
+ id : str
+ Unique identifier for Tool.
- end_time : typing.Optional[dt.datetime]
- When the logged event ended.
+ request : typing.Sequence[FileEnvironmentVariableRequestParams]
- output : typing.Optional[str]
- Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- created_at : typing.Optional[dt.datetime]
+ Returns
+ -------
+ HttpResponse[typing.List[FileEnvironmentVariableRequest]]
+ Successful Response
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"tools/{jsonable_encoder(id)}/environment-variables",
+ method="POST",
+ json=convert_and_respect_annotation_metadata(
+ object_=request, annotation=typing.Sequence[FileEnvironmentVariableRequestParams], direction="write"
+ ),
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ typing.List[FileEnvironmentVariableRequest],
+ construct_type(
+ type_=typing.List[FileEnvironmentVariableRequest], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def delete_environment_variable(
+ self, id: str, name: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> HttpResponse[typing.List[FileEnvironmentVariableRequest]]:
+ """
+ Parameters
+ ----------
+ id : str
+ Unique identifier for File.
+
+ name : str
+ Name of the Environment Variable to delete.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[typing.List[FileEnvironmentVariableRequest]]
+ Successful Response
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"tools/{jsonable_encoder(id)}/environment-variables/{jsonable_encoder(name)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ typing.List[FileEnvironmentVariableRequest],
+ construct_type(
+ type_=typing.List[FileEnvironmentVariableRequest], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncRawToolsClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def call(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ tool: typing.Optional[ToolKernelRequestParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ tool_call_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[ToolCallResponse]:
+ """
+ Call a Tool.
+
+ Calling a Tool with inputs runs the tool's source code and logs the result and metadata to Humanloop.
+
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Tool. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can pass in
+ Tool details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Tool. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Tool details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Tool to call.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to call.
+
+ path : typing.Optional[str]
+ Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Tool.
+
+ tool : typing.Optional[ToolKernelRequestParams]
+ Details of your Tool. A new Tool version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ tool_call_request_environment : typing.Optional[str]
+            The name of the Environment the Log is associated with.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[ToolCallResponse]
+ Successful Response
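+
+        Examples
+        --------
+        Illustrative sketch only: assumes the raw client is exposed via a
+        `with_raw_response` accessor and that `output` is present on
+        `ToolCallResponse`; the path and inputs are placeholders.
+
+        import asyncio
+
+        from humanloop import AsyncHumanloop
+
+        client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+        async def main() -> None:
+            response = await client.tools.with_raw_response.call(
+                path="math/calculator",
+                inputs={"expression": "2 + 2"},
+            )
+            print(response.data.output)
+
+        asyncio.run(main())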
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "tools/call",
+ method="POST",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ json={
+ "path": path,
+ "id": id,
+ "tool": convert_and_respect_annotation_metadata(
+ object_=tool, annotation=ToolKernelRequestParams, direction="write"
+ ),
+ "inputs": inputs,
+ "source": source,
+ "metadata": metadata,
+ "start_time": start_time,
+ "end_time": end_time,
+ "log_status": log_status,
+ "source_datapoint_id": source_datapoint_id,
+ "trace_parent_id": trace_parent_id,
+ "user": user,
+ "environment": tool_call_request_environment,
+ "save": save,
+ "log_id": log_id,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ ToolCallResponse,
+ construct_type(
+ type_=ToolCallResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def log(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ tool: typing.Optional[ToolKernelRequestParams] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ output: typing.Optional[str] = OMIT,
+ created_at: typing.Optional[dt.datetime] = OMIT,
+ error: typing.Optional[str] = OMIT,
+ provider_latency: typing.Optional[float] = OMIT,
+ stdout: typing.Optional[str] = OMIT,
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ tool_log_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[CreateToolLogResponse]:
+ """
+ Log to a Tool.
+
+        You can use the query parameters `version_id` or `environment` to target
+        an existing version of the Tool. Otherwise, the default deployed version will be chosen.
+
+        Instead of targeting an existing version explicitly, you can pass in
+        Tool details in the request body. In this case, we will check whether the details correspond
+        to an existing version of the Tool. If they do not, we will create a new version. This is helpful
+        when you are storing or deriving your Tool details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Tool to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+            Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Tool.
+
+ tool : typing.Optional[ToolKernelRequestParams]
+ Details of your Tool. A new Tool version will be created if the provided details are new.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ output : typing.Optional[str]
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+ created_at : typing.Optional[dt.datetime]
User defined timestamp for when the log was created.
error : typing.Optional[str]
@@ -1154,9 +1622,6 @@ async def log(
log_id : typing.Optional[str]
This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- tool : typing.Optional[ToolKernelRequestParams]
- Details of your Tool. A new Tool version will be created if the provided details are new.
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -1175,6 +1640,9 @@ async def log(
json={
"path": path,
"id": id,
+ "tool": convert_and_respect_annotation_metadata(
+ object_=tool, annotation=ToolKernelRequestParams, direction="write"
+ ),
"start_time": start_time,
"end_time": end_time,
"output": output,
@@ -1194,9 +1662,6 @@ async def log(
"environment": tool_log_request_environment,
"save": save,
"log_id": log_id,
- "tool": convert_and_respect_annotation_metadata(
- object_=tool, annotation=ToolKernelRequestParams, direction="write"
- ),
},
headers={
"content-type": "application/json",
@@ -2044,3 +2509,159 @@ async def update_monitoring(
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def get_environment_variables(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncHttpResponse[typing.List[FileEnvironmentVariableRequest]]:
+ """
+ Parameters
+ ----------
+ id : str
+ Unique identifier for File.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[typing.List[FileEnvironmentVariableRequest]]
+ Successful Response
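+
+        Examples
+        --------
+        Illustrative sketch only (the `with_raw_response` accessor is an
+        assumption; the id is a placeholder):
+
+        import asyncio
+
+        from humanloop import AsyncHumanloop
+
+        client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+        async def main() -> None:
+            response = await client.tools.with_raw_response.get_environment_variables(
+                id="tl_abc123",
+            )
+            for variable in response.data:
+                print(variable)
+
+        asyncio.run(main())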
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"tools/{jsonable_encoder(id)}/environment-variables",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ typing.List[FileEnvironmentVariableRequest],
+ construct_type(
+ type_=typing.List[FileEnvironmentVariableRequest], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def add_environment_variable(
+ self,
+ id: str,
+ *,
+ request: typing.Sequence[FileEnvironmentVariableRequestParams],
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[typing.List[FileEnvironmentVariableRequest]]:
+ """
+ Add an environment variable to a Tool.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Tool.
+
+ request : typing.Sequence[FileEnvironmentVariableRequestParams]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[typing.List[FileEnvironmentVariableRequest]]
+ Successful Response
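+
+        Examples
+        --------
+        Illustrative sketch only: the `with_raw_response` accessor and the
+        request dict keys (`name`, `value`) are assumptions.
+
+        import asyncio
+
+        from humanloop import AsyncHumanloop
+
+        client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+        async def main() -> None:
+            await client.tools.with_raw_response.add_environment_variable(
+                id="tl_abc123",
+                request=[{"name": "MY_API_KEY", "value": "sk-..."}],
+            )
+
+        asyncio.run(main())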
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"tools/{jsonable_encoder(id)}/environment-variables",
+ method="POST",
+ json=convert_and_respect_annotation_metadata(
+ object_=request, annotation=typing.Sequence[FileEnvironmentVariableRequestParams], direction="write"
+ ),
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ typing.List[FileEnvironmentVariableRequest],
+ construct_type(
+ type_=typing.List[FileEnvironmentVariableRequest], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def delete_environment_variable(
+ self, id: str, name: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncHttpResponse[typing.List[FileEnvironmentVariableRequest]]:
+ """
+ Parameters
+ ----------
+ id : str
+ Unique identifier for File.
+
+ name : str
+ Name of the Environment Variable to delete.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[typing.List[FileEnvironmentVariableRequest]]
+ Successful Response
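+
+        Examples
+        --------
+        Illustrative sketch only (the `with_raw_response` accessor is an
+        assumption; the id and name are placeholders):
+
+        import asyncio
+
+        from humanloop import AsyncHumanloop
+
+        client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+        async def main() -> None:
+            await client.tools.with_raw_response.delete_environment_variable(
+                id="tl_abc123",
+                name="MY_API_KEY",
+            )
+
+        asyncio.run(main())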
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"tools/{jsonable_encoder(id)}/environment-variables/{jsonable_encoder(name)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ typing.List[FileEnvironmentVariableRequest],
+ construct_type(
+ type_=typing.List[FileEnvironmentVariableRequest], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/humanloop/types/__init__.py b/src/humanloop/types/__init__.py
index 156f4e9a..8130325d 100644
--- a/src/humanloop/types/__init__.py
+++ b/src/humanloop/types/__init__.py
@@ -1,15 +1,44 @@
# This file was auto-generated by Fern from our API Definition.
+from .agent_call_response import AgentCallResponse
+from .agent_call_response_tool_choice import AgentCallResponseToolChoice
+from .agent_call_stream_response import AgentCallStreamResponse
+from .agent_call_stream_response_payload import AgentCallStreamResponsePayload
from .agent_config_response import AgentConfigResponse
+from .agent_continue_response import AgentContinueResponse
+from .agent_continue_response_tool_choice import AgentContinueResponseToolChoice
+from .agent_continue_stream_response import AgentContinueStreamResponse
+from .agent_continue_stream_response_payload import AgentContinueStreamResponsePayload
+from .agent_inline_tool import AgentInlineTool
+from .agent_kernel_request import AgentKernelRequest
+from .agent_kernel_request_reasoning_effort import AgentKernelRequestReasoningEffort
+from .agent_kernel_request_stop import AgentKernelRequestStop
+from .agent_kernel_request_template import AgentKernelRequestTemplate
+from .agent_kernel_request_tools_item import AgentKernelRequestToolsItem
+from .agent_linked_file_request import AgentLinkedFileRequest
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_linked_file_response_file import AgentLinkedFileResponseFile
+from .agent_log_response import AgentLogResponse
+from .agent_log_response_tool_choice import AgentLogResponseToolChoice
+from .agent_log_stream_response import AgentLogStreamResponse
+from .agent_response import AgentResponse
+from .agent_response_reasoning_effort import AgentResponseReasoningEffort
+from .agent_response_stop import AgentResponseStop
+from .agent_response_template import AgentResponseTemplate
+from .agent_response_tools_item import AgentResponseToolsItem
+from .anthropic_redacted_thinking_content import AnthropicRedactedThinkingContent
+from .anthropic_thinking_content import AnthropicThinkingContent
from .base_models_user_response import BaseModelsUserResponse
from .boolean_evaluator_stats_response import BooleanEvaluatorStatsResponse
from .chat_message import ChatMessage
from .chat_message_content import ChatMessageContent
from .chat_message_content_item import ChatMessageContentItem
+from .chat_message_thinking_item import ChatMessageThinkingItem
from .chat_role import ChatRole
from .chat_tool_type import ChatToolType
from .code_evaluator_request import CodeEvaluatorRequest
from .config_tool_response import ConfigToolResponse
+from .create_agent_log_response import CreateAgentLogResponse
from .create_datapoint_request import CreateDatapointRequest
from .create_datapoint_request_target_value import CreateDatapointRequestTargetValue
from .create_evaluator_log_response import CreateEvaluatorLogResponse
@@ -56,10 +85,12 @@
from .evaluator_return_type_enum import EvaluatorReturnTypeEnum
from .evaluator_version_id import EvaluatorVersionId
from .evaluators_request import EvaluatorsRequest
+from .event_type import EventType
from .external_evaluator_request import ExternalEvaluatorRequest
from .feedback_type import FeedbackType
from .file_environment_response import FileEnvironmentResponse
from .file_environment_response_file import FileEnvironmentResponseFile
+from .file_environment_variable_request import FileEnvironmentVariableRequest
from .file_id import FileId
from .file_path import FilePath
from .file_request import FileRequest
@@ -77,7 +108,9 @@
from .image_url import ImageUrl
from .image_url_detail import ImageUrlDetail
from .input_response import InputResponse
+from .linked_file_request import LinkedFileRequest
from .linked_tool_response import LinkedToolResponse
+from .list_agents import ListAgents
from .list_datasets import ListDatasets
from .list_evaluators import ListEvaluators
from .list_flows import ListFlows
@@ -86,6 +119,7 @@
from .llm_evaluator_request import LlmEvaluatorRequest
from .log_response import LogResponse
from .log_status import LogStatus
+from .log_stream_response import LogStreamResponse
from .model_endpoints import ModelEndpoints
from .model_providers import ModelProviders
from .monitoring_evaluator_environment_request import MonitoringEvaluatorEnvironmentRequest
@@ -94,18 +128,21 @@
from .monitoring_evaluator_version_request import MonitoringEvaluatorVersionRequest
from .numeric_evaluator_stats_response import NumericEvaluatorStatsResponse
from .observability_status import ObservabilityStatus
+from .on_agent_call_enum import OnAgentCallEnum
+from .open_ai_reasoning_effort import OpenAiReasoningEffort
from .overall_stats import OverallStats
+from .paginated_data_agent_response import PaginatedDataAgentResponse
from .paginated_data_evaluation_log_response import PaginatedDataEvaluationLogResponse
from .paginated_data_evaluator_response import PaginatedDataEvaluatorResponse
from .paginated_data_flow_response import PaginatedDataFlowResponse
from .paginated_data_log_response import PaginatedDataLogResponse
from .paginated_data_prompt_response import PaginatedDataPromptResponse
from .paginated_data_tool_response import PaginatedDataToolResponse
-from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response import (
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse,
+from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response import (
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse,
)
-from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item import (
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem,
+from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item import (
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem,
)
from .paginated_datapoint_response import PaginatedDatapointResponse
from .paginated_dataset_response import PaginatedDatasetResponse
@@ -115,6 +152,7 @@
from .platform_access_enum import PlatformAccessEnum
from .populate_template_response import PopulateTemplateResponse
from .populate_template_response_populated_template import PopulateTemplateResponsePopulatedTemplate
+from .populate_template_response_reasoning_effort import PopulateTemplateResponseReasoningEffort
from .populate_template_response_stop import PopulateTemplateResponseStop
from .populate_template_response_template import PopulateTemplateResponseTemplate
from .project_sort_by import ProjectSortBy
@@ -123,15 +161,16 @@
from .prompt_call_response_tool_choice import PromptCallResponseToolChoice
from .prompt_call_stream_response import PromptCallStreamResponse
from .prompt_kernel_request import PromptKernelRequest
+from .prompt_kernel_request_reasoning_effort import PromptKernelRequestReasoningEffort
from .prompt_kernel_request_stop import PromptKernelRequestStop
from .prompt_kernel_request_template import PromptKernelRequestTemplate
from .prompt_log_response import PromptLogResponse
from .prompt_log_response_tool_choice import PromptLogResponseToolChoice
from .prompt_response import PromptResponse
+from .prompt_response_reasoning_effort import PromptResponseReasoningEffort
from .prompt_response_stop import PromptResponseStop
from .prompt_response_template import PromptResponseTemplate
from .provider_api_keys import ProviderApiKeys
-from .reasoning_effort import ReasoningEffort
from .response_format import ResponseFormat
from .response_format_type import ResponseFormatType
from .run_stats_response import RunStatsResponse
@@ -144,6 +183,7 @@
from .text_evaluator_stats_response import TextEvaluatorStatsResponse
from .time_unit import TimeUnit
from .tool_call import ToolCall
+from .tool_call_response import ToolCallResponse
from .tool_choice import ToolChoice
from .tool_function import ToolFunction
from .tool_kernel_request import ToolKernelRequest
@@ -167,16 +207,45 @@
from .version_status import VersionStatus
__all__ = [
+ "AgentCallResponse",
+ "AgentCallResponseToolChoice",
+ "AgentCallStreamResponse",
+ "AgentCallStreamResponsePayload",
"AgentConfigResponse",
+ "AgentContinueResponse",
+ "AgentContinueResponseToolChoice",
+ "AgentContinueStreamResponse",
+ "AgentContinueStreamResponsePayload",
+ "AgentInlineTool",
+ "AgentKernelRequest",
+ "AgentKernelRequestReasoningEffort",
+ "AgentKernelRequestStop",
+ "AgentKernelRequestTemplate",
+ "AgentKernelRequestToolsItem",
+ "AgentLinkedFileRequest",
+ "AgentLinkedFileResponse",
+ "AgentLinkedFileResponseFile",
+ "AgentLogResponse",
+ "AgentLogResponseToolChoice",
+ "AgentLogStreamResponse",
+ "AgentResponse",
+ "AgentResponseReasoningEffort",
+ "AgentResponseStop",
+ "AgentResponseTemplate",
+ "AgentResponseToolsItem",
+ "AnthropicRedactedThinkingContent",
+ "AnthropicThinkingContent",
"BaseModelsUserResponse",
"BooleanEvaluatorStatsResponse",
"ChatMessage",
"ChatMessageContent",
"ChatMessageContentItem",
+ "ChatMessageThinkingItem",
"ChatRole",
"ChatToolType",
"CodeEvaluatorRequest",
"ConfigToolResponse",
+ "CreateAgentLogResponse",
"CreateDatapointRequest",
"CreateDatapointRequestTargetValue",
"CreateEvaluatorLogResponse",
@@ -221,10 +290,12 @@
"EvaluatorReturnTypeEnum",
"EvaluatorVersionId",
"EvaluatorsRequest",
+ "EventType",
"ExternalEvaluatorRequest",
"FeedbackType",
"FileEnvironmentResponse",
"FileEnvironmentResponseFile",
+ "FileEnvironmentVariableRequest",
"FileId",
"FilePath",
"FileRequest",
@@ -242,7 +313,9 @@
"ImageUrl",
"ImageUrlDetail",
"InputResponse",
+ "LinkedFileRequest",
"LinkedToolResponse",
+ "ListAgents",
"ListDatasets",
"ListEvaluators",
"ListFlows",
@@ -251,6 +324,7 @@
"LlmEvaluatorRequest",
"LogResponse",
"LogStatus",
+ "LogStreamResponse",
"ModelEndpoints",
"ModelProviders",
"MonitoringEvaluatorEnvironmentRequest",
@@ -259,15 +333,18 @@
"MonitoringEvaluatorVersionRequest",
"NumericEvaluatorStatsResponse",
"ObservabilityStatus",
+ "OnAgentCallEnum",
+ "OpenAiReasoningEffort",
"OverallStats",
+ "PaginatedDataAgentResponse",
"PaginatedDataEvaluationLogResponse",
"PaginatedDataEvaluatorResponse",
"PaginatedDataFlowResponse",
"PaginatedDataLogResponse",
"PaginatedDataPromptResponse",
"PaginatedDataToolResponse",
- "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse",
- "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem",
+ "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse",
+ "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem",
"PaginatedDatapointResponse",
"PaginatedDatasetResponse",
"PaginatedEvaluationResponse",
@@ -276,6 +353,7 @@
"PlatformAccessEnum",
"PopulateTemplateResponse",
"PopulateTemplateResponsePopulatedTemplate",
+ "PopulateTemplateResponseReasoningEffort",
"PopulateTemplateResponseStop",
"PopulateTemplateResponseTemplate",
"ProjectSortBy",
@@ -284,15 +362,16 @@
"PromptCallResponseToolChoice",
"PromptCallStreamResponse",
"PromptKernelRequest",
+ "PromptKernelRequestReasoningEffort",
"PromptKernelRequestStop",
"PromptKernelRequestTemplate",
"PromptLogResponse",
"PromptLogResponseToolChoice",
"PromptResponse",
+ "PromptResponseReasoningEffort",
"PromptResponseStop",
"PromptResponseTemplate",
"ProviderApiKeys",
- "ReasoningEffort",
"ResponseFormat",
"ResponseFormatType",
"RunStatsResponse",
@@ -305,6 +384,7 @@
"TextEvaluatorStatsResponse",
"TimeUnit",
"ToolCall",
+ "ToolCallResponse",
"ToolChoice",
"ToolFunction",
"ToolKernelRequest",
diff --git a/src/humanloop/types/agent_call_response.py b/src/humanloop/types/agent_call_response.py
new file mode 100644
index 00000000..ba3bbfec
--- /dev/null
+++ b/src/humanloop/types/agent_call_response.py
@@ -0,0 +1,224 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
+from .evaluator_response import EvaluatorResponse
+from .flow_response import FlowResponse
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse
+from .prompt_response import PromptResponse
+from .tool_response import ToolResponse
+from .version_deployment_response import VersionDeploymentResponse
+from .version_id_response import VersionIdResponse
+from .agent_log_response import AgentLogResponse
+from .evaluator_log_response import EvaluatorLogResponse
+from .flow_log_response import FlowLogResponse
+from .prompt_log_response import PromptLogResponse
+from .tool_log_response import ToolLogResponse
+import typing
+from .chat_message import ChatMessage
+import pydantic
+from .agent_call_response_tool_choice import AgentCallResponseToolChoice
+import datetime as dt
+from .log_status import LogStatus
+from .log_response import LogResponse
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class AgentCallResponse(UncheckedBaseModel):
+ """
+    Response model for an Agent call.
+ """
+
+ output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
+ """
+ The message returned by the provider.
+ """
+
+ prompt_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of tokens in the prompt used to generate the output.
+ """
+
+ reasoning_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of reasoning tokens used to generate the output.
+ """
+
+ output_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of tokens in the output generated by the model.
+ """
+
+ prompt_cost: typing.Optional[float] = pydantic.Field(default=None)
+ """
+    Cost in dollars associated with the tokens in the prompt.
+ """
+
+ output_cost: typing.Optional[float] = pydantic.Field(default=None)
+ """
+    Cost in dollars associated with the tokens in the output.
+ """
+
+ finish_reason: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Reason the generation finished.
+ """
+
+ messages: typing.Optional[typing.List[ChatMessage]] = pydantic.Field(default=None)
+ """
+    The messages passed to the provider chat endpoint.
+ """
+
+ tool_choice: typing.Optional[AgentCallResponseToolChoice] = pydantic.Field(default=None)
+ """
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+    - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+ """
+
+ agent: AgentResponse = pydantic.Field()
+ """
+ Agent that generated the Log.
+ """
+
+ start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ When the logged event started.
+ """
+
+ end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ When the logged event ended.
+ """
+
+ output: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ User defined timestamp for when the log was created.
+ """
+
+ error: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Captured log and debug statements.
+ """
+
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Raw request sent to provider.
+ """
+
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+    Raw response received from the provider.
+ """
+
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ The inputs passed to the prompt template.
+ """
+
+ source: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifies where the model was called from.
+ """
+
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Any additional metadata to record.
+ """
+
+ log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
+ """
+ Status of the Agent Log. If `incomplete`, the Agent turn was suspended due to a tool call and can be continued by calling /agents/continue with responses to the Agent's last message (which should contain tool calls). See the `previous_agent_message` field for easy access to the Agent's last message.
+ """
+
+ source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ """
+
+ trace_parent_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The ID of the parent Log to nest this Log under in a Trace.
+ """
+
+ batches: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
+ """
+ Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations
+ """
+
+ user: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ End-user ID related to the Log.
+ """
+
+ environment: typing.Optional[str] = pydantic.Field(default=None)
+ """
+    The name of the Environment the Log is associated with.
+ """
+
+ save: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ Whether the request/response payloads will be stored on Humanloop.
+ """
+
+ log_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ """
+
+ id: str = pydantic.Field()
+ """
+ Unique identifier for the Log.
+ """
+
+ evaluator_logs: typing.List[EvaluatorLogResponse] = pydantic.Field()
+ """
+ List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
+ """
+
+ trace_flow_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifier for the Flow that the Trace belongs to.
+ """
+
+ trace_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifier for the Trace that the Log belongs to.
+ """
+
+ trace_children: typing.Optional[typing.List[LogResponse]] = pydantic.Field(default=None)
+ """
+ Logs nested under this Log in the Trace.
+ """
+
+ previous_agent_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
+ """
+ The Agent's last message, which should contain tool calls. Only populated if the Log is incomplete due to a suspended Agent turn with tool calls. This is useful for continuing the Agent call by calling /agents/continue.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
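+
+
+# Hedged usage sketch, not part of the generated model. An `incomplete`
+# log_status means the Agent turn was suspended on tool calls, and
+# `previous_agent_message` carries the assistant message whose tool calls need
+# responses. The client and method names below are assumptions:
+#
+#     response = client.agents.call(path="my-agent", messages=[...])
+#     if response.log_status == "incomplete" and response.previous_agent_message:
+#         results = run_tools(response.previous_agent_message)  # user-supplied helper
+#         client.agents.continue_(log_id=response.id, messages=results)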
diff --git a/src/humanloop/types/agent_call_response_tool_choice.py b/src/humanloop/types/agent_call_response_tool_choice.py
new file mode 100644
index 00000000..95eca73e
--- /dev/null
+++ b/src/humanloop/types/agent_call_response_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .tool_choice import ToolChoice
+
+AgentCallResponseToolChoice = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice
+]
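+
+# Hedged note (illustrative): the union accepts one of the literal strings, or
+# a `ToolChoice` object forcing a named function, as in the documented
+# `{'type': 'function', 'function': {'name': ...}}` form:
+#
+#     choice: AgentCallResponseToolChoice = "required"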
diff --git a/src/humanloop/types/agent_call_stream_response.py b/src/humanloop/types/agent_call_stream_response.py
new file mode 100644
index 00000000..673d3738
--- /dev/null
+++ b/src/humanloop/types/agent_call_stream_response.py
@@ -0,0 +1,44 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_log_response import AgentLogResponse
+from .agent_response import AgentResponse
+from .evaluator_log_response import EvaluatorLogResponse
+from .evaluator_response import EvaluatorResponse
+from .flow_log_response import FlowLogResponse
+from .flow_response import FlowResponse
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse
+from .prompt_log_response import PromptLogResponse
+from .prompt_response import PromptResponse
+from .tool_log_response import ToolLogResponse
+from .tool_response import ToolResponse
+from .version_deployment_response import VersionDeploymentResponse
+from .version_id_response import VersionIdResponse
+import typing
+from .agent_call_stream_response_payload import AgentCallStreamResponsePayload
+from .event_type import EventType
+import datetime as dt
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class AgentCallStreamResponse(UncheckedBaseModel):
+ """
+    Response model for calling an Agent in streaming mode.
+ """
+
+ log_id: str
+ message: str
+ payload: typing.Optional[AgentCallStreamResponsePayload] = None
+ type: EventType
+ created_at: dt.datetime
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/agent_call_stream_response_payload.py b/src/humanloop/types/agent_call_stream_response_payload.py
new file mode 100644
index 00000000..85422047
--- /dev/null
+++ b/src/humanloop/types/agent_call_stream_response_payload.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .log_stream_response import LogStreamResponse
+from .log_response import LogResponse
+from .tool_call import ToolCall
+
+AgentCallStreamResponsePayload = typing.Union[LogStreamResponse, LogResponse, ToolCall]
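+
+
+# Hedged helper sketch (illustrative, not generated): shows how a consumer
+# might narrow this union at runtime when handling stream events.
+def _classify_payload(payload: AgentCallStreamResponsePayload) -> str:
+    if isinstance(payload, ToolCall):
+        return "tool_call"  # the Agent requested a tool invocation
+    if isinstance(payload, LogResponse):
+        return "log"  # a complete Log record
+    return "chunk"  # an incremental LogStreamResponse delta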
diff --git a/src/humanloop/types/agent_continue_response.py b/src/humanloop/types/agent_continue_response.py
new file mode 100644
index 00000000..0bbd7858
--- /dev/null
+++ b/src/humanloop/types/agent_continue_response.py
@@ -0,0 +1,224 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
+from .evaluator_response import EvaluatorResponse
+from .flow_response import FlowResponse
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse
+from .prompt_response import PromptResponse
+from .tool_response import ToolResponse
+from .version_deployment_response import VersionDeploymentResponse
+from .version_id_response import VersionIdResponse
+from .agent_log_response import AgentLogResponse
+from .evaluator_log_response import EvaluatorLogResponse
+from .flow_log_response import FlowLogResponse
+from .prompt_log_response import PromptLogResponse
+from .tool_log_response import ToolLogResponse
+import typing
+from .chat_message import ChatMessage
+import pydantic
+from .agent_continue_response_tool_choice import AgentContinueResponseToolChoice
+import datetime as dt
+from .log_status import LogStatus
+from .log_response import LogResponse
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class AgentContinueResponse(UncheckedBaseModel):
+ """
+ Response model for continuing an Agent call.
+ """
+
+ output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
+ """
+ The message returned by the provider.
+ """
+
+ prompt_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of tokens in the prompt used to generate the output.
+ """
+
+ reasoning_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of reasoning tokens used to generate the output.
+ """
+
+ output_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of tokens in the output generated by the model.
+ """
+
+ prompt_cost: typing.Optional[float] = pydantic.Field(default=None)
+ """
+    Cost in dollars associated with the tokens in the prompt.
+ """
+
+ output_cost: typing.Optional[float] = pydantic.Field(default=None)
+ """
+    Cost in dollars associated with the tokens in the output.
+ """
+
+ finish_reason: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Reason the generation finished.
+ """
+
+ messages: typing.Optional[typing.List[ChatMessage]] = pydantic.Field(default=None)
+ """
+    The messages passed to the provider chat endpoint.
+ """
+
+ tool_choice: typing.Optional[AgentContinueResponseToolChoice] = pydantic.Field(default=None)
+ """
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+    - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+ """
+
+ agent: AgentResponse = pydantic.Field()
+ """
+ Agent that generated the Log.
+ """
+
+ start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ When the logged event started.
+ """
+
+ end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ When the logged event ended.
+ """
+
+ output: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ User defined timestamp for when the log was created.
+ """
+
+ error: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Captured log and debug statements.
+ """
+
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Raw request sent to provider.
+ """
+
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+    Raw response received from the provider.
+ """
+
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ The inputs passed to the prompt template.
+ """
+
+ source: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifies where the model was called from.
+ """
+
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Any additional metadata to record.
+ """
+
+ log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
+ """
+ Status of the Agent Log. If `incomplete`, the Agent turn was suspended due to a tool call and can be continued by calling /agents/continue with responses to the Agent's last message (which should contain tool calls). See the `previous_agent_message` field for easy access to the Agent's last message.
+ """
+
+ source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ """
+
+ trace_parent_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The ID of the parent Log to nest this Log under in a Trace.
+ """
+
+ batches: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
+ """
+ Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations
+ """
+
+ user: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ End-user ID related to the Log.
+ """
+
+ environment: typing.Optional[str] = pydantic.Field(default=None)
+ """
+    The name of the Environment the Log is associated with.
+ """
+
+ save: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ Whether the request/response payloads will be stored on Humanloop.
+ """
+
+ log_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ """
+
+ id: str = pydantic.Field()
+ """
+ Unique identifier for the Log.
+ """
+
+ evaluator_logs: typing.List[EvaluatorLogResponse] = pydantic.Field()
+ """
+ List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
+ """
+
+ trace_flow_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifier for the Flow that the Trace belongs to.
+ """
+
+ trace_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifier for the Trace that the Log belongs to.
+ """
+
+ trace_children: typing.Optional[typing.List[LogResponse]] = pydantic.Field(default=None)
+ """
+ Logs nested under this Log in the Trace.
+ """
+
+ previous_agent_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
+ """
+ The Agent's last message, which should contain tool calls. Only populated if the Log is incomplete due to a suspended Agent turn with tool calls. This is useful for continuing the Agent call by calling /agents/continue.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/agent_continue_response_tool_choice.py b/src/humanloop/types/agent_continue_response_tool_choice.py
new file mode 100644
index 00000000..20f3fb75
--- /dev/null
+++ b/src/humanloop/types/agent_continue_response_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .tool_choice import ToolChoice
+
+AgentContinueResponseToolChoice = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice
+]
diff --git a/src/humanloop/types/agent_continue_stream_response.py b/src/humanloop/types/agent_continue_stream_response.py
new file mode 100644
index 00000000..ff7a0fac
--- /dev/null
+++ b/src/humanloop/types/agent_continue_stream_response.py
@@ -0,0 +1,44 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_log_response import AgentLogResponse
+from .agent_response import AgentResponse
+from .evaluator_log_response import EvaluatorLogResponse
+from .evaluator_response import EvaluatorResponse
+from .flow_log_response import FlowLogResponse
+from .flow_response import FlowResponse
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse
+from .prompt_log_response import PromptLogResponse
+from .prompt_response import PromptResponse
+from .tool_log_response import ToolLogResponse
+from .tool_response import ToolResponse
+from .version_deployment_response import VersionDeploymentResponse
+from .version_id_response import VersionIdResponse
+import typing
+from .agent_continue_stream_response_payload import AgentContinueStreamResponsePayload
+from .event_type import EventType
+import datetime as dt
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class AgentContinueStreamResponse(UncheckedBaseModel):
+ """
+ Response model for continuing an Agent call in streaming mode.
+ """
+
+ log_id: str
+ message: str
+ payload: typing.Optional[AgentContinueStreamResponsePayload] = None
+ type: EventType
+ created_at: dt.datetime
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/agent_continue_stream_response_payload.py b/src/humanloop/types/agent_continue_stream_response_payload.py
new file mode 100644
index 00000000..0e5f8a58
--- /dev/null
+++ b/src/humanloop/types/agent_continue_stream_response_payload.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .log_stream_response import LogStreamResponse
+from .log_response import LogResponse
+from .tool_call import ToolCall
+
+AgentContinueStreamResponsePayload = typing.Union[LogStreamResponse, LogResponse, ToolCall]
diff --git a/src/humanloop/types/agent_inline_tool.py b/src/humanloop/types/agent_inline_tool.py
new file mode 100644
index 00000000..dc618c35
--- /dev/null
+++ b/src/humanloop/types/agent_inline_tool.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .tool_function import ToolFunction
+from .on_agent_call_enum import OnAgentCallEnum
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class AgentInlineTool(UncheckedBaseModel):
+ type: typing.Literal["inline"] = "inline"
+ json_schema: ToolFunction
+ on_agent_call: typing.Optional[OnAgentCallEnum] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
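+
+
+# Hedged example, assuming ToolFunction mirrors the OpenAI function schema
+# (`name`/`description`/`parameters`) and that OnAgentCallEnum accepts
+# "continue"; values are illustrative:
+#
+#     calculator = AgentInlineTool(
+#         json_schema=ToolFunction(
+#             name="calculator",
+#             description="Evaluate a basic arithmetic expression.",
+#             parameters={"type": "object", "properties": {"expression": {"type": "string"}}},
+#         ),
+#         on_agent_call="continue",
+#     )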
diff --git a/src/humanloop/types/agent_kernel_request.py b/src/humanloop/types/agent_kernel_request.py
new file mode 100644
index 00000000..6503b104
--- /dev/null
+++ b/src/humanloop/types/agent_kernel_request.py
@@ -0,0 +1,122 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import pydantic
+import typing
+from .model_endpoints import ModelEndpoints
+from .agent_kernel_request_template import AgentKernelRequestTemplate
+from .template_language import TemplateLanguage
+from .model_providers import ModelProviders
+from .agent_kernel_request_stop import AgentKernelRequestStop
+from .response_format import ResponseFormat
+from .agent_kernel_request_reasoning_effort import AgentKernelRequestReasoningEffort
+from .agent_kernel_request_tools_item import AgentKernelRequestToolsItem
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class AgentKernelRequest(UncheckedBaseModel):
+ """
+    Model configuration for an Agent version.
+
+    Shares the common Prompt-related fields with PromptKernelRequest and adds Agent-specific fields such as `tools` and `max_iterations`.
+ """
+
+ model: str = pydantic.Field()
+ """
+ The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+ """
+
+ endpoint: typing.Optional[ModelEndpoints] = pydantic.Field(default=None)
+ """
+ The provider model endpoint used.
+ """
+
+ template: typing.Optional[AgentKernelRequestTemplate] = pydantic.Field(default=None)
+ """
+ The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+ For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+ For completion models, provide a prompt template as a string.
+
+ Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+ """
+
+ template_language: typing.Optional[TemplateLanguage] = pydantic.Field(default=None)
+ """
+ The template language to use for rendering the template.
+ """
+
+ provider: typing.Optional[ModelProviders] = pydantic.Field(default=None)
+ """
+ The company providing the underlying model service.
+ """
+
+ max_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt
+ """
+
+ temperature: typing.Optional[float] = pydantic.Field(default=None)
+ """
+    What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+ """
+
+ top_p: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+ """
+
+ stop: typing.Optional[AgentKernelRequestStop] = pydantic.Field(default=None)
+ """
+ The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+ """
+
+ presence_penalty: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+ """
+
+ frequency_penalty: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+ """
+
+ other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Other parameter values to be passed to the provider call.
+ """
+
+ seed: typing.Optional[int] = pydantic.Field(default=None)
+ """
+    If specified, the model will make a best effort to sample deterministically, but it is not guaranteed.
+ """
+
+ response_format: typing.Optional[ResponseFormat] = pydantic.Field(default=None)
+ """
+ The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+ """
+
+ reasoning_effort: typing.Optional[AgentKernelRequestReasoningEffort] = pydantic.Field(default=None)
+ """
+    Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
+ """
+
+ tools: typing.Optional[typing.List[AgentKernelRequestToolsItem]] = None
+ attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used.
+ """
+
+ max_iterations: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
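+
+
+# Hedged construction sketch (values illustrative; the fields are those defined
+# above, and dicts are coerced into ChatMessage by pydantic):
+#
+#     agent = AgentKernelRequest(
+#         model="gpt-4o",
+#         template=[{"role": "system", "content": "You are a helpful agent."}],
+#         temperature=0.7,
+#         max_iterations=5,
+#     )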
diff --git a/src/humanloop/types/agent_kernel_request_reasoning_effort.py b/src/humanloop/types/agent_kernel_request_reasoning_effort.py
new file mode 100644
index 00000000..a8e8e98b
--- /dev/null
+++ b/src/humanloop/types/agent_kernel_request_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .open_ai_reasoning_effort import OpenAiReasoningEffort
+
+AgentKernelRequestReasoningEffort = typing.Union[OpenAiReasoningEffort, int]
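+
+# Hedged examples (enum member assumed): OpenAI reasoning models take the enum
+# value, Anthropic reasoning models take an integer maximum token budget.
+#
+#     effort_openai: AgentKernelRequestReasoningEffort = "medium"
+#     effort_anthropic: AgentKernelRequestReasoningEffort = 1024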
diff --git a/src/humanloop/types/agent_kernel_request_stop.py b/src/humanloop/types/agent_kernel_request_stop.py
new file mode 100644
index 00000000..e38c12e2
--- /dev/null
+++ b/src/humanloop/types/agent_kernel_request_stop.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentKernelRequestStop = typing.Union[str, typing.List[str]]
diff --git a/src/humanloop/types/agent_kernel_request_template.py b/src/humanloop/types/agent_kernel_request_template.py
new file mode 100644
index 00000000..31a351f2
--- /dev/null
+++ b/src/humanloop/types/agent_kernel_request_template.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .chat_message import ChatMessage
+
+AgentKernelRequestTemplate = typing.Union[str, typing.List[ChatMessage]]
diff --git a/src/humanloop/types/agent_kernel_request_tools_item.py b/src/humanloop/types/agent_kernel_request_tools_item.py
new file mode 100644
index 00000000..82c2fecf
--- /dev/null
+++ b/src/humanloop/types/agent_kernel_request_tools_item.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .agent_linked_file_request import AgentLinkedFileRequest
+from .agent_inline_tool import AgentInlineTool
+
+AgentKernelRequestToolsItem = typing.Union[AgentLinkedFileRequest, AgentInlineTool]
diff --git a/src/humanloop/types/agent_linked_file_request.py b/src/humanloop/types/agent_linked_file_request.py
new file mode 100644
index 00000000..9efd4b6a
--- /dev/null
+++ b/src/humanloop/types/agent_linked_file_request.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .linked_file_request import LinkedFileRequest
+from .on_agent_call_enum import OnAgentCallEnum
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class AgentLinkedFileRequest(UncheckedBaseModel):
+ type: typing.Literal["file"] = "file"
+ link: LinkedFileRequest
+ on_agent_call: typing.Optional[OnAgentCallEnum] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
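+
+
+# Hedged example: LinkedFileRequest's field names are assumptions (e.g. a
+# `file_id` referencing an existing Tool or Prompt):
+#
+#     linked_tool = AgentLinkedFileRequest(
+#         link=LinkedFileRequest(file_id="tl_abc123"),
+#     )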
diff --git a/src/humanloop/types/agent_linked_file_response.py b/src/humanloop/types/agent_linked_file_response.py
new file mode 100644
index 00000000..d85d682e
--- /dev/null
+++ b/src/humanloop/types/agent_linked_file_response.py
@@ -0,0 +1,39 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .linked_file_request import LinkedFileRequest
+from .on_agent_call_enum import OnAgentCallEnum
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+from ..core.pydantic_utilities import update_forward_refs
+
+
+class AgentLinkedFileResponse(UncheckedBaseModel):
+ type: typing.Literal["file"] = "file"
+ link: LinkedFileRequest
+ on_agent_call: typing.Optional[OnAgentCallEnum] = None
+ file: typing.Optional["AgentLinkedFileResponseFile"] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+from .agent_response import AgentResponse # noqa: E402
+from .evaluator_response import EvaluatorResponse # noqa: E402
+from .flow_response import FlowResponse # noqa: E402
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402
+from .prompt_response import PromptResponse # noqa: E402
+from .tool_response import ToolResponse # noqa: E402
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402
+from .version_id_response import VersionIdResponse # noqa: E402
+from .agent_linked_file_response_file import AgentLinkedFileResponseFile # noqa: E402
+
+update_forward_refs(AgentLinkedFileResponse)
diff --git a/src/humanloop/types/agent_linked_file_response_file.py b/src/humanloop/types/agent_linked_file_response_file.py
new file mode 100644
index 00000000..42d38fe4
--- /dev/null
+++ b/src/humanloop/types/agent_linked_file_response_file.py
@@ -0,0 +1,16 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+import typing
+from .dataset_response import DatasetResponse
+
+if typing.TYPE_CHECKING:
+ from .prompt_response import PromptResponse
+ from .tool_response import ToolResponse
+ from .evaluator_response import EvaluatorResponse
+ from .flow_response import FlowResponse
+ from .agent_response import AgentResponse
+AgentLinkedFileResponseFile = typing.Union[
+ "PromptResponse", "ToolResponse", DatasetResponse, "EvaluatorResponse", "FlowResponse", "AgentResponse"
+]
diff --git a/src/humanloop/types/agent_log_response.py b/src/humanloop/types/agent_log_response.py
new file mode 100644
index 00000000..f5b5e8e8
--- /dev/null
+++ b/src/humanloop/types/agent_log_response.py
@@ -0,0 +1,224 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
+from .evaluator_response import EvaluatorResponse
+from .flow_response import FlowResponse
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse
+from .prompt_response import PromptResponse
+from .tool_response import ToolResponse
+from .version_deployment_response import VersionDeploymentResponse
+from .version_id_response import VersionIdResponse
+import typing
+from .chat_message import ChatMessage
+import pydantic
+from .agent_log_response_tool_choice import AgentLogResponseToolChoice
+import datetime as dt
+from .log_status import LogStatus
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.pydantic_utilities import update_forward_refs
+
+
+class AgentLogResponse(UncheckedBaseModel):
+ """
+ Response model for an Agent Log.
+ """
+
+ output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
+ """
+ The message returned by the provider.
+ """
+
+ prompt_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of tokens in the prompt used to generate the output.
+ """
+
+ reasoning_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of reasoning tokens used to generate the output.
+ """
+
+ output_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of tokens in the output generated by the model.
+ """
+
+ prompt_cost: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Cost in dollars associated with the tokens in the prompt.
+ """
+
+ output_cost: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Cost in dollars associated with the tokens in the output.
+ """
+
+ finish_reason: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Reason the generation finished.
+ """
+
+ messages: typing.Optional[typing.List[ChatMessage]] = pydantic.Field(default=None)
+ """
+ The messages passed to the provider chat endpoint.
+ """
+
+ tool_choice: typing.Optional[AgentLogResponseToolChoice] = pydantic.Field(default=None)
+ """
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+ """
+
+ agent: AgentResponse = pydantic.Field()
+ """
+ Agent that generated the Log.
+ """
+
+ start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ When the logged event started.
+ """
+
+ end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ When the logged event ended.
+ """
+
+ output: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ User defined timestamp for when the log was created.
+ """
+
+ error: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Captured log and debug statements.
+ """
+
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Raw request sent to provider.
+ """
+
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Raw response received from the provider.
+ """
+
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ The inputs passed to the prompt template.
+ """
+
+ source: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifies where the model was called from.
+ """
+
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Any additional metadata to record.
+ """
+
+ log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
+ """
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+ """
+
+ source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ """
+
+ trace_parent_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The ID of the parent Log to nest this Log under in a Trace.
+ """
+
+ batches: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
+ """
+ Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations.
+ """
+
+ user: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ End-user ID related to the Log.
+ """
+
+ environment: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The name of the Environment the Log is associated with.
+ """
+
+ save: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ Whether the request/response payloads will be stored on Humanloop.
+ """
+
+ log_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ """
+
+ id: str = pydantic.Field()
+ """
+ Unique identifier for the Log.
+ """
+
+ evaluator_logs: typing.List["EvaluatorLogResponse"] = pydantic.Field()
+ """
+ List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
+ """
+
+ trace_flow_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifier for the Flow that the Trace belongs to.
+ """
+
+ trace_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifier for the Trace that the Log belongs to.
+ """
+
+ trace_children: typing.Optional[typing.List["LogResponse"]] = pydantic.Field(default=None)
+ """
+ Logs nested under this Log in the Trace.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+from .evaluator_log_response import EvaluatorLogResponse # noqa: E402
+from .flow_log_response import FlowLogResponse # noqa: E402
+from .prompt_log_response import PromptLogResponse # noqa: E402
+from .tool_log_response import ToolLogResponse # noqa: E402
+from .log_response import LogResponse # noqa: E402
+
+update_forward_refs(AgentLogResponse)
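
Most usage and cost fields on `AgentLogResponse` are optional, so downstream arithmetic has to treat them as possibly missing. Two small helpers, as a sketch:

    from humanloop.types.agent_log_response import AgentLogResponse

    def log_cost(log: AgentLogResponse) -> float:
        """Total cost in dollars, counting missing components as zero."""
        return (log.prompt_cost or 0.0) + (log.output_cost or 0.0)

    def log_tokens(log: AgentLogResponse) -> int:
        """Total tokens across prompt, reasoning, and output."""
        return (log.prompt_tokens or 0) + (log.reasoning_tokens or 0) + (log.output_tokens or 0)
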
diff --git a/src/humanloop/types/agent_log_response_tool_choice.py b/src/humanloop/types/agent_log_response_tool_choice.py
new file mode 100644
index 00000000..5cb07628
--- /dev/null
+++ b/src/humanloop/types/agent_log_response_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .tool_choice import ToolChoice
+
+AgentLogResponseToolChoice = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice
+]
diff --git a/src/humanloop/types/agent_log_stream_response.py b/src/humanloop/types/agent_log_stream_response.py
new file mode 100644
index 00000000..91547189
--- /dev/null
+++ b/src/humanloop/types/agent_log_stream_response.py
@@ -0,0 +1,98 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+import pydantic
+import datetime as dt
+from .chat_message import ChatMessage
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class AgentLogStreamResponse(UncheckedBaseModel):
+ """
+ Streamed response chunk for an Agent Log.
+ """
+
+ output: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ User defined timestamp for when the log was created.
+ """
+
+ error: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Captured log and debug statements.
+ """
+
+ output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
+ """
+ The message returned by the provider.
+ """
+
+ prompt_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of tokens in the prompt used to generate the output.
+ """
+
+ reasoning_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of reasoning tokens used to generate the output.
+ """
+
+ output_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of tokens in the output generated by the model.
+ """
+
+ prompt_cost: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Cost in dollars associated with the tokens in the prompt.
+ """
+
+ output_cost: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Cost in dollars associated with the tokens in the output.
+ """
+
+ finish_reason: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Reason the generation finished.
+ """
+
+ id: str = pydantic.Field()
+ """
+ ID of the log.
+ """
+
+ agent_id: str = pydantic.Field()
+ """
+ ID of the Agent the log belongs to.
+ """
+
+ version_id: str = pydantic.Field()
+ """
+ ID of the specific version of the Agent.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
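
A consumption sketch for the streaming variant. It assumes the caller already has an iterable of `AgentLogStreamResponse` chunks and that each chunk's `output` is an incremental delta; the patch itself does not pin down the chunk semantics:

    import typing

    from humanloop.types.agent_log_stream_response import AgentLogStreamResponse

    def collect_output(chunks: typing.Iterable[AgentLogStreamResponse]) -> str:
        parts: typing.List[str] = []
        for chunk in chunks:
            if chunk.error:
                raise RuntimeError(f"log {chunk.id} failed: {chunk.error}")
            if chunk.output:
                parts.append(chunk.output)
        return "".join(parts)
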
diff --git a/src/humanloop/types/agent_response.py b/src/humanloop/types/agent_response.py
new file mode 100644
index 00000000..0487d7b7
--- /dev/null
+++ b/src/humanloop/types/agent_response.py
@@ -0,0 +1,265 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+from ..core.unchecked_base_model import UncheckedBaseModel
+import pydantic
+import typing
+from .model_endpoints import ModelEndpoints
+from .agent_response_template import AgentResponseTemplate
+from .template_language import TemplateLanguage
+from .model_providers import ModelProviders
+from .agent_response_stop import AgentResponseStop
+from .response_format import ResponseFormat
+from .agent_response_reasoning_effort import AgentResponseReasoningEffort
+import typing_extensions
+from ..core.serialization import FieldMetadata
+from .environment_response import EnvironmentResponse
+import datetime as dt
+from .user_response import UserResponse
+from .version_status import VersionStatus
+from .input_response import InputResponse
+from .evaluator_aggregate import EvaluatorAggregate
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.pydantic_utilities import update_forward_refs
+
+
+class AgentResponse(UncheckedBaseModel):
+ """
+ Base type that all File Responses should inherit from.
+
+ Attributes defined here are common to all File Responses and should be overridden
+ in the inheriting classes with documentation and appropriate Field definitions.
+ """
+
+ path: str = pydantic.Field()
+ """
+ Path of the Agent, including the name, which is used as a unique identifier.
+ """
+
+ id: str = pydantic.Field()
+ """
+ Unique identifier for the Agent.
+ """
+
+ directory_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ ID of the directory that the file is in on Humanloop.
+ """
+
+ model: str = pydantic.Field()
+ """
+ The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+ """
+
+ endpoint: typing.Optional[ModelEndpoints] = pydantic.Field(default=None)
+ """
+ The provider model endpoint used.
+ """
+
+ template: typing.Optional[AgentResponseTemplate] = pydantic.Field(default=None)
+ """
+ The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+ For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+ For completion models, provide a prompt template as a string.
+
+ Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+ """
+
+ template_language: typing.Optional[TemplateLanguage] = pydantic.Field(default=None)
+ """
+ The template language to use for rendering the template.
+ """
+
+ provider: typing.Optional[ModelProviders] = pydantic.Field(default=None)
+ """
+ The company providing the underlying model service.
+ """
+
+ max_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt.
+ """
+
+ temperature: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+ """
+
+ top_p: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+ """
+
+ stop: typing.Optional[AgentResponseStop] = pydantic.Field(default=None)
+ """
+ The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+ """
+
+ presence_penalty: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+ """
+
+ frequency_penalty: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+ """
+
+ other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Other parameter values to be passed to the provider call.
+ """
+
+ seed: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ If specified, the model will make a best effort to sample deterministically, but it is not guaranteed.
+ """
+
+ response_format: typing.Optional[ResponseFormat] = pydantic.Field(default=None)
+ """
+ The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+ """
+
+ reasoning_effort: typing.Optional[AgentResponseReasoningEffort] = pydantic.Field(default=None)
+ """
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
+ """
+
+ tools: typing.List["AgentResponseToolsItem"] = pydantic.Field()
+ """
+ List of tools that the Agent can call. These can be linked files or inline tools.
+ """
+
+ attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used.
+ """
+
+ max_iterations: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
+ """
+
+ version_name: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Unique name for the Agent version. Version names must be unique for a given Agent.
+ """
+
+ version_description: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Description of the version, e.g., the changes made in this version.
+ """
+
+ description: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Description of the Agent.
+ """
+
+ tags: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
+ """
+ List of tags associated with the file.
+ """
+
+ readme: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Long description of the file.
+ """
+
+ name: str = pydantic.Field()
+ """
+ Name of the Agent.
+ """
+
+ schema_: typing_extensions.Annotated[
+ typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema")
+ ] = pydantic.Field(default=None)
+ """
+ The JSON schema for the File.
+ """
+
+ version_id: str = pydantic.Field()
+ """
+ Unique identifier for the specific Agent Version. If no query params are provided, the default deployed Agent Version is returned.
+ """
+
+ type: typing.Optional[typing.Literal["agent"]] = None
+ environments: typing.Optional[typing.List[EnvironmentResponse]] = pydantic.Field(default=None)
+ """
+ The list of environments the Agent Version is deployed to.
+ """
+
+ created_at: dt.datetime
+ updated_at: dt.datetime
+ created_by: typing.Optional[UserResponse] = pydantic.Field(default=None)
+ """
+ The user who created the Agent.
+ """
+
+ committed_by: typing.Optional[UserResponse] = pydantic.Field(default=None)
+ """
+ The user who committed the Agent Version.
+ """
+
+ committed_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ The date and time the Agent Version was committed.
+ """
+
+ status: VersionStatus = pydantic.Field()
+ """
+ The status of the Agent Version.
+ """
+
+ last_used_at: dt.datetime
+ version_logs_count: int = pydantic.Field()
+ """
+ The number of logs that have been generated for this Agent Version.
+ """
+
+ total_logs_count: int = pydantic.Field()
+ """
+ The number of logs that have been generated across all Agent Versions.
+ """
+
+ inputs: typing.List[InputResponse] = pydantic.Field()
+ """
+ Inputs associated with the Agent. Inputs correspond to any of the variables used within the Agent template.
+ """
+
+ evaluators: typing.Optional[typing.List["MonitoringEvaluatorResponse"]] = pydantic.Field(default=None)
+ """
+ Evaluators that have been attached to this Agent that are used for monitoring logs.
+ """
+
+ evaluator_aggregates: typing.Optional[typing.List[EvaluatorAggregate]] = pydantic.Field(default=None)
+ """
+ Aggregation of Evaluator results for the Agent Version.
+ """
+
+ content: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The serialized kernel for the Agent. Corresponds to the .agent file.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402
+from .evaluator_response import EvaluatorResponse # noqa: E402
+from .flow_response import FlowResponse # noqa: E402
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402
+from .prompt_response import PromptResponse # noqa: E402
+from .tool_response import ToolResponse # noqa: E402
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402
+from .version_id_response import VersionIdResponse # noqa: E402
+from .agent_response_tools_item import AgentResponseToolsItem # noqa: E402
+
+update_forward_refs(AgentResponse)
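
`version_logs_count` and `total_logs_count` together describe how much of an Agent's traffic a particular version has handled. For example:

    from humanloop.types.agent_response import AgentResponse

    def version_log_share(agent: AgentResponse) -> float:
        """Fraction of the Agent's logs generated by this specific version."""
        if agent.total_logs_count == 0:
            return 0.0
        return agent.version_logs_count / agent.total_logs_count
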
diff --git a/src/humanloop/types/agent_response_reasoning_effort.py b/src/humanloop/types/agent_response_reasoning_effort.py
new file mode 100644
index 00000000..59254f38
--- /dev/null
+++ b/src/humanloop/types/agent_response_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .open_ai_reasoning_effort import OpenAiReasoningEffort
+
+AgentResponseReasoningEffort = typing.Union[OpenAiReasoningEffort, int]
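
Per the `reasoning_effort` docstring above, the two arms of this union map to the two provider conventions:

    from humanloop.types.agent_response_reasoning_effort import AgentResponseReasoningEffort

    # OpenAI reasoning models (o1, o3-mini) take a coarse effort level...
    openai_effort: AgentResponseReasoningEffort = "medium"

    # ...while Anthropic reasoning models take a maximum thinking-token budget.
    anthropic_effort: AgentResponseReasoningEffort = 4096
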
diff --git a/src/humanloop/types/agent_response_stop.py b/src/humanloop/types/agent_response_stop.py
new file mode 100644
index 00000000..5c3b6a48
--- /dev/null
+++ b/src/humanloop/types/agent_response_stop.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentResponseStop = typing.Union[str, typing.List[str]]
diff --git a/src/humanloop/types/agent_response_template.py b/src/humanloop/types/agent_response_template.py
new file mode 100644
index 00000000..4c084dc8
--- /dev/null
+++ b/src/humanloop/types/agent_response_template.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .chat_message import ChatMessage
+
+AgentResponseTemplate = typing.Union[str, typing.List[ChatMessage]]
diff --git a/src/humanloop/types/agent_response_tools_item.py b/src/humanloop/types/agent_response_tools_item.py
new file mode 100644
index 00000000..8095608f
--- /dev/null
+++ b/src/humanloop/types/agent_response_tools_item.py
@@ -0,0 +1,9 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+import typing
+from .agent_inline_tool import AgentInlineTool
+
+if typing.TYPE_CHECKING:
+ from .agent_linked_file_response import AgentLinkedFileResponse
+AgentResponseToolsItem = typing.Union["AgentLinkedFileResponse", AgentInlineTool]
diff --git a/src/humanloop/types/anthropic_redacted_thinking_content.py b/src/humanloop/types/anthropic_redacted_thinking_content.py
new file mode 100644
index 00000000..ebac897b
--- /dev/null
+++ b/src/humanloop/types/anthropic_redacted_thinking_content.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class AnthropicRedactedThinkingContent(UncheckedBaseModel):
+ type: typing.Literal["redacted_thinking"] = "redacted_thinking"
+ data: str = pydantic.Field()
+ """
+ Thinking block that Anthropic redacted for safety reasons. The user is expected to pass the block back to Anthropic.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/anthropic_thinking_content.py b/src/humanloop/types/anthropic_thinking_content.py
new file mode 100644
index 00000000..bf7fc808
--- /dev/null
+++ b/src/humanloop/types/anthropic_thinking_content.py
@@ -0,0 +1,28 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class AnthropicThinkingContent(UncheckedBaseModel):
+ type: typing.Literal["thinking"] = "thinking"
+ thinking: str = pydantic.Field()
+ """
+ Model's chain-of-thought for providing the response.
+ """
+
+ signature: str = pydantic.Field()
+ """
+ Cryptographic signature that verifies the thinking block was generated by Anthropic.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/chat_message.py b/src/humanloop/types/chat_message.py
index c09f2768..c72bc90d 100644
--- a/src/humanloop/types/chat_message.py
+++ b/src/humanloop/types/chat_message.py
@@ -6,6 +6,7 @@
import pydantic
from .chat_role import ChatRole
from .tool_call import ToolCall
+from .chat_message_thinking_item import ChatMessageThinkingItem
from ..core.pydantic_utilities import IS_PYDANTIC_V2
@@ -35,6 +36,11 @@ class ChatMessage(UncheckedBaseModel):
A list of tool calls requested by the assistant.
"""
+ thinking: typing.Optional[typing.List[ChatMessageThinkingItem]] = pydantic.Field(default=None)
+ """
+ Model's chain-of-thought for providing the response. Present on assistant messages if the model supports it.
+ """
+
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
else:
diff --git a/src/humanloop/types/chat_message_thinking_item.py b/src/humanloop/types/chat_message_thinking_item.py
new file mode 100644
index 00000000..0a507724
--- /dev/null
+++ b/src/humanloop/types/chat_message_thinking_item.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .anthropic_thinking_content import AnthropicThinkingContent
+from .anthropic_redacted_thinking_content import AnthropicRedactedThinkingContent
+
+ChatMessageThinkingItem = typing.Union[AnthropicThinkingContent, AnthropicRedactedThinkingContent]
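
A sketch of building the two thinking-block variants that can appear in `ChatMessage.thinking`; the string values are placeholders:

    from humanloop.types.anthropic_redacted_thinking_content import AnthropicRedactedThinkingContent
    from humanloop.types.anthropic_thinking_content import AnthropicThinkingContent
    from humanloop.types.chat_message_thinking_item import ChatMessageThinkingItem

    visible: ChatMessageThinkingItem = AnthropicThinkingContent(
        thinking="step-by-step reasoning...",  # placeholder chain-of-thought
        signature="sig-from-anthropic",        # placeholder verification signature
    )

    redacted: ChatMessageThinkingItem = AnthropicRedactedThinkingContent(
        data="opaque-redacted-payload",  # placeholder; passed back to Anthropic as-is
    )
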
diff --git a/src/humanloop/types/create_agent_log_response.py b/src/humanloop/types/create_agent_log_response.py
new file mode 100644
index 00000000..9dc66629
--- /dev/null
+++ b/src/humanloop/types/create_agent_log_response.py
@@ -0,0 +1,42 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import pydantic
+import typing
+from .log_status import LogStatus
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class CreateAgentLogResponse(UncheckedBaseModel):
+ """
+ Response for an Agent Log.
+ """
+
+ id: str = pydantic.Field()
+ """
+ Unique identifier for the Log.
+ """
+
+ agent_id: str = pydantic.Field()
+ """
+ Unique identifier for the Agent.
+ """
+
+ version_id: str = pydantic.Field()
+ """
+ Unique identifier for the Agent Version.
+ """
+
+ log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
+ """
+ Status of the Agent Log. When an Agent Log is marked as `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/dataset_response.py b/src/humanloop/types/dataset_response.py
index af79f597..2c614521 100644
--- a/src/humanloop/types/dataset_response.py
+++ b/src/humanloop/types/dataset_response.py
@@ -3,6 +3,8 @@
from ..core.unchecked_base_model import UncheckedBaseModel
import pydantic
import typing
+import typing_extensions
+from ..core.serialization import FieldMetadata
from .environment_response import EnvironmentResponse
import datetime as dt
from .user_response import UserResponse
@@ -43,6 +45,13 @@ class DatasetResponse(UncheckedBaseModel):
Description of the Dataset.
"""
+ schema_: typing_extensions.Annotated[
+ typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema")
+ ] = pydantic.Field(default=None)
+ """
+ The JSON schema for the File.
+ """
+
readme: typing.Optional[str] = pydantic.Field(default=None)
"""
Long description of the file.
diff --git a/src/humanloop/types/directory_with_parents_and_children_response.py b/src/humanloop/types/directory_with_parents_and_children_response.py
index 5828a678..51f879b8 100644
--- a/src/humanloop/types/directory_with_parents_and_children_response.py
+++ b/src/humanloop/types/directory_with_parents_and_children_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/directory_with_parents_and_children_response_files_item.py b/src/humanloop/types/directory_with_parents_and_children_response_files_item.py
index 0bfeebf7..9d0d5fc4 100644
--- a/src/humanloop/types/directory_with_parents_and_children_response_files_item.py
+++ b/src/humanloop/types/directory_with_parents_and_children_response_files_item.py
@@ -6,7 +6,8 @@
from .evaluator_response import EvaluatorResponse
from .dataset_response import DatasetResponse
from .flow_response import FlowResponse
+from .agent_response import AgentResponse
DirectoryWithParentsAndChildrenResponseFilesItem = typing.Union[
- PromptResponse, ToolResponse, EvaluatorResponse, DatasetResponse, FlowResponse
+ PromptResponse, ToolResponse, EvaluatorResponse, DatasetResponse, FlowResponse, AgentResponse
]
diff --git a/src/humanloop/types/evaluatee_response.py b/src/humanloop/types/evaluatee_response.py
index 9ba9fe4d..4332aa12 100644
--- a/src/humanloop/types/evaluatee_response.py
+++ b/src/humanloop/types/evaluatee_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/evaluation_evaluator_response.py b/src/humanloop/types/evaluation_evaluator_response.py
index 413081c6..0c7de27e 100644
--- a/src/humanloop/types/evaluation_evaluator_response.py
+++ b/src/humanloop/types/evaluation_evaluator_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/evaluation_log_response.py b/src/humanloop/types/evaluation_log_response.py
index 6c931db0..84d117e2 100644
--- a/src/humanloop/types/evaluation_log_response.py
+++ b/src/humanloop/types/evaluation_log_response.py
@@ -1,6 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_log_response import AgentLogResponse
+from .agent_response import AgentResponse
from .evaluator_log_response import EvaluatorLogResponse
from .evaluator_response import EvaluatorResponse
from .flow_log_response import FlowLogResponse
diff --git a/src/humanloop/types/evaluation_response.py b/src/humanloop/types/evaluation_response.py
index f113fff5..bcda94a4 100644
--- a/src/humanloop/types/evaluation_response.py
+++ b/src/humanloop/types/evaluation_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/evaluation_run_response.py b/src/humanloop/types/evaluation_run_response.py
index 1203ce2c..74d59e4c 100644
--- a/src/humanloop/types/evaluation_run_response.py
+++ b/src/humanloop/types/evaluation_run_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/evaluation_runs_response.py b/src/humanloop/types/evaluation_runs_response.py
index d91e1ee9..e09b2a73 100644
--- a/src/humanloop/types/evaluation_runs_response.py
+++ b/src/humanloop/types/evaluation_runs_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/evaluator_log_response.py b/src/humanloop/types/evaluator_log_response.py
index e457d580..71ca76c0 100644
--- a/src/humanloop/types/evaluator_log_response.py
+++ b/src/humanloop/types/evaluator_log_response.py
@@ -2,6 +2,8 @@
from __future__ import annotations
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
@@ -189,6 +191,7 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_log_response import AgentLogResponse # noqa: E402
from .flow_log_response import FlowLogResponse # noqa: E402
from .prompt_log_response import PromptLogResponse # noqa: E402
from .tool_log_response import ToolLogResponse # noqa: E402
diff --git a/src/humanloop/types/evaluator_response.py b/src/humanloop/types/evaluator_response.py
index 175f456d..712ca698 100644
--- a/src/humanloop/types/evaluator_response.py
+++ b/src/humanloop/types/evaluator_response.py
@@ -5,6 +5,8 @@
import pydantic
import typing
from .evaluator_response_spec import EvaluatorResponseSpec
+import typing_extensions
+from ..core.serialization import FieldMetadata
from .environment_response import EnvironmentResponse
import datetime as dt
from .user_response import UserResponse
@@ -55,6 +57,13 @@ class EvaluatorResponse(UncheckedBaseModel):
Description of the Evaluator.
"""
+ schema_: typing_extensions.Annotated[
+ typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema")
+ ] = pydantic.Field(default=None)
+ """
+ The JSON schema for the File.
+ """
+
readme: typing.Optional[str] = pydantic.Field(default=None)
"""
Long description of the file.
@@ -124,6 +133,8 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402
+from .agent_response import AgentResponse # noqa: E402
from .flow_response import FlowResponse # noqa: E402
from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402
from .prompt_response import PromptResponse # noqa: E402
diff --git a/src/humanloop/types/event_type.py b/src/humanloop/types/event_type.py
new file mode 100644
index 00000000..128eed92
--- /dev/null
+++ b/src/humanloop/types/event_type.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+EventType = typing.Union[
+ typing.Literal[
+ "agent_turn_start",
+ "agent_turn_suspend",
+ "agent_turn_continue",
+ "agent_turn_end",
+ "agent_start",
+ "agent_update",
+ "agent_end",
+ "tool_start",
+ "tool_update",
+ "tool_end",
+ "error",
+ "agent_generation_error",
+ ],
+ typing.Any,
+]
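
The trailing `typing.Any` arm makes this union forward-compatible: event names added to the API later still validate. A filtering sketch that treats the end and error events as terminal, which is an assumption about the event semantics:

    from humanloop.types.event_type import EventType

    TERMINAL_EVENTS = {"agent_turn_end", "agent_end", "error", "agent_generation_error"}

    def is_terminal(event: EventType) -> bool:
        # Unknown event strings (permitted by the typing.Any arm) count as non-terminal.
        return event in TERMINAL_EVENTS
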
diff --git a/src/humanloop/types/file_environment_response.py b/src/humanloop/types/file_environment_response.py
index 70ed322f..7f34b7b3 100644
--- a/src/humanloop/types/file_environment_response.py
+++ b/src/humanloop/types/file_environment_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/file_environment_response_file.py b/src/humanloop/types/file_environment_response_file.py
index 2a105c9d..0254c2b8 100644
--- a/src/humanloop/types/file_environment_response_file.py
+++ b/src/humanloop/types/file_environment_response_file.py
@@ -6,7 +6,8 @@
from .dataset_response import DatasetResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
+from .agent_response import AgentResponse
FileEnvironmentResponseFile = typing.Union[
- PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse
+ PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse, AgentResponse
]
diff --git a/src/humanloop/types/file_environment_variable_request.py b/src/humanloop/types/file_environment_variable_request.py
new file mode 100644
index 00000000..8108245b
--- /dev/null
+++ b/src/humanloop/types/file_environment_variable_request.py
@@ -0,0 +1,27 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+
+
+class FileEnvironmentVariableRequest(UncheckedBaseModel):
+ name: str = pydantic.Field()
+ """
+ Name of the environment variable.
+ """
+
+ value: str = pydantic.Field()
+ """
+ Value of the environment variable.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
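
Construction is a plain name/value pair; for example, with hypothetical values:

    from humanloop.types.file_environment_variable_request import FileEnvironmentVariableRequest

    env_var = FileEnvironmentVariableRequest(
        name="OPENAI_API_KEY",   # hypothetical variable name
        value="sk-placeholder",  # hypothetical value
    )
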
diff --git a/src/humanloop/types/file_type.py b/src/humanloop/types/file_type.py
index 7a870b84..f235825b 100644
--- a/src/humanloop/types/file_type.py
+++ b/src/humanloop/types/file_type.py
@@ -2,4 +2,4 @@
import typing
-FileType = typing.Union[typing.Literal["prompt", "tool", "dataset", "evaluator", "flow"], typing.Any]
+FileType = typing.Union[typing.Literal["prompt", "tool", "dataset", "evaluator", "flow", "agent"], typing.Any]
diff --git a/src/humanloop/types/files_tool_type.py b/src/humanloop/types/files_tool_type.py
index c32b9755..753d9ba2 100644
--- a/src/humanloop/types/files_tool_type.py
+++ b/src/humanloop/types/files_tool_type.py
@@ -3,5 +3,5 @@
import typing
FilesToolType = typing.Union[
- typing.Literal["pinecone_search", "google", "mock", "snippet", "json_schema", "get_api_call"], typing.Any
+ typing.Literal["pinecone_search", "google", "mock", "snippet", "json_schema", "get_api_call", "python"], typing.Any
]
diff --git a/src/humanloop/types/flow_log_response.py b/src/humanloop/types/flow_log_response.py
index ba1e1cf6..58a87fac 100644
--- a/src/humanloop/types/flow_log_response.py
+++ b/src/humanloop/types/flow_log_response.py
@@ -2,6 +2,8 @@
from __future__ import annotations
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
@@ -173,6 +175,7 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_log_response import AgentLogResponse # noqa: E402
from .evaluator_log_response import EvaluatorLogResponse # noqa: E402
from .prompt_log_response import PromptLogResponse # noqa: E402
from .tool_log_response import ToolLogResponse # noqa: E402
diff --git a/src/humanloop/types/flow_response.py b/src/humanloop/types/flow_response.py
index 4017b3b7..7768778e 100644
--- a/src/humanloop/types/flow_response.py
+++ b/src/humanloop/types/flow_response.py
@@ -4,6 +4,8 @@
from ..core.unchecked_base_model import UncheckedBaseModel
import pydantic
import typing
+import typing_extensions
+from ..core.serialization import FieldMetadata
from .environment_response import EnvironmentResponse
import datetime as dt
from .user_response import UserResponse
@@ -57,6 +59,13 @@ class FlowResponse(UncheckedBaseModel):
Description of the Flow.
"""
+ schema_: typing_extensions.Annotated[
+ typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema")
+ ] = pydantic.Field(default=None)
+ """
+ The JSON schema for the File.
+ """
+
readme: typing.Optional[str] = pydantic.Field(default=None)
"""
Long description of the file.
@@ -111,6 +120,8 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402
+from .agent_response import AgentResponse # noqa: E402
from .evaluator_response import EvaluatorResponse # noqa: E402
from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402
from .prompt_response import PromptResponse # noqa: E402
diff --git a/src/humanloop/types/linked_file_request.py b/src/humanloop/types/linked_file_request.py
new file mode 100644
index 00000000..ee45ffdf
--- /dev/null
+++ b/src/humanloop/types/linked_file_request.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class LinkedFileRequest(UncheckedBaseModel):
+ file_id: str
+ environment_id: typing.Optional[str] = None
+ version_id: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/list_agents.py b/src/humanloop/types/list_agents.py
new file mode 100644
index 00000000..36481f41
--- /dev/null
+++ b/src/humanloop/types/list_agents.py
@@ -0,0 +1,31 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
+from .evaluator_response import EvaluatorResponse
+from .flow_response import FlowResponse
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse
+from .prompt_response import PromptResponse
+from .tool_response import ToolResponse
+from .version_deployment_response import VersionDeploymentResponse
+from .version_id_response import VersionIdResponse
+import typing
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class ListAgents(UncheckedBaseModel):
+ records: typing.List[AgentResponse] = pydantic.Field()
+ """
+ The list of Agents.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/list_evaluators.py b/src/humanloop/types/list_evaluators.py
index 61edbec5..7b736e14 100644
--- a/src/humanloop/types/list_evaluators.py
+++ b/src/humanloop/types/list_evaluators.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/list_flows.py b/src/humanloop/types/list_flows.py
index 686dab26..41ec4008 100644
--- a/src/humanloop/types/list_flows.py
+++ b/src/humanloop/types/list_flows.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/list_prompts.py b/src/humanloop/types/list_prompts.py
index 94cda05e..f773d3f9 100644
--- a/src/humanloop/types/list_prompts.py
+++ b/src/humanloop/types/list_prompts.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/list_tools.py b/src/humanloop/types/list_tools.py
index 4080a6a1..84ddc89c 100644
--- a/src/humanloop/types/list_tools.py
+++ b/src/humanloop/types/list_tools.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/log_response.py b/src/humanloop/types/log_response.py
index 0ba81dd3..cd7a0a26 100644
--- a/src/humanloop/types/log_response.py
+++ b/src/humanloop/types/log_response.py
@@ -9,4 +9,7 @@
from .tool_log_response import ToolLogResponse
from .evaluator_log_response import EvaluatorLogResponse
from .flow_log_response import FlowLogResponse
-LogResponse = typing.Union["PromptLogResponse", "ToolLogResponse", "EvaluatorLogResponse", "FlowLogResponse"]
+ from .agent_log_response import AgentLogResponse
+LogResponse = typing.Union[
+ "PromptLogResponse", "ToolLogResponse", "EvaluatorLogResponse", "FlowLogResponse", "AgentLogResponse"
+]
diff --git a/src/humanloop/types/log_stream_response.py b/src/humanloop/types/log_stream_response.py
new file mode 100644
index 00000000..69ffacf4
--- /dev/null
+++ b/src/humanloop/types/log_stream_response.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .prompt_call_stream_response import PromptCallStreamResponse
+from .agent_log_stream_response import AgentLogStreamResponse
+
+LogStreamResponse = typing.Union[PromptCallStreamResponse, AgentLogStreamResponse]
diff --git a/src/humanloop/types/model_providers.py b/src/humanloop/types/model_providers.py
index 8473d2ae..3f2c99fb 100644
--- a/src/humanloop/types/model_providers.py
+++ b/src/humanloop/types/model_providers.py
@@ -4,7 +4,7 @@
ModelProviders = typing.Union[
typing.Literal[
- "openai", "openai_azure", "mock", "anthropic", "bedrock", "cohere", "replicate", "google", "groq", "deepseek"
+ "anthropic", "bedrock", "cohere", "deepseek", "google", "groq", "mock", "openai", "openai_azure", "replicate"
],
typing.Any,
]
diff --git a/src/humanloop/types/monitoring_evaluator_response.py b/src/humanloop/types/monitoring_evaluator_response.py
index e70dc4fb..1809af57 100644
--- a/src/humanloop/types/monitoring_evaluator_response.py
+++ b/src/humanloop/types/monitoring_evaluator_response.py
@@ -39,6 +39,8 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402
+from .agent_response import AgentResponse # noqa: E402
from .evaluator_response import EvaluatorResponse # noqa: E402
from .flow_response import FlowResponse # noqa: E402
from .prompt_response import PromptResponse # noqa: E402
diff --git a/src/humanloop/types/on_agent_call_enum.py b/src/humanloop/types/on_agent_call_enum.py
new file mode 100644
index 00000000..3730256e
--- /dev/null
+++ b/src/humanloop/types/on_agent_call_enum.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+OnAgentCallEnum = typing.Union[typing.Literal["stop", "continue"], typing.Any]
diff --git a/src/humanloop/types/open_ai_reasoning_effort.py b/src/humanloop/types/open_ai_reasoning_effort.py
new file mode 100644
index 00000000..d8c48547
--- /dev/null
+++ b/src/humanloop/types/open_ai_reasoning_effort.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+OpenAiReasoningEffort = typing.Union[typing.Literal["high", "medium", "low"], typing.Any]
diff --git a/src/humanloop/types/paginated_data_agent_response.py b/src/humanloop/types/paginated_data_agent_response.py
new file mode 100644
index 00000000..0febbadd
--- /dev/null
+++ b/src/humanloop/types/paginated_data_agent_response.py
@@ -0,0 +1,31 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
+from .evaluator_response import EvaluatorResponse
+from .flow_response import FlowResponse
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse
+from .prompt_response import PromptResponse
+from .tool_response import ToolResponse
+from .version_deployment_response import VersionDeploymentResponse
+from .version_id_response import VersionIdResponse
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class PaginatedDataAgentResponse(UncheckedBaseModel):
+ records: typing.List[AgentResponse]
+ page: int
+ size: int
+ total: int
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/paginated_data_evaluation_log_response.py b/src/humanloop/types/paginated_data_evaluation_log_response.py
index 9e3c568e..c508f8a6 100644
--- a/src/humanloop/types/paginated_data_evaluation_log_response.py
+++ b/src/humanloop/types/paginated_data_evaluation_log_response.py
@@ -1,6 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_log_response import AgentLogResponse
+from .agent_response import AgentResponse
from .evaluator_log_response import EvaluatorLogResponse
from .evaluator_response import EvaluatorResponse
from .flow_log_response import FlowLogResponse
diff --git a/src/humanloop/types/paginated_data_evaluator_response.py b/src/humanloop/types/paginated_data_evaluator_response.py
index 275f0528..2e82c736 100644
--- a/src/humanloop/types/paginated_data_evaluator_response.py
+++ b/src/humanloop/types/paginated_data_evaluator_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/paginated_data_flow_response.py b/src/humanloop/types/paginated_data_flow_response.py
index 990d58be..6cfcf9ae 100644
--- a/src/humanloop/types/paginated_data_flow_response.py
+++ b/src/humanloop/types/paginated_data_flow_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/paginated_data_log_response.py b/src/humanloop/types/paginated_data_log_response.py
index 57bae587..f41ca9ba 100644
--- a/src/humanloop/types/paginated_data_log_response.py
+++ b/src/humanloop/types/paginated_data_log_response.py
@@ -1,6 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_log_response import AgentLogResponse
+from .agent_response import AgentResponse
from .evaluator_log_response import EvaluatorLogResponse
from .evaluator_response import EvaluatorResponse
from .flow_log_response import FlowLogResponse
diff --git a/src/humanloop/types/paginated_data_prompt_response.py b/src/humanloop/types/paginated_data_prompt_response.py
index ff71e584..d9e1d914 100644
--- a/src/humanloop/types/paginated_data_prompt_response.py
+++ b/src/humanloop/types/paginated_data_prompt_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/paginated_data_tool_response.py b/src/humanloop/types/paginated_data_tool_response.py
index 0e52b361..e2962e87 100644
--- a/src/humanloop/types/paginated_data_tool_response.py
+++ b/src/humanloop/types/paginated_data_tool_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py b/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py
similarity index 76%
rename from src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py
rename to src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py
index bd7082b3..87d5b603 100644
--- a/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py
+++ b/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
@@ -9,16 +11,18 @@
from .version_deployment_response import VersionDeploymentResponse
from .version_id_response import VersionIdResponse
import typing
-from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item import (
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem,
+from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item import (
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem,
)
from ..core.pydantic_utilities import IS_PYDANTIC_V2
import pydantic
-class PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse(UncheckedBaseModel):
+class PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse(
+ UncheckedBaseModel
+):
records: typing.List[
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem
]
page: int
size: int
diff --git a/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py b/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py
similarity index 63%
rename from src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py
rename to src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py
index 65c4f324..a1b4f056 100644
--- a/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py
+++ b/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py
@@ -6,7 +6,8 @@
from .dataset_response import DatasetResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
+from .agent_response import AgentResponse
-PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem = typing.Union[
- PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse
-]
+PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem = (
+ typing.Union[PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse, AgentResponse]
+)
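The widened union above now carries six members. A minimal sketch of how a caller might narrow records drawn from this union, assuming the generated `humanloop` package is importable and that file responses expose the `path` attribute the sync client below relies on:

```
import typing

from humanloop.types.agent_response import AgentResponse
from humanloop.types.prompt_response import PromptResponse


def describe(record: typing.Any) -> str:
    # isinstance checks narrow the six-member union one member at a time.
    if isinstance(record, AgentResponse):
        return f"agent at {record.path}"
    if isinstance(record, PromptResponse):
        return f"prompt at {record.path}"
    return f"other file type: {type(record).__name__}"
```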
diff --git a/src/humanloop/types/paginated_evaluation_response.py b/src/humanloop/types/paginated_evaluation_response.py
index 78e177e8..16232e0b 100644
--- a/src/humanloop/types/paginated_evaluation_response.py
+++ b/src/humanloop/types/paginated_evaluation_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/populate_template_response.py b/src/humanloop/types/populate_template_response.py
index d587d175..d2d36f78 100644
--- a/src/humanloop/types/populate_template_response.py
+++ b/src/humanloop/types/populate_template_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
@@ -16,9 +18,11 @@
from .model_providers import ModelProviders
from .populate_template_response_stop import PopulateTemplateResponseStop
from .response_format import ResponseFormat
-from .reasoning_effort import ReasoningEffort
+from .populate_template_response_reasoning_effort import PopulateTemplateResponseReasoningEffort
from .tool_function import ToolFunction
from .linked_tool_response import LinkedToolResponse
+import typing_extensions
+from ..core.serialization import FieldMetadata
from .environment_response import EnvironmentResponse
import datetime as dt
from .user_response import UserResponse
@@ -126,9 +130,9 @@ class PopulateTemplateResponse(UncheckedBaseModel):
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
"""
- reasoning_effort: typing.Optional[ReasoningEffort] = pydantic.Field(default=None)
+ reasoning_effort: typing.Optional[PopulateTemplateResponseReasoningEffort] = pydantic.Field(default=None)
"""
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which specifies the maximum token budget.
"""
tools: typing.Optional[typing.List[ToolFunction]] = pydantic.Field(default=None)
@@ -176,6 +180,13 @@ class PopulateTemplateResponse(UncheckedBaseModel):
Name of the Prompt.
"""
+ schema_: typing_extensions.Annotated[
+ typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema")
+ ] = pydantic.Field(default=None)
+ """
+ The JSON schema for the Prompt.
+ """
+
version_id: str = pydantic.Field()
"""
Unique identifier for the specific Prompt Version. If no query params provided, the default deployed Prompt Version is returned.
@@ -220,6 +231,11 @@ class PopulateTemplateResponse(UncheckedBaseModel):
Aggregation of Evaluator results for the Prompt Version.
"""
+ content: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The serialized kernel for the Prompt. Corresponds to the .prompt file.
+ """
+
populated_template: typing.Optional[PopulateTemplateResponsePopulatedTemplate] = pydantic.Field(default=None)
"""
The template populated with the input values you provided in the request. Returns None if no template exists.
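The new `schema_` field above uses an alias so the Python attribute avoids clashing with pydantic's own `schema` machinery while still (de)serializing under the JSON key `schema`. A standalone sketch of the same pattern, assuming pydantic v2 (the generated model instead routes the alias through Fern's `FieldMetadata` helper):

```
import typing

import pydantic


class PromptLike(pydantic.BaseModel):
    model_config = pydantic.ConfigDict(populate_by_name=True)

    # Attribute name is schema_; the wire format uses "schema".
    schema_: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(
        default=None, alias="schema"
    )


parsed = PromptLike.model_validate({"schema": {"type": "object"}})
assert parsed.schema_ == {"type": "object"}
# Serializing by alias restores the original JSON key.
assert parsed.model_dump(by_alias=True) == {"schema": {"type": "object"}}
```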
diff --git a/src/humanloop/types/populate_template_response_reasoning_effort.py b/src/humanloop/types/populate_template_response_reasoning_effort.py
new file mode 100644
index 00000000..8dd9f7f6
--- /dev/null
+++ b/src/humanloop/types/populate_template_response_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .open_ai_reasoning_effort import OpenAiReasoningEffort
+
+PopulateTemplateResponseReasoningEffort = typing.Union[OpenAiReasoningEffort, int]
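This union replaces the shared `ReasoningEffort` literal deleted further down. The two accepted value shapes, in a hedged sketch (the `"high"` literal mirrors the levels the old type carried; the integer is the Anthropic-style token budget):

```
from humanloop.types.populate_template_response_reasoning_effort import (
    PopulateTemplateResponseReasoningEffort,
)

openai_style: PopulateTemplateResponseReasoningEffort = "high"   # OpenAI effort level
anthropic_style: PopulateTemplateResponseReasoningEffort = 4096  # Anthropic max token budget
```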
diff --git a/src/humanloop/types/prompt_call_response.py b/src/humanloop/types/prompt_call_response.py
index 4e1ae69c..ec74437f 100644
--- a/src/humanloop/types/prompt_call_response.py
+++ b/src/humanloop/types/prompt_call_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/prompt_kernel_request.py b/src/humanloop/types/prompt_kernel_request.py
index 6461bb19..80ba5ed5 100644
--- a/src/humanloop/types/prompt_kernel_request.py
+++ b/src/humanloop/types/prompt_kernel_request.py
@@ -9,12 +9,18 @@
from .model_providers import ModelProviders
from .prompt_kernel_request_stop import PromptKernelRequestStop
from .response_format import ResponseFormat
-from .reasoning_effort import ReasoningEffort
+from .prompt_kernel_request_reasoning_effort import PromptKernelRequestReasoningEffort
from .tool_function import ToolFunction
from ..core.pydantic_utilities import IS_PYDANTIC_V2
class PromptKernelRequest(UncheckedBaseModel):
+ """
+ Base class used by both PromptKernelRequest and AgentKernelRequest.
+
+ Contains the consistent Prompt-related fields.
+ """
+
model: str = pydantic.Field()
"""
The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
@@ -90,9 +96,9 @@ class PromptKernelRequest(UncheckedBaseModel):
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
"""
- reasoning_effort: typing.Optional[ReasoningEffort] = pydantic.Field(default=None)
+ reasoning_effort: typing.Optional[PromptKernelRequestReasoningEffort] = pydantic.Field(default=None)
"""
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which specifies the maximum token budget.
"""
tools: typing.Optional[typing.List[ToolFunction]] = pydantic.Field(default=None)
diff --git a/src/humanloop/types/prompt_kernel_request_reasoning_effort.py b/src/humanloop/types/prompt_kernel_request_reasoning_effort.py
new file mode 100644
index 00000000..dda61bb4
--- /dev/null
+++ b/src/humanloop/types/prompt_kernel_request_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .open_ai_reasoning_effort import OpenAiReasoningEffort
+
+PromptKernelRequestReasoningEffort = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/types/prompt_log_response.py b/src/humanloop/types/prompt_log_response.py
index 2a1bad11..a9e26318 100644
--- a/src/humanloop/types/prompt_log_response.py
+++ b/src/humanloop/types/prompt_log_response.py
@@ -2,6 +2,8 @@
from __future__ import annotations
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
@@ -213,6 +215,7 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_log_response import AgentLogResponse # noqa: E402
from .evaluator_log_response import EvaluatorLogResponse # noqa: E402
from .flow_log_response import FlowLogResponse # noqa: E402
from .tool_log_response import ToolLogResponse # noqa: E402
diff --git a/src/humanloop/types/prompt_response.py b/src/humanloop/types/prompt_response.py
index 07f4755d..786617f3 100644
--- a/src/humanloop/types/prompt_response.py
+++ b/src/humanloop/types/prompt_response.py
@@ -10,9 +10,11 @@
from .model_providers import ModelProviders
from .prompt_response_stop import PromptResponseStop
from .response_format import ResponseFormat
-from .reasoning_effort import ReasoningEffort
+from .prompt_response_reasoning_effort import PromptResponseReasoningEffort
from .tool_function import ToolFunction
from .linked_tool_response import LinkedToolResponse
+import typing_extensions
+from ..core.serialization import FieldMetadata
from .environment_response import EnvironmentResponse
import datetime as dt
from .user_response import UserResponse
@@ -120,9 +122,9 @@ class PromptResponse(UncheckedBaseModel):
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
"""
- reasoning_effort: typing.Optional[ReasoningEffort] = pydantic.Field(default=None)
+ reasoning_effort: typing.Optional[PromptResponseReasoningEffort] = pydantic.Field(default=None)
"""
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which specifies the maximum token budget.
"""
tools: typing.Optional[typing.List[ToolFunction]] = pydantic.Field(default=None)
@@ -170,6 +172,13 @@ class PromptResponse(UncheckedBaseModel):
Name of the Prompt.
"""
+ schema_: typing_extensions.Annotated[
+ typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema")
+ ] = pydantic.Field(default=None)
+ """
+ The JSON schema for the Prompt.
+ """
+
version_id: str = pydantic.Field()
"""
Unique identifier for the specific Prompt Version. If no query params provided, the default deployed Prompt Version is returned.
@@ -214,6 +223,11 @@ class PromptResponse(UncheckedBaseModel):
Aggregation of Evaluator results for the Prompt Version.
"""
+ content: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The serialized kernel for the Prompt. Corresponds to the .prompt file.
+ """
+
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
else:
@@ -224,6 +238,8 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402
+from .agent_response import AgentResponse # noqa: E402
from .evaluator_response import EvaluatorResponse # noqa: E402
from .flow_response import FlowResponse # noqa: E402
from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402
diff --git a/src/humanloop/types/prompt_response_reasoning_effort.py b/src/humanloop/types/prompt_response_reasoning_effort.py
new file mode 100644
index 00000000..e136637f
--- /dev/null
+++ b/src/humanloop/types/prompt_response_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .open_ai_reasoning_effort import OpenAiReasoningEffort
+
+PromptResponseReasoningEffort = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/types/reasoning_effort.py b/src/humanloop/types/reasoning_effort.py
deleted file mode 100644
index da0a0354..00000000
--- a/src/humanloop/types/reasoning_effort.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-ReasoningEffort = typing.Union[typing.Literal["high", "medium", "low"], typing.Any]
diff --git a/src/humanloop/types/run_version_response.py b/src/humanloop/types/run_version_response.py
index d94b1178..770dc487 100644
--- a/src/humanloop/types/run_version_response.py
+++ b/src/humanloop/types/run_version_response.py
@@ -5,5 +5,6 @@
from .tool_response import ToolResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
+from .agent_response import AgentResponse
-RunVersionResponse = typing.Union[PromptResponse, ToolResponse, EvaluatorResponse, FlowResponse]
+RunVersionResponse = typing.Union[PromptResponse, ToolResponse, EvaluatorResponse, FlowResponse, AgentResponse]
diff --git a/src/humanloop/types/tool_call_response.py b/src/humanloop/types/tool_call_response.py
new file mode 100644
index 00000000..55bf2712
--- /dev/null
+++ b/src/humanloop/types/tool_call_response.py
@@ -0,0 +1,168 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
+from .evaluator_response import EvaluatorResponse
+from .flow_response import FlowResponse
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse
+from .prompt_response import PromptResponse
+from .tool_response import ToolResponse
+from .version_deployment_response import VersionDeploymentResponse
+from .version_id_response import VersionIdResponse
+from .agent_log_response import AgentLogResponse
+from .evaluator_log_response import EvaluatorLogResponse
+from .flow_log_response import FlowLogResponse
+from .prompt_log_response import PromptLogResponse
+from .tool_log_response import ToolLogResponse
+import typing
+import datetime as dt
+import pydantic
+from .log_status import LogStatus
+from .log_response import LogResponse
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class ToolCallResponse(UncheckedBaseModel):
+ """
+ Response model for a Tool call.
+ """
+
+ start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ When the logged event started.
+ """
+
+ end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ When the logged event ended.
+ """
+
+ tool: ToolResponse = pydantic.Field()
+ """
+ Tool used to generate the Log.
+ """
+
+ output: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ User defined timestamp for when the log was created.
+ """
+
+ error: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Captured log and debug statements.
+ """
+
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Raw request sent to provider.
+ """
+
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Raw response received from the provider.
+ """
+
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ The inputs passed to the prompt template.
+ """
+
+ source: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifies where the model was called from.
+ """
+
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Any additional metadata to record.
+ """
+
+ log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
+ """
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+ """
+
+ source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ """
+
+ trace_parent_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The ID of the parent Log to nest this Log under in a Trace.
+ """
+
+ batches: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
+ """
+ Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations
+ """
+
+ user: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ End-user ID related to the Log.
+ """
+
+ environment: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The name of the Environment the Log is associated to.
+ """
+
+ save: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ Whether the request/response payloads will be stored on Humanloop.
+ """
+
+ log_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ """
+
+ id: str = pydantic.Field()
+ """
+ ID of the log.
+ """
+
+ evaluator_logs: typing.List[EvaluatorLogResponse] = pydantic.Field()
+ """
+ List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
+ """
+
+ trace_flow_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifier for the Flow that the Trace belongs to.
+ """
+
+ trace_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ ID of the Trace containing the Tool Call Log.
+ """
+
+ trace_children: typing.Optional[typing.List[LogResponse]] = pydantic.Field(default=None)
+ """
+ Logs nested under this Log in the Trace.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
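Most fields on the new model are optional; only `id`, `tool`, and `evaluator_logs` are required. A sketch of building one via pydantic's no-validation `construct`, with the nested `ToolResponse` stubbed out purely for illustration (a real payload would carry a full object there):

```
from humanloop.types.tool_call_response import ToolCallResponse

log = ToolCallResponse.construct(
    id="log_123",        # required: ID of the log
    evaluator_logs=[],   # required: Evaluator judgments on the Log
    tool=None,           # stubbed here; required and non-None in validated payloads
    output="42",         # optional generated output
)
print(log.id, log.output)
```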
diff --git a/src/humanloop/types/tool_log_response.py b/src/humanloop/types/tool_log_response.py
index 1b6081c3..251223af 100644
--- a/src/humanloop/types/tool_log_response.py
+++ b/src/humanloop/types/tool_log_response.py
@@ -2,6 +2,8 @@
from __future__ import annotations
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
@@ -13,6 +15,7 @@
import datetime as dt
import pydantic
from .log_status import LogStatus
+from .chat_message import ChatMessage
from ..core.pydantic_utilities import IS_PYDANTIC_V2
from ..core.pydantic_utilities import update_forward_refs
@@ -152,6 +155,11 @@ class ToolLogResponse(UncheckedBaseModel):
Tool used to generate the Log.
"""
+ output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
+ """
+ The message returned by the Tool.
+ """
+
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
else:
@@ -162,6 +170,7 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_log_response import AgentLogResponse # noqa: E402
from .evaluator_log_response import EvaluatorLogResponse # noqa: E402
from .flow_log_response import FlowLogResponse # noqa: E402
from .prompt_log_response import PromptLogResponse # noqa: E402
diff --git a/src/humanloop/types/tool_response.py b/src/humanloop/types/tool_response.py
index 0b835918..70537215 100644
--- a/src/humanloop/types/tool_response.py
+++ b/src/humanloop/types/tool_response.py
@@ -152,6 +152,8 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402
+from .agent_response import AgentResponse # noqa: E402
from .evaluator_response import EvaluatorResponse # noqa: E402
from .flow_response import FlowResponse # noqa: E402
from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402
diff --git a/src/humanloop/types/version_deployment_response.py b/src/humanloop/types/version_deployment_response.py
index e2e82d9f..0db57d69 100644
--- a/src/humanloop/types/version_deployment_response.py
+++ b/src/humanloop/types/version_deployment_response.py
@@ -36,6 +36,8 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402
+from .agent_response import AgentResponse # noqa: E402
from .evaluator_response import EvaluatorResponse # noqa: E402
from .flow_response import FlowResponse # noqa: E402
from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402
diff --git a/src/humanloop/types/version_deployment_response_file.py b/src/humanloop/types/version_deployment_response_file.py
index e0f73573..4fadcff0 100644
--- a/src/humanloop/types/version_deployment_response_file.py
+++ b/src/humanloop/types/version_deployment_response_file.py
@@ -10,6 +10,7 @@
from .tool_response import ToolResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
+ from .agent_response import AgentResponse
VersionDeploymentResponseFile = typing.Union[
- "PromptResponse", "ToolResponse", DatasetResponse, "EvaluatorResponse", "FlowResponse"
+ "PromptResponse", "ToolResponse", DatasetResponse, "EvaluatorResponse", "FlowResponse", "AgentResponse"
]
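The quoted `"AgentResponse"` member follows the file's existing convention: the import sits under `TYPE_CHECKING` so the module loads without the circular imports these mutually referential response types would otherwise trigger. The pattern in isolation:

```
import typing

if typing.TYPE_CHECKING:
    # Resolved by type checkers only; never imported at runtime.
    from humanloop.types.agent_response import AgentResponse

# The quoted name defers resolution, so no runtime import is needed.
MaybeAgent = typing.Optional["AgentResponse"]
```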
diff --git a/src/humanloop/types/version_id_response.py b/src/humanloop/types/version_id_response.py
index 877851a9..e3f5dc27 100644
--- a/src/humanloop/types/version_id_response.py
+++ b/src/humanloop/types/version_id_response.py
@@ -30,6 +30,8 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402
+from .agent_response import AgentResponse # noqa: E402
from .evaluator_response import EvaluatorResponse # noqa: E402
from .flow_response import FlowResponse # noqa: E402
from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402
diff --git a/src/humanloop/types/version_id_response_version.py b/src/humanloop/types/version_id_response_version.py
index 2f56346c..b1cbd45d 100644
--- a/src/humanloop/types/version_id_response_version.py
+++ b/src/humanloop/types/version_id_response_version.py
@@ -10,6 +10,7 @@
from .tool_response import ToolResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
+ from .agent_response import AgentResponse
VersionIdResponseVersion = typing.Union[
- "PromptResponse", "ToolResponse", DatasetResponse, "EvaluatorResponse", "FlowResponse"
+ "PromptResponse", "ToolResponse", DatasetResponse, "EvaluatorResponse", "FlowResponse", "AgentResponse"
]
From 816eafff31a1d0a564e97c8b999d4bfbffb4afca Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Wed, 30 Apr 2025 17:14:28 +0100
Subject: [PATCH 17/39] add path filter for pulling
---
src/humanloop/client.py | 12 +++-
src/humanloop/sync/sync_client.py | 106 ++++++++++++++++++++++++++----
2 files changed, 103 insertions(+), 15 deletions(-)
diff --git a/src/humanloop/client.py b/src/humanloop/client.py
index 48e179b2..ae489f7f 100644
--- a/src/humanloop/client.py
+++ b/src/humanloop/client.py
@@ -367,12 +367,17 @@ def pull(self,
"""Pull prompt and agent files from Humanloop to local filesystem.
This method will:
- 1. Fetch all prompt and agent files from your Humanloop workspace
+ 1. Fetch prompt and agent files from your Humanloop workspace
2. Save them to the local filesystem using the client's files_directory (set during initialization)
3. Maintain the same directory structure as in Humanloop
4. Add appropriate file extensions (.prompt or .agent)
- By default, the operation will overwrite existing files with the latest version from Humanlooop
+ The path parameter can be used in the following ways:
+ - If it points to a specific file (e.g. "path/to/file.prompt" or "path/to/file.agent"), only that file will be pulled
+ - If it points to a directory (e.g. "path/to/directory"), all prompt and agent files in that directory will be pulled
+ - If no path is provided, all prompt and agent files will be pulled
+
+ The operation will overwrite existing files with the latest version from Humanloop
but will not delete local files that don't exist in the remote workspace.
Currently only supports syncing prompt and agent files. Other file types will be skipped.
@@ -389,7 +394,8 @@ def pull(self,
```
:param environment: The environment to pull the files from.
- :param path: The path to the files to pull on the Humanloop workspace. Can be a directory or a specific file.
+ :param path: Optional path to either a specific file (e.g. "path/to/file.prompt") or a directory (e.g. "path/to/directory").
+ If not provided, all prompt and agent files will be pulled.
:return: List of successfully processed file paths.
"""
return self._sync_client.pull(
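A usage sketch of the call shapes this docstring now documents, assuming the package's top-level `Humanloop` export; the API key and paths are placeholders:

```
from humanloop import Humanloop

hl = Humanloop(api_key="YOUR_API_KEY", files_directory="humanloop")

hl.pull(environment="production")                    # every prompt/agent file
hl.pull(path="marketing", environment="production")  # one directory
hl.pull(path="marketing/tagline.prompt")             # one specific file
```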
diff --git a/src/humanloop/sync/sync_client.py b/src/humanloop/sync/sync_client.py
index b8b8e855..6725a30d 100644
--- a/src/humanloop/sync/sync_client.py
+++ b/src/humanloop/sync/sync_client.py
@@ -40,6 +40,46 @@ def __init__(
self.base_dir = Path(base_dir)
self.max_workers = max_workers or multiprocessing.cpu_count() * 2
+ def _normalize_path(self, path: str) -> str:
+ """Normalize the path by:
+ 1. Removing any file extensions (.prompt, .agent)
+ 2. Converting backslashes to forward slashes
+ 3. Removing leading and trailing slashes
+ 4. Removing leading and trailing whitespace
+ 5. Normalizing multiple consecutive slashes into a single forward slash
+
+ Args:
+ path: The path to normalize
+
+ Returns:
+ The normalized path
+ """
+ # Remove a trailing .prompt/.agent extension (other dots in the path are preserved)
+ path = path.rsplit('.', 1)[0] if path.endswith(('.prompt', '.agent')) else path
+
+ # Convert backslashes to forward slashes
+ path = path.replace('\\', '/')
+
+ # Remove leading/trailing whitespace and slashes
+ path = path.strip().strip('/')
+
+ # Normalize multiple consecutive slashes into a single forward slash
+ while '//' in path:
+ path = path.replace('//', '/')
+
+ return path
+
+ def is_file(self, path: str) -> bool:
+ """Check if the path is a file by checking for .prompt or .agent extension.
+
+ Args:
+ path: The path to check
+
+ Returns:
+ True if the path ends with .prompt or .agent, False otherwise
+ """
+ return path.endswith('.prompt') or path.endswith('.agent')
+
def _save_serialized_file(self, serialized_content: str, file_path: str, file_type: FileType) -> None:
"""Save serialized file to local filesystem.
@@ -47,6 +87,9 @@ def _save_serialized_file(self, serialized_content: str, file_path: str, file_ty
serialized_content: The content to save
file_path: The path where to save the file
file_type: The type of file (prompt or agent)
+
+ Raises:
+ Exception: If there is an error saving the file
"""
try:
# Create full path including base_dir prefix
@@ -65,26 +108,44 @@ def _save_serialized_file(self, serialized_content: str, file_path: str, file_ty
logger.error(f"Failed to sync {file_type} {file_path}: {str(e)}")
raise
- def pull(self,
+ def _pull_file(self, path: str, environment: str | None = None) -> None:
+ """Pull a specific file from Humanloop to local filesystem.
+
+ Args:
+ path: The path of the file without the extension (e.g. "path/to/file")
+ environment: The environment to pull the file from
+
+ Raises:
+ ValueError: If the file type is not supported
+ Exception: If there is an error pulling the file
+ """
+ file = self.client.files.retrieve_by_path(
+ path,
+ environment=environment,
+ include_content=True
+ )
+
+ if file.type not in ["prompt", "agent"]:
+ raise ValueError(f"Unsupported file type: {file.type}")
+
+ self._save_serialized_file(file.content, file.path, file.type)
+
+ def _pull_directory(self,
+ path: str | None = None,
environment: str | None = None,
- directory: str | None = None,
- path: str | None = None,
) -> List[str]:
"""Sync prompt and agent files from Humanloop to local filesystem.
- If `path` is provided, only the file at that path will be pulled.
- If `directory` is provided, all files in that directory will be pulled (if both `path` and `directory` are provided, `path` will take precedence).
+ If `path` is provided, only the files under that path will be pulled.
If `environment` is provided, the files will be pulled from that environment.
Args:
- environment: The environment to pull the files from.
- directory: The directory to pull the files from.
- path: The path of a specific file to pull from.
+ path: The path of the directory to pull from (e.g. "path/to/directory")
+ environment: The environment to pull the files from
Returns:
List of successfully processed file paths
"""
-
successful_files = []
failed_files = []
page = 1
@@ -95,7 +156,8 @@ def pull(self,
type=["prompt", "agent"],
page=page,
include_content=True,
- environment=environment
+ environment=environment,
+ directory=path
)
if len(response.records) == 0:
@@ -109,7 +171,7 @@ def pull(self,
continue
if not file.path.startswith(path):
- # Filter by path
+ # Filter by path
continue
# Skip if no content
@@ -135,4 +197,24 @@ def pull(self,
if failed_files:
logger.error(f"Failed to sync {len(failed_files)} files")
- return successful_files
\ No newline at end of file
+ return successful_files
+
+ def pull(self, path: str, environment: str | None = None) -> List[str]:
+ """Pull files from Humanloop to local filesystem.
+
+ If the path ends with .prompt or .agent, pulls that specific file.
+ Otherwise, pulls all files under the specified directory path.
+
+ Args:
+ path: The path to pull from (either a specific file or directory)
+ environment: The environment to pull from
+
+ Returns:
+ List of successfully processed file paths
+ """
+ normalized_path = self._normalize_path(path)
+ if self.is_file(path):
+ self._pull_file(normalized_path, environment)
+ return [path]
+ else:
+ return self._pull_directory(normalized_path, environment)
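Expected behavior of the two helpers that drive the file-vs-directory dispatch above, with illustrative values (reusing the `hl` client from the earlier sketch; the underscore-prefixed `_normalize_path` is called here only to show its contract):

```
from humanloop.sync.sync_client import SyncClient

sync = SyncClient(client=hl)

assert sync.is_file("marketing/tagline.prompt") is True
assert sync.is_file("marketing") is False

# Extensions are stripped and separators tidied before dispatch, so both
# branches receive clean, extension-free workspace paths.
assert sync._normalize_path("marketing\\tagline.prompt") == "marketing/tagline"
assert sync._normalize_path(" /marketing//drafts/ ") == "marketing/drafts"
```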
From 39662df6a6b574f9c40c7d9826cebe465d8a1ad3 Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Wed, 30 Apr 2025 17:21:46 +0100
Subject: [PATCH 18/39] refactor overload, moving functionality to sync client
+ implement LRU cache for reading local files
---
src/humanloop/client.py | 2 +
src/humanloop/overload.py | 37 +++--------------
src/humanloop/sync/sync_client.py | 69 ++++++++++++++++++++++++++++---
3 files changed, 72 insertions(+), 36 deletions(-)
diff --git a/src/humanloop/client.py b/src/humanloop/client.py
index ae489f7f..c303e7cf 100644
--- a/src/humanloop/client.py
+++ b/src/humanloop/client.py
@@ -133,10 +133,12 @@ def __init__(
self.prompts = overload_call(client=self.prompts)
self.prompts = overload_with_local_files(
client=self.prompts,
+ sync_client=self._sync_client,
use_local_files=self.use_local_files
)
self.agents = overload_with_local_files(
client=self.agents,
+ sync_client=self._sync_client,
use_local_files=self.use_local_files
)
self.flows = overload_log(client=self.flows)
diff --git a/src/humanloop/overload.py b/src/humanloop/overload.py
index fbdc5fac..5d874d3d 100644
--- a/src/humanloop/overload.py
+++ b/src/humanloop/overload.py
@@ -15,6 +15,7 @@
from humanloop.prompts.client import PromptsClient
from humanloop.agents.client import AgentsClient
from humanloop.tools.client import ToolsClient
+from humanloop.sync.sync_client import SyncClient
from humanloop.types import FileType
from humanloop.types.create_evaluator_log_response import CreateEvaluatorLogResponse
from humanloop.types.create_flow_log_response import CreateFlowLogResponse
@@ -135,37 +136,9 @@ def _get_file_type_from_client(client: Union[PromptsClient, AgentsClient]) -> Fi
else:
raise ValueError(f"Unsupported client type: {type(client)}")
-def _handle_local_file(path: str, file_type: FileType) -> Optional[str]:
- """Handle reading from a local file if it exists.
-
- Args:
- path: The path to the file
- file_type: The type of file ("prompt" or "agent")
-
- Returns:
- The file content if found, None otherwise
- """
- try:
- # Construct path to local file
- local_path = Path("humanloop") / path # FLAG: ensure that when passing the path back to remote, it's using forward slashes
- # Add appropriate extension
- local_path = local_path.parent / f"{local_path.stem}.{file_type}"
-
- if local_path.exists():
- # Read the file content
- with open(local_path) as f:
- file_content = f.read()
- logger.debug(f"Using local file content from {local_path}")
- return file_content
- else:
- logger.warning(f"Local file not found: {local_path}, falling back to API")
- return None
- except Exception as e:
- logger.error(f"Error reading local file: {e}, falling back to API")
- return None
-
def overload_with_local_files(
client: Union[PromptsClient, AgentsClient],
+ sync_client: SyncClient,
use_local_files: bool,
) -> Union[PromptsClient, AgentsClient]:
"""Overload call and log methods to handle local files when use_local_files is True.
@@ -181,9 +154,11 @@ def overload_with_local_files(
def _overload(self, function_name: str, **kwargs) -> PromptCallResponse:
# Handle local files if enabled
if use_local_files and "path" in kwargs:
- file_content = _handle_local_file(kwargs["path"], file_type)
+ # Normalize the path and get file content
+ normalized_path = sync_client._normalize_path(kwargs["path"])
+ file_content = sync_client.get_file_content(normalized_path, file_type)
if file_content is not None:
- kwargs[file_type] = file_content
+ kwargs[file_type] = file_content
try:
if function_name == "call":
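The shape of the substitution being wired in above, reduced to a standalone sketch (names here are illustrative, not the SDK's internals): the wrapper consults the sync client for local content before delegating, injecting it under the `prompt`/`agent` keyword the API expects.

```
def with_local_files(call, sync_client, file_type, use_local_files):
    def wrapped(**kwargs):
        if use_local_files and "path" in kwargs:
            normalized = sync_client._normalize_path(kwargs["path"])
            content = sync_client.get_file_content(normalized, file_type)
            if content is not None:
                # e.g. kwargs["prompt"] = "<serialized .prompt file>"
                kwargs[file_type] = content
        return call(**kwargs)

    return wrapped
```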
diff --git a/src/humanloop/sync/sync_client.py b/src/humanloop/sync/sync_client.py
index 6725a30d..3f456d37 100644
--- a/src/humanloop/sync/sync_client.py
+++ b/src/humanloop/sync/sync_client.py
@@ -3,7 +3,8 @@
import logging
from pathlib import Path
import concurrent.futures
-from typing import List, TYPE_CHECKING, Union, cast, Optional
+from typing import List, TYPE_CHECKING, Union, cast, Optional, Dict
+from functools import lru_cache
from humanloop.types import FileType, PromptResponse, AgentResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse
from humanloop.core.api_error import ApiError
@@ -21,24 +22,78 @@
logger.addHandler(console_handler)
class SyncClient:
- """Client for managing synchronization between local filesystem and Humanloop."""
+ """Client for managing synchronization between local filesystem and Humanloop.
+
+ This client provides file synchronization between Humanloop and the local filesystem,
+ with built-in caching for improved performance. The cache uses Python's LRU (Least
+ Recently Used) cache to automatically manage memory usage by removing least recently
+ accessed files when the cache is full.
+
+ The cache is automatically updated when files are pulled or saved, and can be
+ manually cleared using the clear_cache() method.
+ """
def __init__(
self,
client: "BaseHumanloop",
base_dir: str = "humanloop",
- max_workers: Optional[int] = None
+ cache_size: int = 100
):
"""
Parameters
----------
client: Humanloop client instance
base_dir: Base directory for synced files (default: "humanloop")
- max_workers: Maximum number of worker threads (default: CPU count * 2)
+ cache_size: Maximum number of files to cache (default: 100)
"""
self.client = client
self.base_dir = Path(base_dir)
- self.max_workers = max_workers or multiprocessing.cpu_count() * 2
+ self._cache_size = cache_size
+ # Create a new cached version of get_file_content with the specified cache size
+ self.get_file_content = lru_cache(maxsize=cache_size)(self._get_file_content_impl)
+
+ def _get_file_content_impl(self, path: str, file_type: FileType) -> Optional[str]:
+ """Implementation of get_file_content without the cache decorator.
+
+ This is the actual implementation that gets wrapped by lru_cache.
+ """
+ try:
+ # Construct path to local file
+ local_path = self.base_dir / path
+ # Add appropriate extension
+ local_path = local_path.parent / f"{local_path.stem}.{file_type}"
+
+ if local_path.exists():
+ # Read the file content
+ with open(local_path) as f:
+ file_content = f.read()
+ logger.debug(f"Using local file content from {local_path}")
+ return file_content
+ else:
+ logger.warning(f"Local file not found: {local_path}, falling back to API")
+ return None
+ except Exception as e:
+ logger.error(f"Error reading local file: {e}, falling back to API")
+ return None
+
+ def get_file_content(self, path: str, file_type: FileType) -> Optional[str]:
+ """Get the content of a file from cache or filesystem.
+
+ In __init__ this method is replaced on the instance by an lru_cache-wrapped
+ version, so repeated reads are served from the cache. When the cache is full,
+ the least recently accessed files are automatically evicted to make space.
+
+ Args:
+ path: The normalized path to the file (without extension)
+ file_type: The type of file (prompt or agent)
+
+ Returns:
+ The file content if found, None otherwise
+ """
+ return self._get_file_content_impl(path, file_type)
+
+ def clear_cache(self) -> None:
+ """Clear the LRU cache."""
+ self.get_file_content.cache_clear()
def _normalize_path(self, path: str) -> str:
"""Normalize the path by:
@@ -103,6 +158,10 @@ def _save_serialized_file(self, serialized_content: str, file_path: str, file_ty
# Write content to file
with open(new_path, "w") as f:
f.write(serialized_content)
+
+ # Clear the cache for this file to ensure we get fresh content next time
+ self.clear_cache()
+
logger.info(f"Syncing {file_type} {file_path}")
except Exception as e:
logger.error(f"Failed to sync {file_type} {file_path}: {str(e)}")
From db5e37ae996dc68e39274b559413ce9d01adf129 Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Wed, 30 Apr 2025 17:26:42 +0100
Subject: [PATCH 19/39] add DEFAULT_CACHE_SIZE constant
---
src/humanloop/client.py | 24 ++++++++++++++++++++++--
src/humanloop/sync/sync_client.py | 7 +++++--
2 files changed, 27 insertions(+), 4 deletions(-)
diff --git a/src/humanloop/client.py b/src/humanloop/client.py
index c303e7cf..563d1f33 100644
--- a/src/humanloop/client.py
+++ b/src/humanloop/client.py
@@ -24,7 +24,7 @@
from humanloop.otel.processor import HumanloopSpanProcessor
from humanloop.prompt_utils import populate_template
from humanloop.prompts.client import PromptsClient
-from humanloop.sync.sync_client import SyncClient
+from humanloop.sync.sync_client import SyncClient, DEFAULT_CACHE_SIZE
class ExtendedEvalsClient(EvaluationsClient):
@@ -101,6 +101,7 @@ def __init__(
opentelemetry_tracer: Optional[Tracer] = None,
use_local_files: bool = False,
files_directory: str = "humanloop",
+ cache_size: int = DEFAULT_CACHE_SIZE,
):
"""
Extends the base client with custom evaluation utilities and
@@ -110,6 +111,21 @@ def __init__(
You can provide a TracerProvider and a Tracer to integrate
with your existing telemetry system. If not provided,
an internal TracerProvider will be used.
+
+ Parameters
+ ----------
+ base_url: Optional base URL for the API
+ environment: The environment to use (default: DEFAULT)
+ api_key: Your Humanloop API key (default: from HUMANLOOP_API_KEY env var)
+ timeout: Optional timeout for API requests
+ follow_redirects: Whether to follow redirects
+ httpx_client: Optional custom httpx client
+ opentelemetry_tracer_provider: Optional tracer provider for telemetry
+ opentelemetry_tracer: Optional tracer for telemetry
+ use_local_files: Whether to use local files for prompts and agents
+ files_directory: Directory for local files (default: "humanloop")
+ cache_size: Maximum number of files to cache when use_local_files is True (default: DEFAULT_CACHE_SIZE).
+ This parameter has no effect if use_local_files is False.
"""
super().__init__(
base_url=base_url,
@@ -121,7 +137,11 @@ def __init__(
)
self.use_local_files = use_local_files
- self._sync_client = SyncClient(client=self, base_dir=files_directory)
+ self._sync_client = SyncClient(
+ client=self,
+ base_dir=files_directory,
+ cache_size=cache_size
+ )
eval_client = ExtendedEvalsClient(client_wrapper=self._client_wrapper)
eval_client.client = self
self.evaluations = eval_client
diff --git a/src/humanloop/sync/sync_client.py b/src/humanloop/sync/sync_client.py
index 3f456d37..b0d04636 100644
--- a/src/humanloop/sync/sync_client.py
+++ b/src/humanloop/sync/sync_client.py
@@ -21,6 +21,9 @@
if not logger.hasHandlers():
logger.addHandler(console_handler)
+# Default cache size for file content caching
+DEFAULT_CACHE_SIZE = 100
+
class SyncClient:
"""Client for managing synchronization between local filesystem and Humanloop.
@@ -37,14 +40,14 @@ def __init__(
self,
client: "BaseHumanloop",
base_dir: str = "humanloop",
- cache_size: int = 100
+ cache_size: int = DEFAULT_CACHE_SIZE
):
"""
Parameters
----------
client: Humanloop client instance
base_dir: Base directory for synced files (default: "humanloop")
- cache_size: Maximum number of files to cache (default: 100)
+ cache_size: Maximum number of files to cache (default: DEFAULT_CACHE_SIZE)
"""
self.client = client
self.base_dir = Path(base_dir)
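A usage sketch of the new knob end to end (the API key is a placeholder); as the docstring above notes, `cache_size` only has an effect when `use_local_files` is enabled:

```
from humanloop import Humanloop

hl = Humanloop(
    api_key="YOUR_API_KEY",
    use_local_files=True,        # serve .prompt/.agent content from disk
    files_directory="humanloop",
    cache_size=200,              # overrides DEFAULT_CACHE_SIZE (100)
)
```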
From b3713484d58b9cffa07a940927f8d138838bda3f Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Wed, 30 Apr 2025 17:27:05 +0100
Subject: [PATCH 20/39] remove unused imports in sync client
---
src/humanloop/sync/sync_client.py | 8 +-------
1 file changed, 1 insertion(+), 7 deletions(-)
diff --git a/src/humanloop/sync/sync_client.py b/src/humanloop/sync/sync_client.py
index b0d04636..82fb224f 100644
--- a/src/humanloop/sync/sync_client.py
+++ b/src/humanloop/sync/sync_client.py
@@ -1,14 +1,8 @@
-import multiprocessing
-import os
import logging
from pathlib import Path
-import concurrent.futures
-from typing import List, TYPE_CHECKING, Union, cast, Optional, Dict
+from typing import List, TYPE_CHECKING, Optional
from functools import lru_cache
-from humanloop.types import FileType, PromptResponse, AgentResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse
-from humanloop.core.api_error import ApiError
-
if TYPE_CHECKING:
from humanloop.base_client import BaseHumanloop
From 693262f3d59aad2a04e95940f3b642c3852cbcd3 Mon Sep 17 00:00:00 2001
From: fern-api <115122769+fern-api[bot]@users.noreply.github.com>
Date: Wed, 30 Apr 2025 16:30:48 +0000
Subject: [PATCH 21/39] Release 0.8.36
---
poetry.lock | 302 +-
pyproject.toml | 2 +-
reference.md | 5751 +++++++++++++----
src/humanloop/__init__.py | 237 +-
src/humanloop/agents/__init__.py | 49 +
src/humanloop/agents/client.py | 3210 +++++++++
src/humanloop/agents/raw_client.py | 3891 +++++++++++
src/humanloop/agents/requests/__init__.py | 25 +
.../requests/agent_log_request_agent.py | 6 +
.../requests/agent_log_request_tool_choice.py | 8 +
.../agent_request_reasoning_effort.py | 6 +
.../agents/requests/agent_request_stop.py | 5 +
.../agents/requests/agent_request_template.py | 6 +
.../requests/agent_request_tools_item.py | 7 +
.../requests/agents_call_request_agent.py | 6 +
.../agents_call_request_tool_choice.py | 8 +
.../agents_call_stream_request_agent.py | 6 +
.../agents_call_stream_request_tool_choice.py | 8 +
src/humanloop/agents/types/__init__.py | 25 +
.../agents/types/agent_log_request_agent.py | 6 +
.../types/agent_log_request_tool_choice.py | 8 +
.../types/agent_request_reasoning_effort.py | 6 +
.../agents/types/agent_request_stop.py | 5 +
.../agents/types/agent_request_template.py | 6 +
.../agents/types/agent_request_tools_item.py | 7 +
.../agents/types/agents_call_request_agent.py | 6 +
.../types/agents_call_request_tool_choice.py | 8 +
.../types/agents_call_stream_request_agent.py | 6 +
.../agents_call_stream_request_tool_choice.py | 8 +
src/humanloop/base_client.py | 4 +
src/humanloop/core/client_wrapper.py | 4 +-
src/humanloop/files/client.py | 44 +-
src/humanloop/files/raw_client.py | 54 +-
...th_files_retrieve_by_path_post_response.py | 8 +-
...th_files_retrieve_by_path_post_response.py | 3 +-
src/humanloop/flows/client.py | 8 +-
src/humanloop/logs/client.py | 4 +-
src/humanloop/prompts/__init__.py | 16 +
src/humanloop/prompts/client.py | 267 +-
src/humanloop/prompts/raw_client.py | 335 +-
src/humanloop/prompts/requests/__init__.py | 8 +
.../requests/prompt_log_request_prompt.py | 6 +
.../prompt_request_reasoning_effort.py | 6 +
.../requests/prompts_call_request_prompt.py | 6 +
.../prompts_call_stream_request_prompt.py | 6 +
src/humanloop/prompts/types/__init__.py | 8 +
.../types/prompt_log_request_prompt.py | 6 +
.../types/prompt_request_reasoning_effort.py | 6 +
.../types/prompts_call_request_prompt.py | 6 +
.../prompts_call_stream_request_prompt.py | 6 +
src/humanloop/requests/__init__.py | 88 +-
src/humanloop/requests/agent_call_response.py | 202 +
.../agent_call_response_tool_choice.py | 8 +
.../requests/agent_call_stream_response.py | 19 +
.../agent_call_stream_response_payload.py | 8 +
.../requests/agent_continue_response.py | 202 +
.../agent_continue_response_tool_choice.py | 8 +
.../agent_continue_stream_response.py | 19 +
.../agent_continue_stream_response_payload.py | 8 +
src/humanloop/requests/agent_inline_tool.py | 13 +
.../requests/agent_kernel_request.py | 112 +
.../agent_kernel_request_reasoning_effort.py | 6 +
.../requests/agent_kernel_request_stop.py | 5 +
.../requests/agent_kernel_request_template.py | 6 +
.../agent_kernel_request_tools_item.py | 7 +
.../requests/agent_linked_file_request.py | 13 +
.../requests/agent_linked_file_response.py | 19 +
.../agent_linked_file_response_file.py | 21 +
src/humanloop/requests/agent_log_response.py | 201 +
.../agent_log_response_tool_choice.py | 8 +
.../requests/agent_log_stream_response.py | 87 +
src/humanloop/requests/agent_response.py | 242 +
.../agent_response_reasoning_effort.py | 6 +
src/humanloop/requests/agent_response_stop.py | 5 +
.../requests/agent_response_template.py | 6 +
.../requests/agent_response_tools_item.py | 10 +
.../anthropic_redacted_thinking_content.py | 12 +
.../requests/anthropic_thinking_content.py | 17 +
src/humanloop/requests/chat_message.py | 6 +
.../requests/chat_message_thinking_item.py | 7 +
.../requests/create_agent_log_response.py | 31 +
src/humanloop/requests/dataset_response.py | 5 +
...arents_and_children_response_files_item.py | 8 +-
src/humanloop/requests/evaluator_response.py | 5 +
.../file_environment_response_file.py | 8 +-
.../file_environment_variable_request.py | 15 +
src/humanloop/requests/flow_response.py | 5 +
src/humanloop/requests/linked_file_request.py | 10 +
src/humanloop/requests/list_agents.py | 12 +
src/humanloop/requests/log_response.py | 7 +-
src/humanloop/requests/log_stream_response.py | 7 +
.../requests/paginated_data_agent_response.py | 12 +
..._response_flow_response_agent_response.py} | 8 +-
...w_response_agent_response_records_item.py} | 14 +-
.../requests/populate_template_response.py | 16 +-
...late_template_response_reasoning_effort.py | 6 +
.../requests/prompt_kernel_request.py | 12 +-
.../prompt_kernel_request_reasoning_effort.py | 6 +
src/humanloop/requests/prompt_response.py | 16 +-
.../prompt_response_reasoning_effort.py | 6 +
.../requests/run_version_response.py | 3 +-
src/humanloop/requests/tool_call_response.py | 146 +
src/humanloop/requests/tool_log_response.py | 6 +
.../version_deployment_response_file.py | 8 +-
.../requests/version_id_response_version.py | 8 +-
src/humanloop/tools/client.py | 523 +-
src/humanloop/tools/raw_client.py | 765 ++-
src/humanloop/types/__init__.py | 96 +-
src/humanloop/types/agent_call_response.py | 224 +
.../types/agent_call_response_tool_choice.py | 8 +
.../types/agent_call_stream_response.py | 44 +
.../agent_call_stream_response_payload.py | 8 +
.../types/agent_continue_response.py | 224 +
.../agent_continue_response_tool_choice.py | 8 +
.../types/agent_continue_stream_response.py | 44 +
.../agent_continue_stream_response_payload.py | 8 +
src/humanloop/types/agent_inline_tool.py | 23 +
src/humanloop/types/agent_kernel_request.py | 122 +
.../agent_kernel_request_reasoning_effort.py | 6 +
.../types/agent_kernel_request_stop.py | 5 +
.../types/agent_kernel_request_template.py | 6 +
.../types/agent_kernel_request_tools_item.py | 7 +
.../types/agent_linked_file_request.py | 23 +
.../types/agent_linked_file_response.py | 39 +
.../types/agent_linked_file_response_file.py | 16 +
src/humanloop/types/agent_log_response.py | 224 +
.../types/agent_log_response_tool_choice.py | 8 +
.../types/agent_log_stream_response.py | 98 +
src/humanloop/types/agent_response.py | 265 +
.../types/agent_response_reasoning_effort.py | 6 +
src/humanloop/types/agent_response_stop.py | 5 +
.../types/agent_response_template.py | 6 +
.../types/agent_response_tools_item.py | 10 +
.../anthropic_redacted_thinking_content.py | 23 +
.../types/anthropic_thinking_content.py | 28 +
src/humanloop/types/chat_message.py | 6 +
.../types/chat_message_thinking_item.py | 7 +
.../types/create_agent_log_response.py | 42 +
src/humanloop/types/dataset_response.py | 9 +
...tory_with_parents_and_children_response.py | 2 +
...arents_and_children_response_files_item.py | 3 +-
src/humanloop/types/evaluatee_response.py | 2 +
.../types/evaluation_evaluator_response.py | 2 +
.../types/evaluation_log_response.py | 3 +
src/humanloop/types/evaluation_response.py | 2 +
.../types/evaluation_run_response.py | 2 +
.../types/evaluation_runs_response.py | 2 +
src/humanloop/types/evaluator_log_response.py | 3 +
src/humanloop/types/evaluator_response.py | 11 +
src/humanloop/types/event_type.py | 21 +
.../types/file_environment_response.py | 2 +
.../types/file_environment_response_file.py | 3 +-
.../file_environment_variable_request.py | 27 +
src/humanloop/types/file_type.py | 2 +-
src/humanloop/types/files_tool_type.py | 2 +-
src/humanloop/types/flow_log_response.py | 3 +
src/humanloop/types/flow_response.py | 11 +
src/humanloop/types/linked_file_request.py | 21 +
src/humanloop/types/list_agents.py | 31 +
src/humanloop/types/list_evaluators.py | 2 +
src/humanloop/types/list_flows.py | 2 +
src/humanloop/types/list_prompts.py | 2 +
src/humanloop/types/list_tools.py | 2 +
src/humanloop/types/log_response.py | 5 +-
src/humanloop/types/log_stream_response.py | 7 +
src/humanloop/types/model_providers.py | 2 +-
.../types/monitoring_evaluator_response.py | 2 +
src/humanloop/types/on_agent_call_enum.py | 5 +
.../types/open_ai_reasoning_effort.py | 5 +
.../types/paginated_data_agent_response.py | 31 +
.../paginated_data_evaluation_log_response.py | 3 +
.../paginated_data_evaluator_response.py | 2 +
.../types/paginated_data_flow_response.py | 2 +
.../types/paginated_data_log_response.py | 3 +
.../types/paginated_data_prompt_response.py | 2 +
.../types/paginated_data_tool_response.py | 2 +
..._response_flow_response_agent_response.py} | 12 +-
...w_response_agent_response_records_item.py} | 7 +-
.../types/paginated_evaluation_response.py | 2 +
.../types/populate_template_response.py | 22 +-
...late_template_response_reasoning_effort.py | 6 +
src/humanloop/types/prompt_call_response.py | 2 +
src/humanloop/types/prompt_kernel_request.py | 12 +-
.../prompt_kernel_request_reasoning_effort.py | 6 +
src/humanloop/types/prompt_log_response.py | 3 +
src/humanloop/types/prompt_response.py | 22 +-
.../types/prompt_response_reasoning_effort.py | 6 +
src/humanloop/types/reasoning_effort.py | 5 -
src/humanloop/types/run_version_response.py | 3 +-
src/humanloop/types/tool_call_response.py | 168 +
src/humanloop/types/tool_log_response.py | 9 +
src/humanloop/types/tool_response.py | 2 +
.../types/version_deployment_response.py | 2 +
.../types/version_deployment_response_file.py | 3 +-
src/humanloop/types/version_id_response.py | 2 +
.../types/version_id_response_version.py | 3 +-
196 files changed, 17976 insertions(+), 1677 deletions(-)
create mode 100644 src/humanloop/agents/__init__.py
create mode 100644 src/humanloop/agents/client.py
create mode 100644 src/humanloop/agents/raw_client.py
create mode 100644 src/humanloop/agents/requests/__init__.py
create mode 100644 src/humanloop/agents/requests/agent_log_request_agent.py
create mode 100644 src/humanloop/agents/requests/agent_log_request_tool_choice.py
create mode 100644 src/humanloop/agents/requests/agent_request_reasoning_effort.py
create mode 100644 src/humanloop/agents/requests/agent_request_stop.py
create mode 100644 src/humanloop/agents/requests/agent_request_template.py
create mode 100644 src/humanloop/agents/requests/agent_request_tools_item.py
create mode 100644 src/humanloop/agents/requests/agents_call_request_agent.py
create mode 100644 src/humanloop/agents/requests/agents_call_request_tool_choice.py
create mode 100644 src/humanloop/agents/requests/agents_call_stream_request_agent.py
create mode 100644 src/humanloop/agents/requests/agents_call_stream_request_tool_choice.py
create mode 100644 src/humanloop/agents/types/__init__.py
create mode 100644 src/humanloop/agents/types/agent_log_request_agent.py
create mode 100644 src/humanloop/agents/types/agent_log_request_tool_choice.py
create mode 100644 src/humanloop/agents/types/agent_request_reasoning_effort.py
create mode 100644 src/humanloop/agents/types/agent_request_stop.py
create mode 100644 src/humanloop/agents/types/agent_request_template.py
create mode 100644 src/humanloop/agents/types/agent_request_tools_item.py
create mode 100644 src/humanloop/agents/types/agents_call_request_agent.py
create mode 100644 src/humanloop/agents/types/agents_call_request_tool_choice.py
create mode 100644 src/humanloop/agents/types/agents_call_stream_request_agent.py
create mode 100644 src/humanloop/agents/types/agents_call_stream_request_tool_choice.py
create mode 100644 src/humanloop/prompts/requests/prompt_log_request_prompt.py
create mode 100644 src/humanloop/prompts/requests/prompt_request_reasoning_effort.py
create mode 100644 src/humanloop/prompts/requests/prompts_call_request_prompt.py
create mode 100644 src/humanloop/prompts/requests/prompts_call_stream_request_prompt.py
create mode 100644 src/humanloop/prompts/types/prompt_log_request_prompt.py
create mode 100644 src/humanloop/prompts/types/prompt_request_reasoning_effort.py
create mode 100644 src/humanloop/prompts/types/prompts_call_request_prompt.py
create mode 100644 src/humanloop/prompts/types/prompts_call_stream_request_prompt.py
create mode 100644 src/humanloop/requests/agent_call_response.py
create mode 100644 src/humanloop/requests/agent_call_response_tool_choice.py
create mode 100644 src/humanloop/requests/agent_call_stream_response.py
create mode 100644 src/humanloop/requests/agent_call_stream_response_payload.py
create mode 100644 src/humanloop/requests/agent_continue_response.py
create mode 100644 src/humanloop/requests/agent_continue_response_tool_choice.py
create mode 100644 src/humanloop/requests/agent_continue_stream_response.py
create mode 100644 src/humanloop/requests/agent_continue_stream_response_payload.py
create mode 100644 src/humanloop/requests/agent_inline_tool.py
create mode 100644 src/humanloop/requests/agent_kernel_request.py
create mode 100644 src/humanloop/requests/agent_kernel_request_reasoning_effort.py
create mode 100644 src/humanloop/requests/agent_kernel_request_stop.py
create mode 100644 src/humanloop/requests/agent_kernel_request_template.py
create mode 100644 src/humanloop/requests/agent_kernel_request_tools_item.py
create mode 100644 src/humanloop/requests/agent_linked_file_request.py
create mode 100644 src/humanloop/requests/agent_linked_file_response.py
create mode 100644 src/humanloop/requests/agent_linked_file_response_file.py
create mode 100644 src/humanloop/requests/agent_log_response.py
create mode 100644 src/humanloop/requests/agent_log_response_tool_choice.py
create mode 100644 src/humanloop/requests/agent_log_stream_response.py
create mode 100644 src/humanloop/requests/agent_response.py
create mode 100644 src/humanloop/requests/agent_response_reasoning_effort.py
create mode 100644 src/humanloop/requests/agent_response_stop.py
create mode 100644 src/humanloop/requests/agent_response_template.py
create mode 100644 src/humanloop/requests/agent_response_tools_item.py
create mode 100644 src/humanloop/requests/anthropic_redacted_thinking_content.py
create mode 100644 src/humanloop/requests/anthropic_thinking_content.py
create mode 100644 src/humanloop/requests/chat_message_thinking_item.py
create mode 100644 src/humanloop/requests/create_agent_log_response.py
create mode 100644 src/humanloop/requests/file_environment_variable_request.py
create mode 100644 src/humanloop/requests/linked_file_request.py
create mode 100644 src/humanloop/requests/list_agents.py
create mode 100644 src/humanloop/requests/log_stream_response.py
create mode 100644 src/humanloop/requests/paginated_data_agent_response.py
rename src/humanloop/requests/{paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py => paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py} (65%)
rename src/humanloop/requests/{paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py => paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py} (58%)
create mode 100644 src/humanloop/requests/populate_template_response_reasoning_effort.py
create mode 100644 src/humanloop/requests/prompt_kernel_request_reasoning_effort.py
create mode 100644 src/humanloop/requests/prompt_response_reasoning_effort.py
create mode 100644 src/humanloop/requests/tool_call_response.py
create mode 100644 src/humanloop/types/agent_call_response.py
create mode 100644 src/humanloop/types/agent_call_response_tool_choice.py
create mode 100644 src/humanloop/types/agent_call_stream_response.py
create mode 100644 src/humanloop/types/agent_call_stream_response_payload.py
create mode 100644 src/humanloop/types/agent_continue_response.py
create mode 100644 src/humanloop/types/agent_continue_response_tool_choice.py
create mode 100644 src/humanloop/types/agent_continue_stream_response.py
create mode 100644 src/humanloop/types/agent_continue_stream_response_payload.py
create mode 100644 src/humanloop/types/agent_inline_tool.py
create mode 100644 src/humanloop/types/agent_kernel_request.py
create mode 100644 src/humanloop/types/agent_kernel_request_reasoning_effort.py
create mode 100644 src/humanloop/types/agent_kernel_request_stop.py
create mode 100644 src/humanloop/types/agent_kernel_request_template.py
create mode 100644 src/humanloop/types/agent_kernel_request_tools_item.py
create mode 100644 src/humanloop/types/agent_linked_file_request.py
create mode 100644 src/humanloop/types/agent_linked_file_response.py
create mode 100644 src/humanloop/types/agent_linked_file_response_file.py
create mode 100644 src/humanloop/types/agent_log_response.py
create mode 100644 src/humanloop/types/agent_log_response_tool_choice.py
create mode 100644 src/humanloop/types/agent_log_stream_response.py
create mode 100644 src/humanloop/types/agent_response.py
create mode 100644 src/humanloop/types/agent_response_reasoning_effort.py
create mode 100644 src/humanloop/types/agent_response_stop.py
create mode 100644 src/humanloop/types/agent_response_template.py
create mode 100644 src/humanloop/types/agent_response_tools_item.py
create mode 100644 src/humanloop/types/anthropic_redacted_thinking_content.py
create mode 100644 src/humanloop/types/anthropic_thinking_content.py
create mode 100644 src/humanloop/types/chat_message_thinking_item.py
create mode 100644 src/humanloop/types/create_agent_log_response.py
create mode 100644 src/humanloop/types/event_type.py
create mode 100644 src/humanloop/types/file_environment_variable_request.py
create mode 100644 src/humanloop/types/linked_file_request.py
create mode 100644 src/humanloop/types/list_agents.py
create mode 100644 src/humanloop/types/log_stream_response.py
create mode 100644 src/humanloop/types/on_agent_call_enum.py
create mode 100644 src/humanloop/types/open_ai_reasoning_effort.py
create mode 100644 src/humanloop/types/paginated_data_agent_response.py
rename src/humanloop/types/{paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py => paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py} (76%)
rename src/humanloop/types/{paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py => paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py} (63%)
create mode 100644 src/humanloop/types/populate_template_response_reasoning_effort.py
create mode 100644 src/humanloop/types/prompt_kernel_request_reasoning_effort.py
create mode 100644 src/humanloop/types/prompt_response_reasoning_effort.py
delete mode 100644 src/humanloop/types/reasoning_effort.py
create mode 100644 src/humanloop/types/tool_call_response.py
diff --git a/poetry.lock b/poetry.lock
index 4ce5d536..cfe8a240 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -78,13 +78,13 @@ tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"]
[[package]]
name = "certifi"
-version = "2025.1.31"
+version = "2025.4.26"
description = "Python package for providing Mozilla's CA Bundle."
optional = false
python-versions = ">=3.6"
files = [
- {file = "certifi-2025.1.31-py3-none-any.whl", hash = "sha256:ca78db4565a652026a4db2bcdf68f2fb589ea80d0be70e03929ed730746b84fe"},
- {file = "certifi-2025.1.31.tar.gz", hash = "sha256:3d5da6925056f6f18f119200434a4780a94263f10d1c21d032a6f6b2baa20651"},
+ {file = "certifi-2025.4.26-py3-none-any.whl", hash = "sha256:30350364dfe371162649852c63336a15c70c6510c2ad5015b21c2345311805f3"},
+ {file = "certifi-2025.4.26.tar.gz", hash = "sha256:0a816057ea3cdefcef70270d2c515e4506bbc954f417fa5ade2021213bb8f0c6"},
]
[[package]]
@@ -384,13 +384,13 @@ tqdm = ["tqdm"]
[[package]]
name = "groq"
-version = "0.23.0"
+version = "0.23.1"
description = "The official Python library for the groq API"
optional = false
python-versions = ">=3.8"
files = [
- {file = "groq-0.23.0-py3-none-any.whl", hash = "sha256:039817a6b75d70f129f0591f8c79d3f7655dcf728b709fe5f08cfeadb1d9cc19"},
- {file = "groq-0.23.0.tar.gz", hash = "sha256:426e1d89df5791b34fa3f2eb827aec38490b9b2de5a44bbba6161cf5282ea5c9"},
+ {file = "groq-0.23.1-py3-none-any.whl", hash = "sha256:05fa38c3d0ad03c19c6185f98f6a73901c2a463e844fd067b79f7b05c8346946"},
+ {file = "groq-0.23.1.tar.gz", hash = "sha256:952e34895f9bfb78ab479e495d77b32180262e5c42f531ce3a1722d6e5a04dfb"},
]
[package.dependencies]
@@ -403,29 +403,29 @@ typing-extensions = ">=4.10,<5"
[[package]]
name = "h11"
-version = "0.14.0"
+version = "0.16.0"
description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"},
- {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"},
+ {file = "h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86"},
+ {file = "h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1"},
]
[[package]]
name = "httpcore"
-version = "1.0.8"
+version = "1.0.9"
description = "A minimal low-level HTTP client."
optional = false
python-versions = ">=3.8"
files = [
- {file = "httpcore-1.0.8-py3-none-any.whl", hash = "sha256:5254cf149bcb5f75e9d1b2b9f729ea4a4b883d1ad7379fc632b727cec23674be"},
- {file = "httpcore-1.0.8.tar.gz", hash = "sha256:86e94505ed24ea06514883fd44d2bc02d90e77e7979c8eb71b90f41d364a1bad"},
+ {file = "httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55"},
+ {file = "httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8"},
]
[package.dependencies]
certifi = "*"
-h11 = ">=0.13,<0.15"
+h11 = ">=0.16"
[package.extras]
asyncio = ["anyio (>=4.0,<5.0)"]
@@ -873,13 +873,13 @@ files = [
[[package]]
name = "openai"
-version = "1.75.0"
+version = "1.76.2"
description = "The official Python library for the openai API"
optional = false
python-versions = ">=3.8"
files = [
- {file = "openai-1.75.0-py3-none-any.whl", hash = "sha256:fe6f932d2ded3b429ff67cc9ad118c71327db32eb9d32dd723de3acfca337125"},
- {file = "openai-1.75.0.tar.gz", hash = "sha256:fb3ea907efbdb1bcfd0c44507ad9c961afd7dce3147292b54505ecfd17be8fd1"},
+ {file = "openai-1.76.2-py3-none-any.whl", hash = "sha256:9c1d9ad59e6e3bea7205eedc9ca66eeebae18d47b527e505a2b0d2fb1538e26e"},
+ {file = "openai-1.76.2.tar.gz", hash = "sha256:f430c8b848775907405c6eff54621254c96f6444c593c097e0cc3a9f8fdda96f"},
]
[package.dependencies]
@@ -931,30 +931,30 @@ wrapt = ">=1.0.0,<2.0.0"
[[package]]
name = "opentelemetry-instrumentation-anthropic"
-version = "0.39.2"
+version = "0.40.2"
description = "OpenTelemetry Anthropic instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_anthropic-0.39.2-py3-none-any.whl", hash = "sha256:e1bfed6f4e140e0e35d19d44281c968970004467ccc1f40a07233618f798809c"},
- {file = "opentelemetry_instrumentation_anthropic-0.39.2.tar.gz", hash = "sha256:a0dab35b4bc8561623b8f503220846a6b5ad07cd7d3277eeaf5e865d57c6e266"},
+ {file = "opentelemetry_instrumentation_anthropic-0.40.2-py3-none-any.whl", hash = "sha256:94e3474dfcb65ada10a5d83056e9e43dc0afbaae43a55bba6b7712672e28d21a"},
+ {file = "opentelemetry_instrumentation_anthropic-0.40.2.tar.gz", hash = "sha256:949156556ed4d908196984fac1a8ea3d16edcf9d7395d85729a0e7712b2f818f"},
]
[package.dependencies]
opentelemetry-api = ">=1.28.0,<2.0.0"
opentelemetry-instrumentation = ">=0.50b0"
opentelemetry-semantic-conventions = ">=0.50b0"
-opentelemetry-semantic-conventions-ai = "0.4.3"
+opentelemetry-semantic-conventions-ai = "0.4.5"
[[package]]
name = "opentelemetry-instrumentation-bedrock"
-version = "0.39.2"
+version = "0.40.2"
description = "OpenTelemetry Bedrock instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_bedrock-0.39.2-py3-none-any.whl", hash = "sha256:dca1b2c5d0c74f41254c6de39fed51167357469159f9453cd9815143a213a1c8"},
- {file = "opentelemetry_instrumentation_bedrock-0.39.2.tar.gz", hash = "sha256:ffe79fa8302dde69c5df86e602288ab48d31bdf3dffe6846cbe6a75cc0bb6385"},
+ {file = "opentelemetry_instrumentation_bedrock-0.40.2-py3-none-any.whl", hash = "sha256:a12331e2cd77eb61f954acbaa50cdf31954f2b315b52da6354284ce0b83f2773"},
+ {file = "opentelemetry_instrumentation_bedrock-0.40.2.tar.gz", hash = "sha256:a1d49d41d8435ba368698a884ffbd4fbda1f1325d6961b805706ee0bbbc6547f"},
]
[package.dependencies]
@@ -962,77 +962,77 @@ anthropic = ">=0.17.0"
opentelemetry-api = ">=1.28.0,<2.0.0"
opentelemetry-instrumentation = ">=0.50b0"
opentelemetry-semantic-conventions = ">=0.50b0"
-opentelemetry-semantic-conventions-ai = "0.4.3"
+opentelemetry-semantic-conventions-ai = "0.4.5"
tokenizers = ">=0.13.0"
[[package]]
name = "opentelemetry-instrumentation-cohere"
-version = "0.39.2"
+version = "0.40.2"
description = "OpenTelemetry Cohere instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_cohere-0.39.2-py3-none-any.whl", hash = "sha256:a71e289231c3ddbe67dd32c0ed8df8b55367ab594410f2cff82f27784268cba5"},
- {file = "opentelemetry_instrumentation_cohere-0.39.2.tar.gz", hash = "sha256:7a7e441d2c8c862e8ba84170bcaef81c5d5e63b42243b7dcc887541a71c90e15"},
+ {file = "opentelemetry_instrumentation_cohere-0.40.2-py3-none-any.whl", hash = "sha256:96fde68b0d8ce68f272f4c54f30178cb22cbadb196735a3943cc328891a9d508"},
+ {file = "opentelemetry_instrumentation_cohere-0.40.2.tar.gz", hash = "sha256:df3cac041b0769540f2362d8280e7f0179ff1446e47fb2542f22d91822c30fc4"},
]
[package.dependencies]
opentelemetry-api = ">=1.28.0,<2.0.0"
opentelemetry-instrumentation = ">=0.50b0"
opentelemetry-semantic-conventions = ">=0.50b0"
-opentelemetry-semantic-conventions-ai = "0.4.3"
+opentelemetry-semantic-conventions-ai = "0.4.5"
[[package]]
name = "opentelemetry-instrumentation-groq"
-version = "0.39.2"
+version = "0.40.2"
description = "OpenTelemetry Groq instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_groq-0.39.2-py3-none-any.whl", hash = "sha256:0a19571ef86ce46b18e3c5402d321b620c8d5257bc968e8d7073c8937a376970"},
- {file = "opentelemetry_instrumentation_groq-0.39.2.tar.gz", hash = "sha256:b28a2220f24d8fbea12dc4452ef5812e7ba67c6824b4e62278c3b3ada2248acc"},
+ {file = "opentelemetry_instrumentation_groq-0.40.2-py3-none-any.whl", hash = "sha256:32e9220439b8356f33edbafbfd8b7f4ea063c1465ff29389abefcc93eca19530"},
+ {file = "opentelemetry_instrumentation_groq-0.40.2.tar.gz", hash = "sha256:c127d089a5aec9f49ed9ba6bdbd00d67af596040a778eaef3641cd18d114ae93"},
]
[package.dependencies]
opentelemetry-api = ">=1.28.0,<2.0.0"
opentelemetry-instrumentation = ">=0.50b0"
opentelemetry-semantic-conventions = ">=0.50b0"
-opentelemetry-semantic-conventions-ai = "0.4.3"
+opentelemetry-semantic-conventions-ai = "0.4.5"
[[package]]
name = "opentelemetry-instrumentation-openai"
-version = "0.39.2"
+version = "0.40.2"
description = "OpenTelemetry OpenAI instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_openai-0.39.2-py3-none-any.whl", hash = "sha256:a9016e577a8c11cdfc6d79ebb84ed5f6dcacb59d709d250e40b3d08f9d4c25a2"},
- {file = "opentelemetry_instrumentation_openai-0.39.2.tar.gz", hash = "sha256:25cf133fa3b623f123d953c9d637e6529a1790cd2898bf4d6a50c5bffe260821"},
+ {file = "opentelemetry_instrumentation_openai-0.40.2-py3-none-any.whl", hash = "sha256:62fe130f16f2933f1db75f9a14807bb08444534fd8d2e6ad4668ee8b1c3968a5"},
+ {file = "opentelemetry_instrumentation_openai-0.40.2.tar.gz", hash = "sha256:61e46e7a9e3f5d7fb0cef82f1fd7bd6a26848a28ec384249875fe5622ddbf622"},
]
[package.dependencies]
opentelemetry-api = ">=1.28.0,<2.0.0"
opentelemetry-instrumentation = ">=0.50b0"
opentelemetry-semantic-conventions = ">=0.50b0"
-opentelemetry-semantic-conventions-ai = "0.4.3"
+opentelemetry-semantic-conventions-ai = "0.4.5"
tiktoken = ">=0.6.0,<1"
[[package]]
name = "opentelemetry-instrumentation-replicate"
-version = "0.39.2"
+version = "0.40.2"
description = "OpenTelemetry Replicate instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_replicate-0.39.2-py3-none-any.whl", hash = "sha256:778ec5a2bf7767b7377ece0dec66dc2d02f1ea8ca3f8037c96c7b6695c56b8db"},
- {file = "opentelemetry_instrumentation_replicate-0.39.2.tar.gz", hash = "sha256:6b9ddbf89d844ffc3725925af04fbee3a0f7a6d19d6050fb9c72bb8dd2eca7eb"},
+ {file = "opentelemetry_instrumentation_replicate-0.40.2-py3-none-any.whl", hash = "sha256:ab6234081ae9803981e8e6302524bd25fc3d0e38e9a939bee6ad15f85405ccb8"},
+ {file = "opentelemetry_instrumentation_replicate-0.40.2.tar.gz", hash = "sha256:e7edf785c07e94c951f8268ff1204e00b1fcc86059b3475ac04e01b74f9785c6"},
]
[package.dependencies]
opentelemetry-api = ">=1.28.0,<2.0.0"
opentelemetry-instrumentation = ">=0.50b0"
opentelemetry-semantic-conventions = ">=0.50b0"
-opentelemetry-semantic-conventions-ai = "0.4.3"
+opentelemetry-semantic-conventions-ai = "0.4.5"
[[package]]
name = "opentelemetry-proto"
@@ -1081,13 +1081,13 @@ opentelemetry-api = "1.32.1"
[[package]]
name = "opentelemetry-semantic-conventions-ai"
-version = "0.4.3"
+version = "0.4.5"
description = "OpenTelemetry Semantic Conventions Extension for Large Language Models"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_semantic_conventions_ai-0.4.3-py3-none-any.whl", hash = "sha256:9ff60bbf38c8a891c20a355b4ca1948380361e27412c3ead264de0d050fa2570"},
- {file = "opentelemetry_semantic_conventions_ai-0.4.3.tar.gz", hash = "sha256:761a68a7e99436dfc53cfe1f99507316aa0114ac480f0c42743b9320b7c94831"},
+ {file = "opentelemetry_semantic_conventions_ai-0.4.5-py3-none-any.whl", hash = "sha256:91e5c776d45190cebd88ea1cef021e231b5c04c448f5473fdaeb310f14e62b11"},
+ {file = "opentelemetry_semantic_conventions_ai-0.4.5.tar.gz", hash = "sha256:15e2540aa807fb6748f1bdc60da933ee2fb2e40f6dec48fde8facfd9e22550d7"},
]
[[package]]
@@ -1320,18 +1320,18 @@ test = ["cffi", "hypothesis", "pandas", "pytest", "pytz"]
[[package]]
name = "pydantic"
-version = "2.11.3"
+version = "2.11.4"
description = "Data validation using Python type hints"
optional = false
python-versions = ">=3.9"
files = [
- {file = "pydantic-2.11.3-py3-none-any.whl", hash = "sha256:a082753436a07f9ba1289c6ffa01cd93db3548776088aa917cc43b63f68fa60f"},
- {file = "pydantic-2.11.3.tar.gz", hash = "sha256:7471657138c16adad9322fe3070c0116dd6c3ad8d649300e3cbdfe91f4db4ec3"},
+ {file = "pydantic-2.11.4-py3-none-any.whl", hash = "sha256:d9615eaa9ac5a063471da949c8fc16376a84afb5024688b3ff885693506764eb"},
+ {file = "pydantic-2.11.4.tar.gz", hash = "sha256:32738d19d63a226a52eed76645a98ee07c1f410ee41d93b4afbfa85ed8111c2d"},
]
[package.dependencies]
annotated-types = ">=0.6.0"
-pydantic-core = "2.33.1"
+pydantic-core = "2.33.2"
typing-extensions = ">=4.12.2"
typing-inspection = ">=0.4.0"
@@ -1341,110 +1341,110 @@ timezone = ["tzdata"]
[[package]]
name = "pydantic-core"
-version = "2.33.1"
+version = "2.33.2"
description = "Core functionality for Pydantic validation and serialization"
optional = false
python-versions = ">=3.9"
files = [
- {file = "pydantic_core-2.33.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3077cfdb6125cc8dab61b155fdd714663e401f0e6883f9632118ec12cf42df26"},
- {file = "pydantic_core-2.33.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8ffab8b2908d152e74862d276cf5017c81a2f3719f14e8e3e8d6b83fda863927"},
- {file = "pydantic_core-2.33.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5183e4f6a2d468787243ebcd70cf4098c247e60d73fb7d68d5bc1e1beaa0c4db"},
- {file = "pydantic_core-2.33.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:398a38d323f37714023be1e0285765f0a27243a8b1506b7b7de87b647b517e48"},
- {file = "pydantic_core-2.33.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:87d3776f0001b43acebfa86f8c64019c043b55cc5a6a2e313d728b5c95b46969"},
- {file = "pydantic_core-2.33.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c566dd9c5f63d22226409553531f89de0cac55397f2ab8d97d6f06cfce6d947e"},
- {file = "pydantic_core-2.33.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0d5f3acc81452c56895e90643a625302bd6be351e7010664151cc55b7b97f89"},
- {file = "pydantic_core-2.33.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d3a07fadec2a13274a8d861d3d37c61e97a816beae717efccaa4b36dfcaadcde"},
- {file = "pydantic_core-2.33.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:f99aeda58dce827f76963ee87a0ebe75e648c72ff9ba1174a253f6744f518f65"},
- {file = "pydantic_core-2.33.1-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:902dbc832141aa0ec374f4310f1e4e7febeebc3256f00dc359a9ac3f264a45dc"},
- {file = "pydantic_core-2.33.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fe44d56aa0b00d66640aa84a3cbe80b7a3ccdc6f0b1ca71090696a6d4777c091"},
- {file = "pydantic_core-2.33.1-cp310-cp310-win32.whl", hash = "sha256:ed3eb16d51257c763539bde21e011092f127a2202692afaeaccb50db55a31383"},
- {file = "pydantic_core-2.33.1-cp310-cp310-win_amd64.whl", hash = "sha256:694ad99a7f6718c1a498dc170ca430687a39894a60327f548e02a9c7ee4b6504"},
- {file = "pydantic_core-2.33.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:6e966fc3caaf9f1d96b349b0341c70c8d6573bf1bac7261f7b0ba88f96c56c24"},
- {file = "pydantic_core-2.33.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bfd0adeee563d59c598ceabddf2c92eec77abcb3f4a391b19aa7366170bd9e30"},
- {file = "pydantic_core-2.33.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:91815221101ad3c6b507804178a7bb5cb7b2ead9ecd600041669c8d805ebd595"},
- {file = "pydantic_core-2.33.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9fea9c1869bb4742d174a57b4700c6dadea951df8b06de40c2fedb4f02931c2e"},
- {file = "pydantic_core-2.33.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d20eb4861329bb2484c021b9d9a977566ab16d84000a57e28061151c62b349a"},
- {file = "pydantic_core-2.33.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fb935c5591573ae3201640579f30128ccc10739b45663f93c06796854405505"},
- {file = "pydantic_core-2.33.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c964fd24e6166420d18fb53996d8c9fd6eac9bf5ae3ec3d03015be4414ce497f"},
- {file = "pydantic_core-2.33.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:681d65e9011f7392db5aa002b7423cc442d6a673c635668c227c6c8d0e5a4f77"},
- {file = "pydantic_core-2.33.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e100c52f7355a48413e2999bfb4e139d2977a904495441b374f3d4fb4a170961"},
- {file = "pydantic_core-2.33.1-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:048831bd363490be79acdd3232f74a0e9951b11b2b4cc058aeb72b22fdc3abe1"},
- {file = "pydantic_core-2.33.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:bdc84017d28459c00db6f918a7272a5190bec3090058334e43a76afb279eac7c"},
- {file = "pydantic_core-2.33.1-cp311-cp311-win32.whl", hash = "sha256:32cd11c5914d1179df70406427097c7dcde19fddf1418c787540f4b730289896"},
- {file = "pydantic_core-2.33.1-cp311-cp311-win_amd64.whl", hash = "sha256:2ea62419ba8c397e7da28a9170a16219d310d2cf4970dbc65c32faf20d828c83"},
- {file = "pydantic_core-2.33.1-cp311-cp311-win_arm64.whl", hash = "sha256:fc903512177361e868bc1f5b80ac8c8a6e05fcdd574a5fb5ffeac5a9982b9e89"},
- {file = "pydantic_core-2.33.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:1293d7febb995e9d3ec3ea09caf1a26214eec45b0f29f6074abb004723fc1de8"},
- {file = "pydantic_core-2.33.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:99b56acd433386c8f20be5c4000786d1e7ca0523c8eefc995d14d79c7a081498"},
- {file = "pydantic_core-2.33.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35a5ec3fa8c2fe6c53e1b2ccc2454398f95d5393ab398478f53e1afbbeb4d939"},
- {file = "pydantic_core-2.33.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b172f7b9d2f3abc0efd12e3386f7e48b576ef309544ac3a63e5e9cdd2e24585d"},
- {file = "pydantic_core-2.33.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9097b9f17f91eea659b9ec58148c0747ec354a42f7389b9d50701610d86f812e"},
- {file = "pydantic_core-2.33.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cc77ec5b7e2118b152b0d886c7514a4653bcb58c6b1d760134a9fab915f777b3"},
- {file = "pydantic_core-2.33.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5e3d15245b08fa4a84cefc6c9222e6f37c98111c8679fbd94aa145f9a0ae23d"},
- {file = "pydantic_core-2.33.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ef99779001d7ac2e2461d8ab55d3373fe7315caefdbecd8ced75304ae5a6fc6b"},
- {file = "pydantic_core-2.33.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:fc6bf8869e193855e8d91d91f6bf59699a5cdfaa47a404e278e776dd7f168b39"},
- {file = "pydantic_core-2.33.1-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:b1caa0bc2741b043db7823843e1bde8aaa58a55a58fda06083b0569f8b45693a"},
- {file = "pydantic_core-2.33.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ec259f62538e8bf364903a7d0d0239447059f9434b284f5536e8402b7dd198db"},
- {file = "pydantic_core-2.33.1-cp312-cp312-win32.whl", hash = "sha256:e14f369c98a7c15772b9da98987f58e2b509a93235582838bd0d1d8c08b68fda"},
- {file = "pydantic_core-2.33.1-cp312-cp312-win_amd64.whl", hash = "sha256:1c607801d85e2e123357b3893f82c97a42856192997b95b4d8325deb1cd0c5f4"},
- {file = "pydantic_core-2.33.1-cp312-cp312-win_arm64.whl", hash = "sha256:8d13f0276806ee722e70a1c93da19748594f19ac4299c7e41237fc791d1861ea"},
- {file = "pydantic_core-2.33.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:70af6a21237b53d1fe7b9325b20e65cbf2f0a848cf77bed492b029139701e66a"},
- {file = "pydantic_core-2.33.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:282b3fe1bbbe5ae35224a0dbd05aed9ccabccd241e8e6b60370484234b456266"},
- {file = "pydantic_core-2.33.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b315e596282bbb5822d0c7ee9d255595bd7506d1cb20c2911a4da0b970187d3"},
- {file = "pydantic_core-2.33.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1dfae24cf9921875ca0ca6a8ecb4bb2f13c855794ed0d468d6abbec6e6dcd44a"},
- {file = "pydantic_core-2.33.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6dd8ecfde08d8bfadaea669e83c63939af76f4cf5538a72597016edfa3fad516"},
- {file = "pydantic_core-2.33.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2f593494876eae852dc98c43c6f260f45abdbfeec9e4324e31a481d948214764"},
- {file = "pydantic_core-2.33.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:948b73114f47fd7016088e5186d13faf5e1b2fe83f5e320e371f035557fd264d"},
- {file = "pydantic_core-2.33.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e11f3864eb516af21b01e25fac915a82e9ddad3bb0fb9e95a246067398b435a4"},
- {file = "pydantic_core-2.33.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:549150be302428b56fdad0c23c2741dcdb5572413776826c965619a25d9c6bde"},
- {file = "pydantic_core-2.33.1-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:495bc156026efafd9ef2d82372bd38afce78ddd82bf28ef5276c469e57c0c83e"},
- {file = "pydantic_core-2.33.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ec79de2a8680b1a67a07490bddf9636d5c2fab609ba8c57597e855fa5fa4dacd"},
- {file = "pydantic_core-2.33.1-cp313-cp313-win32.whl", hash = "sha256:ee12a7be1742f81b8a65b36c6921022301d466b82d80315d215c4c691724986f"},
- {file = "pydantic_core-2.33.1-cp313-cp313-win_amd64.whl", hash = "sha256:ede9b407e39949d2afc46385ce6bd6e11588660c26f80576c11c958e6647bc40"},
- {file = "pydantic_core-2.33.1-cp313-cp313-win_arm64.whl", hash = "sha256:aa687a23d4b7871a00e03ca96a09cad0f28f443690d300500603bd0adba4b523"},
- {file = "pydantic_core-2.33.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:401d7b76e1000d0dd5538e6381d28febdcacb097c8d340dde7d7fc6e13e9f95d"},
- {file = "pydantic_core-2.33.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7aeb055a42d734c0255c9e489ac67e75397d59c6fbe60d155851e9782f276a9c"},
- {file = "pydantic_core-2.33.1-cp313-cp313t-win_amd64.whl", hash = "sha256:338ea9b73e6e109f15ab439e62cb3b78aa752c7fd9536794112e14bee02c8d18"},
- {file = "pydantic_core-2.33.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:5ab77f45d33d264de66e1884fca158bc920cb5e27fd0764a72f72f5756ae8bdb"},
- {file = "pydantic_core-2.33.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e7aaba1b4b03aaea7bb59e1b5856d734be011d3e6d98f5bcaa98cb30f375f2ad"},
- {file = "pydantic_core-2.33.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7fb66263e9ba8fea2aa85e1e5578980d127fb37d7f2e292773e7bc3a38fb0c7b"},
- {file = "pydantic_core-2.33.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3f2648b9262607a7fb41d782cc263b48032ff7a03a835581abbf7a3bec62bcf5"},
- {file = "pydantic_core-2.33.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:723c5630c4259400818b4ad096735a829074601805d07f8cafc366d95786d331"},
- {file = "pydantic_core-2.33.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d100e3ae783d2167782391e0c1c7a20a31f55f8015f3293647544df3f9c67824"},
- {file = "pydantic_core-2.33.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:177d50460bc976a0369920b6c744d927b0ecb8606fb56858ff542560251b19e5"},
- {file = "pydantic_core-2.33.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a3edde68d1a1f9af1273b2fe798997b33f90308fb6d44d8550c89fc6a3647cf6"},
- {file = "pydantic_core-2.33.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a62c3c3ef6a7e2c45f7853b10b5bc4ddefd6ee3cd31024754a1a5842da7d598d"},
- {file = "pydantic_core-2.33.1-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:c91dbb0ab683fa0cd64a6e81907c8ff41d6497c346890e26b23de7ee55353f96"},
- {file = "pydantic_core-2.33.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9f466e8bf0a62dc43e068c12166281c2eca72121dd2adc1040f3aa1e21ef8599"},
- {file = "pydantic_core-2.33.1-cp39-cp39-win32.whl", hash = "sha256:ab0277cedb698749caada82e5d099dc9fed3f906a30d4c382d1a21725777a1e5"},
- {file = "pydantic_core-2.33.1-cp39-cp39-win_amd64.whl", hash = "sha256:5773da0ee2d17136b1f1c6fbde543398d452a6ad2a7b54ea1033e2daa739b8d2"},
- {file = "pydantic_core-2.33.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5c834f54f8f4640fd7e4b193f80eb25a0602bba9e19b3cd2fc7ffe8199f5ae02"},
- {file = "pydantic_core-2.33.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:049e0de24cf23766f12cc5cc71d8abc07d4a9deb9061b334b62093dedc7cb068"},
- {file = "pydantic_core-2.33.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a28239037b3d6f16916a4c831a5a0eadf856bdd6d2e92c10a0da3a59eadcf3e"},
- {file = "pydantic_core-2.33.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d3da303ab5f378a268fa7d45f37d7d85c3ec19769f28d2cc0c61826a8de21fe"},
- {file = "pydantic_core-2.33.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:25626fb37b3c543818c14821afe0fd3830bc327a43953bc88db924b68c5723f1"},
- {file = "pydantic_core-2.33.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:3ab2d36e20fbfcce8f02d73c33a8a7362980cff717926bbae030b93ae46b56c7"},
- {file = "pydantic_core-2.33.1-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:2f9284e11c751b003fd4215ad92d325d92c9cb19ee6729ebd87e3250072cdcde"},
- {file = "pydantic_core-2.33.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:048c01eee07d37cbd066fc512b9d8b5ea88ceeb4e629ab94b3e56965ad655add"},
- {file = "pydantic_core-2.33.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:5ccd429694cf26af7997595d627dd2637e7932214486f55b8a357edaac9dae8c"},
- {file = "pydantic_core-2.33.1-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3a371dc00282c4b84246509a5ddc808e61b9864aa1eae9ecc92bb1268b82db4a"},
- {file = "pydantic_core-2.33.1-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:f59295ecc75a1788af8ba92f2e8c6eeaa5a94c22fc4d151e8d9638814f85c8fc"},
- {file = "pydantic_core-2.33.1-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:08530b8ac922003033f399128505f513e30ca770527cc8bbacf75a84fcc2c74b"},
- {file = "pydantic_core-2.33.1-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bae370459da6a5466978c0eacf90690cb57ec9d533f8e63e564ef3822bfa04fe"},
- {file = "pydantic_core-2.33.1-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e3de2777e3b9f4d603112f78006f4ae0acb936e95f06da6cb1a45fbad6bdb4b5"},
- {file = "pydantic_core-2.33.1-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:3a64e81e8cba118e108d7126362ea30e021291b7805d47e4896e52c791be2761"},
- {file = "pydantic_core-2.33.1-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:52928d8c1b6bda03cc6d811e8923dffc87a2d3c8b3bfd2ce16471c7147a24850"},
- {file = "pydantic_core-2.33.1-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:1b30d92c9412beb5ac6b10a3eb7ef92ccb14e3f2a8d7732e2d739f58b3aa7544"},
- {file = "pydantic_core-2.33.1-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:f995719707e0e29f0f41a8aa3bcea6e761a36c9136104d3189eafb83f5cec5e5"},
- {file = "pydantic_core-2.33.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:7edbc454a29fc6aeae1e1eecba4f07b63b8d76e76a748532233c4c167b4cb9ea"},
- {file = "pydantic_core-2.33.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:ad05b683963f69a1d5d2c2bdab1274a31221ca737dbbceaa32bcb67359453cdd"},
- {file = "pydantic_core-2.33.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df6a94bf9452c6da9b5d76ed229a5683d0306ccb91cca8e1eea883189780d568"},
- {file = "pydantic_core-2.33.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7965c13b3967909a09ecc91f21d09cfc4576bf78140b988904e94f130f188396"},
- {file = "pydantic_core-2.33.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3f1fdb790440a34f6ecf7679e1863b825cb5ffde858a9197f851168ed08371e5"},
- {file = "pydantic_core-2.33.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:5277aec8d879f8d05168fdd17ae811dd313b8ff894aeeaf7cd34ad28b4d77e33"},
- {file = "pydantic_core-2.33.1-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:8ab581d3530611897d863d1a649fb0644b860286b4718db919bfd51ece41f10b"},
- {file = "pydantic_core-2.33.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0483847fa9ad5e3412265c1bd72aad35235512d9ce9d27d81a56d935ef489672"},
- {file = "pydantic_core-2.33.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:de9e06abe3cc5ec6a2d5f75bc99b0bdca4f5c719a5b34026f8c57efbdecd2ee3"},
- {file = "pydantic_core-2.33.1.tar.gz", hash = "sha256:bcc9c6fdb0ced789245b02b7d6603e17d1563064ddcfc36f046b61c0c05dd9df"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2b3d326aaef0c0399d9afffeb6367d5e26ddc24d351dbc9c636840ac355dc5d8"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e5b2671f05ba48b94cb90ce55d8bdcaaedb8ba00cc5359f6810fc918713983d"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0069c9acc3f3981b9ff4cdfaf088e98d83440a4c7ea1bc07460af3d4dc22e72d"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d53b22f2032c42eaaf025f7c40c2e3b94568ae077a606f006d206a463bc69572"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0405262705a123b7ce9f0b92f123334d67b70fd1f20a9372b907ce1080c7ba02"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4b25d91e288e2c4e0662b8038a28c6a07eaac3e196cfc4ff69de4ea3db992a1b"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bdfe4b3789761f3bcb4b1ddf33355a71079858958e3a552f16d5af19768fef2"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:efec8db3266b76ef9607c2c4c419bdb06bf335ae433b80816089ea7585816f6a"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:031c57d67ca86902726e0fae2214ce6770bbe2f710dc33063187a68744a5ecac"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:f8de619080e944347f5f20de29a975c2d815d9ddd8be9b9b7268e2e3ef68605a"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:73662edf539e72a9440129f231ed3757faab89630d291b784ca99237fb94db2b"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-win32.whl", hash = "sha256:0a39979dcbb70998b0e505fb1556a1d550a0781463ce84ebf915ba293ccb7e22"},
+ {file = "pydantic_core-2.33.2-cp310-cp310-win_amd64.whl", hash = "sha256:b0379a2b24882fef529ec3b4987cb5d003b9cda32256024e6fe1586ac45fc640"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:4c5b0a576fb381edd6d27f0a85915c6daf2f8138dc5c267a57c08a62900758c7"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e799c050df38a639db758c617ec771fd8fb7a5f8eaaa4b27b101f266b216a246"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dc46a01bf8d62f227d5ecee74178ffc448ff4e5197c756331f71efcc66dc980f"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a144d4f717285c6d9234a66778059f33a89096dfb9b39117663fd8413d582dcc"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:73cf6373c21bc80b2e0dc88444f41ae60b2f070ed02095754eb5a01df12256de"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3dc625f4aa79713512d1976fe9f0bc99f706a9dee21dfd1810b4bbbf228d0e8a"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:881b21b5549499972441da4758d662aeea93f1923f953e9cbaff14b8b9565aef"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bdc25f3681f7b78572699569514036afe3c243bc3059d3942624e936ec93450e"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:fe5b32187cbc0c862ee201ad66c30cf218e5ed468ec8dc1cf49dec66e160cc4d"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:bc7aee6f634a6f4a95676fcb5d6559a2c2a390330098dba5e5a5f28a2e4ada30"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:235f45e5dbcccf6bd99f9f472858849f73d11120d76ea8707115415f8e5ebebf"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-win32.whl", hash = "sha256:6368900c2d3ef09b69cb0b913f9f8263b03786e5b2a387706c5afb66800efd51"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-win_amd64.whl", hash = "sha256:1e063337ef9e9820c77acc768546325ebe04ee38b08703244c1309cccc4f1bab"},
+ {file = "pydantic_core-2.33.2-cp311-cp311-win_arm64.whl", hash = "sha256:6b99022f1d19bc32a4c2a0d544fc9a76e3be90f0b3f4af413f87d38749300e65"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a7ec89dc587667f22b6a0b6579c249fca9026ce7c333fc142ba42411fa243cdc"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3c6db6e52c6d70aa0d00d45cdb9b40f0433b96380071ea80b09277dba021ddf7"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e61206137cbc65e6d5256e1166f88331d3b6238e082d9f74613b9b765fb9025"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb8c529b2819c37140eb51b914153063d27ed88e3bdc31b71198a198e921e011"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c52b02ad8b4e2cf14ca7b3d918f3eb0ee91e63b3167c32591e57c4317e134f8f"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96081f1605125ba0855dfda83f6f3df5ec90c61195421ba72223de35ccfb2f88"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f57a69461af2a5fa6e6bbd7a5f60d3b7e6cebb687f55106933188e79ad155c1"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:572c7e6c8bb4774d2ac88929e3d1f12bc45714ae5ee6d9a788a9fb35e60bb04b"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:db4b41f9bd95fbe5acd76d89920336ba96f03e149097365afe1cb092fceb89a1"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:fa854f5cf7e33842a892e5c73f45327760bc7bc516339fda888c75ae60edaeb6"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5f483cfb75ff703095c59e365360cb73e00185e01aaea067cd19acffd2ab20ea"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-win32.whl", hash = "sha256:9cb1da0f5a471435a7bc7e439b8a728e8b61e59784b2af70d7c169f8dd8ae290"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-win_amd64.whl", hash = "sha256:f941635f2a3d96b2973e867144fde513665c87f13fe0e193c158ac51bfaaa7b2"},
+ {file = "pydantic_core-2.33.2-cp312-cp312-win_arm64.whl", hash = "sha256:cca3868ddfaccfbc4bfb1d608e2ccaaebe0ae628e1416aeb9c4d88c001bb45ab"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1082dd3e2d7109ad8b7da48e1d4710c8d06c253cbc4a27c1cff4fbcaa97a9e3f"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f517ca031dfc037a9c07e748cefd8d96235088b83b4f4ba8939105d20fa1dcd6"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a9f2c9dd19656823cb8250b0724ee9c60a82f3cdf68a080979d13092a3b0fef"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b0a451c263b01acebe51895bfb0e1cc842a5c666efe06cdf13846c7418caa9a"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ea40a64d23faa25e62a70ad163571c0b342b8bf66d5fa612ac0dec4f069d916"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fb2d542b4d66f9470e8065c5469ec676978d625a8b7a363f07d9a501a9cb36a"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdac5d6ffa1b5a83bca06ffe7583f5576555e6c8b3a91fbd25ea7780f825f7d"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04a1a413977ab517154eebb2d326da71638271477d6ad87a769102f7c2488c56"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c8e7af2f4e0194c22b5b37205bfb293d166a7344a5b0d0eaccebc376546d77d5"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:5c92edd15cd58b3c2d34873597a1e20f13094f59cf88068adb18947df5455b4e"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:65132b7b4a1c0beded5e057324b7e16e10910c106d43675d9bd87d4f38dde162"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-win32.whl", hash = "sha256:52fb90784e0a242bb96ec53f42196a17278855b0f31ac7c3cc6f5c1ec4811849"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-win_amd64.whl", hash = "sha256:c083a3bdd5a93dfe480f1125926afcdbf2917ae714bdb80b36d34318b2bec5d9"},
+ {file = "pydantic_core-2.33.2-cp313-cp313-win_arm64.whl", hash = "sha256:e80b087132752f6b3d714f041ccf74403799d3b23a72722ea2e6ba2e892555b9"},
+ {file = "pydantic_core-2.33.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:61c18fba8e5e9db3ab908620af374db0ac1baa69f0f32df4f61ae23f15e586ac"},
+ {file = "pydantic_core-2.33.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95237e53bb015f67b63c91af7518a62a8660376a6a0db19b89acc77a4d6199f5"},
+ {file = "pydantic_core-2.33.2-cp313-cp313t-win_amd64.whl", hash = "sha256:c2fc0a768ef76c15ab9238afa6da7f69895bb5d1ee83aeea2e3509af4472d0b9"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a2b911a5b90e0374d03813674bf0a5fbbb7741570dcd4b4e85a2e48d17def29d"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6fa6dfc3e4d1f734a34710f391ae822e0a8eb8559a85c6979e14e65ee6ba2954"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c54c939ee22dc8e2d545da79fc5381f1c020d6d3141d3bd747eab59164dc89fb"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53a57d2ed685940a504248187d5685e49eb5eef0f696853647bf37c418c538f7"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:09fb9dd6571aacd023fe6aaca316bd01cf60ab27240d7eb39ebd66a3a15293b4"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0e6116757f7959a712db11f3e9c0a99ade00a5bbedae83cb801985aa154f071b"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d55ab81c57b8ff8548c3e4947f119551253f4e3787a7bbc0b6b3ca47498a9d3"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c20c462aa4434b33a2661701b861604913f912254e441ab8d78d30485736115a"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:44857c3227d3fb5e753d5fe4a3420d6376fa594b07b621e220cd93703fe21782"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:eb9b459ca4df0e5c87deb59d37377461a538852765293f9e6ee834f0435a93b9"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9fcd347d2cc5c23b06de6d3b7b8275be558a0c90549495c699e379a80bf8379e"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-win32.whl", hash = "sha256:83aa99b1285bc8f038941ddf598501a86f1536789740991d7d8756e34f1e74d9"},
+ {file = "pydantic_core-2.33.2-cp39-cp39-win_amd64.whl", hash = "sha256:f481959862f57f29601ccced557cc2e817bce7533ab8e01a797a48b49c9692b3"},
+ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5c4aa4e82353f65e548c476b37e64189783aa5384903bfea4f41580f255fddfa"},
+ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d946c8bf0d5c24bf4fe333af284c59a19358aa3ec18cb3dc4370080da1e8ad29"},
+ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87b31b6846e361ef83fedb187bb5b4372d0da3f7e28d85415efa92d6125d6e6d"},
+ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa9d91b338f2df0508606f7009fde642391425189bba6d8c653afd80fd6bb64e"},
+ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2058a32994f1fde4ca0480ab9d1e75a0e8c87c22b53a3ae66554f9af78f2fe8c"},
+ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:0e03262ab796d986f978f79c943fc5f620381be7287148b8010b4097f79a39ec"},
+ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:1a8695a8d00c73e50bff9dfda4d540b7dee29ff9b8053e38380426a85ef10052"},
+ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:fa754d1850735a0b0e03bcffd9d4b4343eb417e47196e4485d9cca326073a42c"},
+ {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a11c8d26a50bfab49002947d3d237abe4d9e4b5bdc8846a63537b6488e197808"},
+ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:dd14041875d09cc0f9308e37a6f8b65f5585cf2598a53aa0123df8b129d481f8"},
+ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:d87c561733f66531dced0da6e864f44ebf89a8fba55f31407b00c2f7f9449593"},
+ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f82865531efd18d6e07a04a17331af02cb7a651583c418df8266f17a63c6612"},
+ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bfb5112df54209d820d7bf9317c7a6c9025ea52e49f46b6a2060104bba37de7"},
+ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:64632ff9d614e5eecfb495796ad51b0ed98c453e447a76bcbeeb69615079fc7e"},
+ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:f889f7a40498cc077332c7ab6b4608d296d852182211787d4f3ee377aaae66e8"},
+ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:de4b83bb311557e439b9e186f733f6c645b9417c84e2eb8203f3f820a4b988bf"},
+ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:82f68293f055f51b51ea42fafc74b6aad03e70e191799430b90c13d643059ebb"},
+ {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:329467cecfb529c925cf2bbd4d60d2c509bc2fb52a20c1045bf09bb70971a9c1"},
+ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:87acbfcf8e90ca885206e98359d7dca4bcbb35abdc0ff66672a293e1d7a19101"},
+ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:7f92c15cd1e97d4b12acd1cc9004fa092578acfa57b67ad5e43a197175d01a64"},
+ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3f26877a748dc4251cfcfda9dfb5f13fcb034f5308388066bcfe9031b63ae7d"},
+ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dac89aea9af8cd672fa7b510e7b8c33b0bba9a43186680550ccf23020f32d535"},
+ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:970919794d126ba8645f3837ab6046fb4e72bbc057b3709144066204c19a455d"},
+ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:3eb3fe62804e8f859c49ed20a8451342de53ed764150cb14ca71357c765dc2a6"},
+ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:3abcd9392a36025e3bd55f9bd38d908bd17962cc49bc6da8e7e96285336e2bca"},
+ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:3a1c81334778f9e3af2f8aeb7a960736e5cab1dfebfb26aabca09afd2906c039"},
+ {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2807668ba86cb38c6817ad9bc66215ab8584d1d304030ce4f0887336f28a5e27"},
+ {file = "pydantic_core-2.33.2.tar.gz", hash = "sha256:7cb8bc3605c29176e1b105350d2e6474142d7c1bd1d9327c4a9bdb46bf827acc"},
]
[package.dependencies]
@@ -1729,13 +1729,13 @@ files = [
[[package]]
name = "replicate"
-version = "1.0.4"
+version = "1.0.6"
description = "Python client for Replicate"
optional = false
python-versions = ">=3.8"
files = [
- {file = "replicate-1.0.4-py3-none-any.whl", hash = "sha256:f568f6271ff715067901b6094c23c37373bbcfd7de0ff9b85e9c9ead567e09e7"},
- {file = "replicate-1.0.4.tar.gz", hash = "sha256:f718601863ef1f419aa7dcdab1ea8770ba5489b571b86edf840cd506d68758ef"},
+ {file = "replicate-1.0.6-py3-none-any.whl", hash = "sha256:d544f837dc7e9dc3b3c1df60a145c7d6f362d6719b719793a44a4be28837103d"},
+ {file = "replicate-1.0.6.tar.gz", hash = "sha256:b8a0f1649ed4146c3d624e22a418b8c6decce9346cffc110c90fde5995c46e60"},
]
[package.dependencies]
diff --git a/pyproject.toml b/pyproject.toml
index ad96beec..73f2c3d4 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -3,7 +3,7 @@ name = "humanloop"
[tool.poetry]
name = "humanloop"
-version = "0.8.35"
+version = "0.8.36"
description = ""
readme = "README.md"
authors = []
diff --git a/reference.md b/reference.md
index 27a32c92..8d531f66 100644
--- a/reference.md
+++ b/reference.md
@@ -56,7 +56,7 @@ client.prompts.log(
messages=[{"role": "user", "content": "What really happened at Roswell?"}],
inputs={"person": "Trump"},
created_at=datetime.datetime.fromisoformat(
- "2024-07-18 21:29:35.178000+00:00",
+ "2024-07-18 23:29:35.178000+00:00",
),
provider_latency=6.5931549072265625,
output_message={
@@ -202,7 +202,12 @@ Controls how the model uses tools. The following options are supported:
-
-**prompt:** `typing.Optional[PromptKernelRequestParams]` — Details of your Prompt. A new Prompt version will be created if the provided details are new.
+**prompt:** `typing.Optional[PromptLogRequestPromptParams]`
+
+The prompt configuration to use. Two formats are supported:
+- A `'PromptKernelRequest'` object containing the prompt configuration
+- A string containing a serialized .prompt file
+A new Prompt version will be created if the provided details are new.
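To make the two accepted formats concrete — a minimal sketch; the path, model settings, and the .prompt file contents below are illustrative assumptions, not taken from this diff:

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Format 1: a PromptKernelRequest-style object holding the configuration.
client.prompts.log(
    path="qa-bot",  # hypothetical Prompt path
    prompt={"model": "gpt-4o", "temperature": 0.7},
    messages=[{"role": "user", "content": "Hello"}],
    output="Hi there!",
)

# Format 2: a string containing a serialized .prompt file
# (the file syntax shown here is a guess at the format).
client.prompts.log(
    path="qa-bot",
    prompt="---\nmodel: gpt-4o\ntemperature: 0.7\n---\n",
    messages=[{"role": "user", "content": "Hello"}],
    output="Hi there!",
)
```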
@@ -752,7 +757,12 @@ Controls how the model uses tools. The following options are supported:
-
-**prompt:** `typing.Optional[PromptKernelRequestParams]` — Details of your Prompt. A new Prompt version will be created if the provided details are new.
+**prompt:** `typing.Optional[PromptsCallStreamRequestPromptParams]`
+
+The prompt configuration to use. Two formats are supported:
+- A `'PromptKernelRequest'` object containing the prompt configuration
+- A string containing a serialized .prompt file
+A new Prompt version will be created if the provided details are new.
@@ -1026,7 +1036,12 @@ Controls how the model uses tools. The following options are supported:
-
-**prompt:** `typing.Optional[PromptKernelRequestParams]` — Details of your Prompt. A new Prompt version will be created if the provided details are new.
+**prompt:** `typing.Optional[PromptsCallRequestPromptParams]`
+
+The prompt configuration to use. Two formats are supported:
+- A `'PromptKernelRequest'` object containing the prompt configuration
+- A string containing a serialized .prompt file
+A new Prompt version will be created if the provided details are new.
@@ -1501,7 +1516,7 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
-
-**reasoning_effort:** `typing.Optional[ReasoningEffort]` — Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+**reasoning_effort:** `typing.Optional[PromptRequestReasoningEffortParams]` — Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an `OpenAIReasoningEffort` enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
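Concretely, the two accepted shapes look like this — a hedged sketch; the model names, path, and effort values are illustrative assumptions, and `reasoning_effort` is shown inside the prompt kernel dict since the diffstat adds it to `PromptKernelRequest` as well:

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# OpenAI reasoning models take an OpenAIReasoningEffort enum value
# (assumed levels such as "low" / "medium" / "high").
client.prompts.call(
    path="reasoner",  # hypothetical Prompt path
    prompt={"model": "o3-mini", "reasoning_effort": "medium"},
    messages=[{"role": "user", "content": "Outline a proof."}],
)

# Anthropic reasoning models take an integer: the maximum token budget
# the model may spend on reasoning before answering.
client.prompts.call(
    path="reasoner",
    prompt={"model": "claude-3-7-sonnet-latest", "reasoning_effort": 4096},
    messages=[{"role": "user", "content": "Outline a proof."}],
)
```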
@@ -2518,8 +2533,7 @@ client.prompts.update_monitoring(
-## Tools
-client.tools.log(...)
+client.prompts.serialize(...)
-
@@ -2531,15 +2545,13 @@ client.prompts.update_monitoring(
-
-Log to a Tool.
+Serialize a Prompt to the .prompt file format.
-You can use query parameters `version_id`, or `environment`, to target
-an existing version of the Tool. Otherwise the default deployed version will be chosen.
+Useful for storing the Prompt with your code in a version control system,
+or for editing with an AI tool.
-Instead of targeting an existing version explicitly, you can instead pass in
-Tool details in the request body. In this case, we will check if the details correspond
-to an existing version of the Tool, if not we will create a new version. This is helpful
-in the case where you are storing or deriving your Tool details in code.
+By default, the deployed version of the Prompt is returned. Use the query parameters
+`version_id` or `environment` to target a specific version of the Prompt.
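+
+A sketch of storing the serialized Prompt in version control (it assumes `serialize` returns the .prompt file contents as a string; the Prompt ID and file name are hypothetical):
+
+```python
+from pathlib import Path
+
+from humanloop import Humanloop
+
+client = Humanloop(api_key="YOUR_API_KEY")
+
+# Fetch the deployed version in .prompt format.
+serialized = client.prompts.serialize(id="pr_123abc")
+
+# Write it next to your code so it can be committed.
+Path("answer-bot.prompt").write_text(serialized)
+```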
@@ -2559,24 +2571,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.tools.log(
- path="math-tool",
- tool={
- "function": {
- "name": "multiply",
- "description": "Multiply two numbers",
- "parameters": {
- "type": "object",
- "properties": {
- "a": {"type": "number"},
- "b": {"type": "number"},
- },
- "required": ["a", "b"],
- },
- }
- },
- inputs={"a": 5, "b": 7},
- output="35",
+client.prompts.serialize(
+ id="id",
)
```
@@ -2593,7 +2589,7 @@ client.tools.log(
-
-**version_id:** `typing.Optional[str]` — A specific Version ID of the Tool to log to.
+**id:** `str` — Unique identifier for Prompt.
@@ -2601,7 +2597,7 @@ client.tools.log(
-
-**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Prompt to retrieve.
@@ -2609,7 +2605,7 @@ client.tools.log(
-
-**path:** `typing.Optional[str]` — Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
@@ -2617,31 +2613,72 @@ client.tools.log(
-
-**id:** `typing.Optional[str]` — ID for an existing Tool.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+client.prompts.deserialize(...)
-
-**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
-
+#### 📝 Description
+
+
+-
+
+
+-
+
+Deserialize a Prompt from the .prompt file format.
+
+This returns a subset of the attributes required by a Prompt: the fields
+that define the Prompt version (e.g. `model`, `temperature`, etc.).
+
+
+#### 🔌 Usage
+
-
-**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
-
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.prompts.deserialize(
+ prompt="prompt",
+)
+
+```
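+
+Since the returned object carries the version-defining fields, a round-trip from a file is a natural use (a sketch; the file name is hypothetical and the attribute names are assumed from the description above):
+
+```python
+from pathlib import Path
+
+from humanloop import Humanloop
+
+client = Humanloop(api_key="YOUR_API_KEY")
+
+# Read a .prompt file previously produced by client.prompts.serialize(...).
+serialized = Path("answer-bot.prompt").read_text()
+
+kernel = client.prompts.deserialize(prompt=serialized)
+print(kernel.model, kernel.temperature)
+```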
+
+
+#### ⚙️ Parameters
+
-
-**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+-
+
+**prompt:** `str` — The serialized Prompt (.prompt file contents) to deserialize.
@@ -2649,15 +2686,78 @@ client.tools.log(
-
-**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+## Tools
+client.tools.call(...)
-
-**error:** `typing.Optional[str]` — Error message if the log is an error.
+#### 📝 Description
+
+
+-
+
+
+-
+
+Call a Tool.
+
+Calling a Tool with inputs runs the tool's source code and logs the result and metadata to Humanloop.
+
+You can use the query parameters `version_id` or `environment` to target
+an existing version of the Tool. Otherwise, the default deployed version will be chosen.
+
+Instead of targeting an existing version explicitly, you can instead pass in
+Tool details in the request body. In this case, we will check if the details correspond
+to an existing version of the Tool. If they do not, we will create a new version. This is helpful
+in the case where you are storing or deriving your Tool details in code.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.tools.call()
+
+```
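+
+The bare call above targets the default deployed version. A fuller sketch targeting a Tool by path and passing inputs (the `inputs` parameter is assumed to mirror the Tool log schema):
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+    api_key="YOUR_API_KEY",
+)
+client.tools.call(
+    path="math-tool",
+    inputs={"a": 5, "b": 7},
+)
+```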
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Tool to call.
@@ -2665,7 +2765,7 @@ client.tools.log(
-
-**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
+**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to call.
@@ -2673,7 +2773,7 @@ client.tools.log(
-
-**stdout:** `typing.Optional[str]` — Captured log and debug statements.
+**path:** `typing.Optional[str]` — Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
@@ -2681,7 +2781,7 @@ client.tools.log(
-
-**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider.
+**id:** `typing.Optional[str]` — ID for an existing Tool.
@@ -2689,7 +2789,7 @@ client.tools.log(
-
-**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received the provider.
+**tool:** `typing.Optional[ToolKernelRequestParams]` — Details of your Tool. A new Tool version will be created if the provided details are new.
@@ -2721,7 +2821,7 @@ client.tools.log(
-
-**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
@@ -2729,7 +2829,7 @@ client.tools.log(
-
-**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
@@ -2737,7 +2837,7 @@ client.tools.log(
-
-**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
+**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
@@ -2745,7 +2845,7 @@ client.tools.log(
-
-**user:** `typing.Optional[str]` — End-user ID related to the Log.
+**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -2753,7 +2853,7 @@ client.tools.log(
-
-**tool_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to.
+**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
@@ -2761,7 +2861,7 @@ client.tools.log(
-
-**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
+**user:** `typing.Optional[str]` — End-user ID related to the Log.
@@ -2769,7 +2869,7 @@ client.tools.log(
-
-**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+**tool_call_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to.
@@ -2777,7 +2877,15 @@ client.tools.log(
-
-**tool:** `typing.Optional[ToolKernelRequestParams]` — Details of your Tool. A new Tool version will be created if the provided details are new.
+**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
+
+
+
+
+
+-
+
+**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
@@ -2797,7 +2905,7 @@ client.tools.log(
-client.tools.update(...)
+client.tools.log(...)
-
@@ -2809,9 +2917,15 @@ client.tools.log(
-
-Update a Log.
+Log to a Tool.
-Update the details of a Log with the given ID.
+You can use the query parameters `version_id` or `environment` to target
+an existing version of the Tool. Otherwise, the default deployed version will be chosen.
+
+Instead of targeting an existing version explicitly, you can instead pass in
+Tool details in the request body. In this case, we will check if the details correspond
+to an existing version of the Tool; if not, we will create a new version. This is helpful
+in the case where you are storing or deriving your Tool details in code.
@@ -2831,9 +2945,24 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.tools.update(
- id="id",
- log_id="log_id",
+client.tools.log(
+ path="math-tool",
+ tool={
+ "function": {
+ "name": "multiply",
+ "description": "Multiply two numbers",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "a": {"type": "number"},
+ "b": {"type": "number"},
+ },
+ "required": ["a", "b"],
+ },
+ }
+ },
+ inputs={"a": 5, "b": 7},
+ output="35",
)
```
@@ -2850,7 +2979,7 @@ client.tools.update(
-
-**id:** `str` — Unique identifier for Prompt.
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Tool to log to.
@@ -2858,7 +2987,7 @@ client.tools.update(
-
-**log_id:** `str` — Unique identifier for the Log.
+**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
@@ -2866,7 +2995,7 @@ client.tools.update(
-
-**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+**path:** `typing.Optional[str]` — Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
@@ -2874,7 +3003,7 @@ client.tools.update(
-
-**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
+**id:** `typing.Optional[str]` — ID for an existing Tool.
@@ -2882,7 +3011,7 @@ client.tools.update(
-
-**error:** `typing.Optional[str]` — Error message if the log is an error.
+**tool:** `typing.Optional[ToolKernelRequestParams]` — Details of your Tool. A new Tool version will be created if the provided details are new.
@@ -2890,7 +3019,7 @@ client.tools.update(
-
-**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
+**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
@@ -2898,7 +3027,7 @@ client.tools.update(
-
-**stdout:** `typing.Optional[str]` — Captured log and debug statements.
+**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
@@ -2906,7 +3035,7 @@ client.tools.update(
-
-**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider.
+**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
@@ -2914,7 +3043,7 @@ client.tools.update(
-
-**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received the provider.
+**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
@@ -2922,7 +3051,7 @@ client.tools.update(
-
-**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
+**error:** `typing.Optional[str]` — Error message if the log is an error.
@@ -2930,7 +3059,7 @@ client.tools.update(
-
-**source:** `typing.Optional[str]` — Identifies where the model was called from.
+**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
@@ -2938,7 +3067,7 @@ client.tools.update(
-
-**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
+**stdout:** `typing.Optional[str]` — Captured log and debug statements.
@@ -2946,7 +3075,7 @@ client.tools.update(
-
-**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
+**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider.
@@ -2954,7 +3083,7 @@ client.tools.update(
-
-**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
+**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received from the provider.
@@ -2962,7 +3091,7 @@ client.tools.update(
-
-**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
@@ -2970,74 +3099,31 @@ client.tools.update(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**source:** `typing.Optional[str]` — Identifies where the model was called from.
-
-
+
+-
+**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
+
-
-client.tools.list(...)
-
-#### 📝 Description
-
-
--
+**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+
+
-
-Get a list of all Tools.
-
-
-
-
-
-#### 🔌 Usage
-
-
--
-
-
--
-
-```python
-from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-response = client.tools.list(
- size=1,
-)
-for item in response:
- yield item
-# alternatively, you can paginate page-by-page
-for page in response.iter_pages():
- yield page
-
-```
-
-
-
-
-
-#### ⚙️ Parameters
-
-
--
-
-
--
-
-**page:** `typing.Optional[int]` — Page offset for pagination.
+**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -3045,7 +3131,7 @@ for page in response.iter_pages():
-
-**size:** `typing.Optional[int]` — Page size for pagination. Number of Tools to fetch.
+**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
@@ -3053,7 +3139,7 @@ for page in response.iter_pages():
-
-**name:** `typing.Optional[str]` — Case-insensitive filter for Tool name.
+**user:** `typing.Optional[str]` — End-user ID related to the Log.
@@ -3061,7 +3147,7 @@ for page in response.iter_pages():
-
-**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Tool. This filter matches against both email address and name of users.
+**tool_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to.
@@ -3069,7 +3155,7 @@ for page in response.iter_pages():
-
-**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Tools by
+**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
@@ -3077,7 +3163,7 @@ for page in response.iter_pages():
-
-**order:** `typing.Optional[SortOrder]` — Direction to sort by.
+**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
@@ -3097,7 +3183,7 @@ for page in response.iter_pages():
-client.tools.upsert(...)
+client.tools.update(...)
-
@@ -3109,13 +3195,9 @@ for page in response.iter_pages():
-
-Create a Tool or update it with a new version if it already exists.
-
-Tools are identified by the `ID` or their `path`. The name, description and parameters determine the versions of the Tool.
+Update a Log.
-You can provide `version_name` and `version_description` to identify and describe your versions.
-Version names must be unique within a Tool - attempting to create a version with a name
-that already exists will result in a 409 Conflict error.
+Update the details of a Log with the given ID.
@@ -3135,19 +3217,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.tools.upsert(
- path="math-tool",
- function={
- "name": "multiply",
- "description": "Multiply two numbers",
- "parameters": {
- "type": "object",
- "properties": {"a": {"type": "number"}, "b": {"type": "number"}},
- "required": ["a", "b"],
- },
- },
- version_name="math-tool-v1",
- version_description="Simple math tool that multiplies two numbers",
+client.tools.update(
+ id="id",
+ log_id="log_id",
)
```
@@ -3164,7 +3236,7 @@ client.tools.upsert(
-
-**path:** `typing.Optional[str]` — Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+**id:** `str` — Unique identifier for Tool.
@@ -3172,7 +3244,7 @@ client.tools.upsert(
-
-**id:** `typing.Optional[str]` — ID for an existing Tool.
+**log_id:** `str` — Unique identifier for the Log.
@@ -3180,7 +3252,7 @@ client.tools.upsert(
-
-**function:** `typing.Optional[ToolFunctionParams]` — Callable function specification of the Tool shown to the model for tool calling.
+**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
@@ -3188,7 +3260,7 @@ client.tools.upsert(
-
-**source_code:** `typing.Optional[str]` — Code source of the Tool.
+**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
@@ -3196,7 +3268,7 @@ client.tools.upsert(
-
-**setup_values:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Values needed to setup the Tool, defined in JSON Schema format: https://json-schema.org/
+**error:** `typing.Optional[str]` — Error message if the log is an error.
@@ -3204,7 +3276,7 @@ client.tools.upsert(
-
-**attributes:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Additional fields to describe the Tool. Helpful to separate Tool versions from each other with details on how they were created or used.
+**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
@@ -3212,7 +3284,7 @@ client.tools.upsert(
-
-**tool_type:** `typing.Optional[FilesToolType]` — Type of Tool.
+**stdout:** `typing.Optional[str]` — Captured log and debug statements.
@@ -3220,7 +3292,7 @@ client.tools.upsert(
-
-**version_name:** `typing.Optional[str]` — Unique identifier for this Tool version. Each Tool can only have one version with a given name.
+**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider.
@@ -3228,7 +3300,7 @@ client.tools.upsert(
-
-**version_description:** `typing.Optional[str]` — Description of the Version.
+**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received from the provider.
@@ -3236,72 +3308,31 @@ client.tools.upsert(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
-
-
-
-
-
-
-
-
-client.tools.get(...)
-
--
-
-#### 📝 Description
-
-
--
-
-Retrieve the Tool with the given ID.
-
-By default, the deployed version of the Tool is returned. Use the query parameters
-`version_id` or `environment` to target a specific version of the Tool.
-
-
+**source:** `typing.Optional[str]` — Identifies where the model was called from.
+
-#### 🔌 Usage
-
-
--
-
-
-```python
-from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.tools.get(
- id="tl_789ghi",
-)
-
-```
-
-
+**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
+
-#### ⚙️ Parameters
-
-
--
-
-
-**id:** `str` — Unique identifier for Tool.
+**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
@@ -3309,7 +3340,7 @@ client.tools.get(
-
-**version_id:** `typing.Optional[str]` — A specific Version ID of the Tool to retrieve.
+**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
@@ -3317,7 +3348,7 @@ client.tools.get(
-
-**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
+**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
@@ -3337,7 +3368,7 @@ client.tools.get(
-client.tools.delete(...)
+client.tools.list(...)
-
@@ -3349,7 +3380,7 @@ client.tools.get(
-
-Delete the Tool with the given ID.
+Get a list of all Tools.
@@ -3369,9 +3400,14 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.tools.delete(
- id="tl_789ghi",
+response = client.tools.list(
+ size=1,
)
+for item in response:
+    print(item)
+# alternatively, you can paginate page-by-page
+for page in response.iter_pages():
+    print(page)
```
@@ -3387,7 +3423,7 @@ client.tools.delete(
-
-**id:** `str` — Unique identifier for Tool.
+**page:** `typing.Optional[int]` — Page offset for pagination.
@@ -3395,70 +3431,23 @@ client.tools.delete(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**size:** `typing.Optional[int]` — Page size for pagination. Number of Tools to fetch.
-
-
-
-
-
-
-
-
-client.tools.move(...)
-
--
-
-#### 📝 Description
-
-
--
-
-
--
-
-Move the Tool to a different path or change the name.
-
-
-
-
-
-#### 🔌 Usage
-
-
--
-
-```python
-from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.tools.move(
- id="tl_789ghi",
- path="new directory/new name",
-)
-
-```
-
-
+**name:** `typing.Optional[str]` — Case-insensitive filter for Tool name.
+
-#### ⚙️ Parameters
-
-
--
-
-
-**id:** `str` — Unique identifier for Tool.
+**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Tool. This filter matches against both email address and name of users.
@@ -3466,7 +3455,7 @@ client.tools.move(
-
-**path:** `typing.Optional[str]` — Path of the Tool including the Tool name, which is used as a unique identifier.
+**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Tools by.
@@ -3474,7 +3463,7 @@ client.tools.move(
-
-**name:** `typing.Optional[str]` — Name of the Tool, which is used as a unique identifier.
+**order:** `typing.Optional[SortOrder]` — Direction to sort by.
@@ -3494,7 +3483,7 @@ client.tools.move(
-client.tools.list_versions(...)
+client.tools.upsert(...)
-
@@ -3506,7 +3495,13 @@ client.tools.move(
-
-Get a list of all the versions of a Tool.
+Create a Tool or update it with a new version if it already exists.
+
+Tools are identified by the `ID` or their `path`. The name, description and parameters determine the versions of the Tool.
+
+You can provide `version_name` and `version_description` to identify and describe your versions.
+Version names must be unique within a Tool - attempting to create a version with a name
+that already exists will result in a 409 Conflict error.
@@ -3526,8 +3521,19 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.tools.list_versions(
- id="tl_789ghi",
+client.tools.upsert(
+ path="math-tool",
+ function={
+ "name": "multiply",
+ "description": "Multiply two numbers",
+ "parameters": {
+ "type": "object",
+ "properties": {"a": {"type": "number"}, "b": {"type": "number"}},
+ "required": ["a", "b"],
+ },
+ },
+ version_name="math-tool-v1",
+ version_description="Simple math tool that multiplies two numbers",
)
```
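+
+Because version names must be unique, reusing one returns a 409 Conflict. A sketch of guarding against that (it assumes the Fern-generated `ApiError` with a `status_code` attribute):
+
+```python
+from humanloop import Humanloop
+from humanloop.core.api_error import ApiError
+
+client = Humanloop(api_key="YOUR_API_KEY")
+
+try:
+    client.tools.upsert(
+        path="math-tool",
+        function={"name": "multiply", "parameters": {"type": "object"}},
+        version_name="math-tool-v1",
+    )
+except ApiError as err:
+    if err.status_code == 409:
+        print("Version name already taken; choose a unique version_name.")
+    else:
+        raise
+```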
@@ -3544,7 +3550,7 @@ client.tools.list_versions(
-
-**id:** `str` — Unique identifier for the Tool.
+**path:** `typing.Optional[str]` — Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
@@ -3552,7 +3558,63 @@ client.tools.list_versions(
-
-**evaluator_aggregates:** `typing.Optional[bool]` — Whether to include Evaluator aggregate results for the versions in the response
+**id:** `typing.Optional[str]` — ID for an existing Tool.
+
+
+
+
+
+-
+
+**function:** `typing.Optional[ToolFunctionParams]` — Callable function specification of the Tool shown to the model for tool calling.
+
+
+
+
+
+-
+
+**source_code:** `typing.Optional[str]` — Code source of the Tool.
+
+
+
+
+
+-
+
+**setup_values:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Values needed to set up the Tool, defined in JSON Schema format: https://json-schema.org/
+
+
+
+
+
+-
+
+**attributes:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Additional fields to describe the Tool. Helpful to separate Tool versions from each other with details on how they were created or used.
+
+
+
+
+
+-
+
+**tool_type:** `typing.Optional[FilesToolType]` — Type of Tool.
+
+
+
+
+
+-
+
+**version_name:** `typing.Optional[str]` — Unique identifier for this Tool version. Each Tool can only have one version with a given name.
+
+
+
+
+
+-
+
+**version_description:** `typing.Optional[str]` — Description of the Version.
@@ -3572,7 +3634,7 @@ client.tools.list_versions(
-client.tools.delete_tool_version(...)
+client.tools.get(...)
-
@@ -3584,7 +3646,10 @@ client.tools.list_versions(
-
-Delete a version of the Tool.
+Retrieve the Tool with the given ID.
+
+By default, the deployed version of the Tool is returned. Use the query parameters
+`version_id` or `environment` to target a specific version of the Tool.
@@ -3604,9 +3669,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.tools.delete_tool_version(
- id="id",
- version_id="version_id",
+client.tools.get(
+ id="tl_789ghi",
)
```
@@ -3631,7 +3695,15 @@ client.tools.delete_tool_version(
-
-**version_id:** `str` — Unique identifier for the specific version of the Tool.
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Tool to retrieve.
+
+
+
+
+
+-
+
+**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
@@ -3651,7 +3723,7 @@ client.tools.delete_tool_version(
-client.tools.update_tool_version(...)
+client.tools.delete(...)
-
@@ -3663,7 +3735,7 @@ client.tools.delete_tool_version(
-
-Update the name or description of the Tool version.
+Delete the Tool with the given ID.
@@ -3683,9 +3755,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.tools.update_tool_version(
- id="id",
- version_id="version_id",
+client.tools.delete(
+ id="tl_789ghi",
)
```
@@ -3710,30 +3781,6 @@ client.tools.update_tool_version(
-
-**version_id:** `str` — Unique identifier for the specific version of the Tool.
-
-
-
-
-
--
-
-**name:** `typing.Optional[str]` — Name of the version.
-
-
-
-
-
--
-
-**description:** `typing.Optional[str]` — Description of the version.
-
-
-
-
-
--
-
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -3746,7 +3793,7 @@ client.tools.update_tool_version(
-client.tools.set_deployment(...)
+client.tools.move(...)
-
@@ -3758,10 +3805,7 @@ client.tools.update_tool_version(
-
-Deploy Tool to an Environment.
-
-Set the deployed version for the specified Environment. This Prompt
-will be used for calls made to the Tool in this Environment.
+Move the Tool to a different path or change the name.
@@ -3781,10 +3825,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.tools.set_deployment(
+client.tools.move(
id="tl_789ghi",
- environment_id="staging",
- version_id="tv_012jkl",
+ path="new directory/new name",
)
```
@@ -3809,7 +3852,7 @@ client.tools.set_deployment(
-
-**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
+**path:** `typing.Optional[str]` — Path of the Tool including the Tool name, which is used as a unique identifier.
@@ -3817,7 +3860,7 @@ client.tools.set_deployment(
-
-**version_id:** `str` — Unique identifier for the specific version of the Tool.
+**name:** `typing.Optional[str]` — Name of the Tool, which is used as a unique identifier.
@@ -3837,7 +3880,7 @@ client.tools.set_deployment(
-client.tools.remove_deployment(...)
+client.tools.list_versions(...)
-
@@ -3849,10 +3892,7 @@ client.tools.set_deployment(
-
-Remove deployed Tool from the Environment.
-
-Remove the deployed version for the specified Environment. This Tool
-will no longer be used for calls made to the Tool in this Environment.
+Get a list of all the versions of a Tool.
@@ -3872,9 +3912,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.tools.remove_deployment(
+client.tools.list_versions(
id="tl_789ghi",
- environment_id="staging",
)
```
@@ -3891,7 +3930,7 @@ client.tools.remove_deployment(
-
-**id:** `str` — Unique identifier for Tool.
+**id:** `str` — Unique identifier for the Tool.
@@ -3899,7 +3938,7 @@ client.tools.remove_deployment(
-
-**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
+**evaluator_aggregates:** `typing.Optional[bool]` — Whether to include Evaluator aggregate results for the versions in the response.
@@ -3919,7 +3958,7 @@ client.tools.remove_deployment(
-client.tools.list_environments(...)
+client.tools.delete_tool_version(...)
-
@@ -3931,7 +3970,7 @@ client.tools.remove_deployment(
-
-List all Environments and their deployed versions for the Tool.
+Delete a version of the Tool.
@@ -3951,8 +3990,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.tools.list_environments(
- id="tl_789ghi",
+client.tools.delete_tool_version(
+ id="id",
+ version_id="version_id",
)
```
@@ -3977,6 +4017,14 @@ client.tools.list_environments(
-
+**version_id:** `str` — Unique identifier for the specific version of the Tool.
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -3989,7 +4037,7 @@ client.tools.list_environments(
-client.tools.update_monitoring(...)
+client.tools.update_tool_version(...)
-
@@ -4001,10 +4049,7 @@ client.tools.list_environments(
-
-Activate and deactivate Evaluators for monitoring the Tool.
-
-An activated Evaluator will automatically be run on all new Logs
-within the Tool for monitoring purposes.
+Update the name or description of the Tool version.
@@ -4024,9 +4069,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.tools.update_monitoring(
- id="tl_789ghi",
- activate=[{"evaluator_version_id": "evv_1abc4308abd"}],
+client.tools.update_tool_version(
+ id="id",
+ version_id="version_id",
)
```
@@ -4043,7 +4088,7 @@ client.tools.update_monitoring(
-
-**id:** `str`
+**id:** `str` — Unique identifier for Tool.
@@ -4051,9 +4096,7 @@ client.tools.update_monitoring(
-
-**activate:** `typing.Optional[
- typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]
-]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+**version_id:** `str` — Unique identifier for the specific version of the Tool.
@@ -4061,9 +4104,15 @@ client.tools.update_monitoring(
-
-**deactivate:** `typing.Optional[
- typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]
-]` — Evaluators to deactivate. These will not be run on new Logs.
+**name:** `typing.Optional[str]` — Name of the version.
+
+
+
+
+
+-
+
+**description:** `typing.Optional[str]` — Description of the version.
@@ -4083,8 +4132,7 @@ client.tools.update_monitoring(
-## Datasets
-client.datasets.list(...)
+client.tools.set_deployment(...)
-
@@ -4096,7 +4144,10 @@ client.tools.update_monitoring(
-
-List all Datasets.
+Deploy Tool to an Environment.
+
+Set the deployed version for the specified Environment. This version
+will be used for calls made to the Tool in this Environment.
@@ -4116,14 +4167,11 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-response = client.datasets.list(
- size=1,
+client.tools.set_deployment(
+ id="tl_789ghi",
+ environment_id="staging",
+ version_id="tv_012jkl",
)
-for item in response:
- yield item
-# alternatively, you can paginate page-by-page
-for page in response.iter_pages():
- yield page
```
@@ -4139,7 +4187,7 @@ for page in response.iter_pages():
-
-**page:** `typing.Optional[int]` — Page offset for pagination.
+**id:** `str` — Unique identifier for Tool.
@@ -4147,7 +4195,7 @@ for page in response.iter_pages():
-
-**size:** `typing.Optional[int]` — Page size for pagination. Number of Datasets to fetch.
+**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
@@ -4155,7 +4203,7 @@ for page in response.iter_pages():
-
-**name:** `typing.Optional[str]` — Case-insensitive filter for Dataset name.
+**version_id:** `str` — Unique identifier for the specific version of the Tool.
@@ -4163,47 +4211,40 @@ for page in response.iter_pages():
-
-**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Dataset. This filter matches against both email address and name of users.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Datasets by
-
+
+client.tools.remove_deployment(...)
-
-**order:** `typing.Optional[SortOrder]` — Direction to sort by.
-
-
-
+#### 📝 Description
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
-
-
-
-
+
+-
+Remove deployed Tool from the Environment.
+Remove the deployed version for the specified Environment. This version
+will no longer be used for calls made to the Tool in this Environment.
+
+
-
-
-client.datasets.upsert(...)
-
--
-#### 📝 Description
+#### 🔌 Usage
-
@@ -4211,70 +4252,15 @@ for page in response.iter_pages():
-
-Create a Dataset or update it with a new version if it already exists.
-
-Datasets are identified by the `ID` or their `path`. The datapoints determine the versions of the Dataset.
-
-By default, the new Dataset version will be set to the list of Datapoints provided in
-the request. You can also create a new version by adding or removing Datapoints from an existing version
-by specifying `action` as `add` or `remove` respectively. In this case, you may specify
-the `version_id` or `environment` query parameters to identify the existing version to base
-the new version on. If neither is provided, the latest created version will be used.
-
-You can provide `version_name` and `version_description` to identify and describe your versions.
-Version names must be unique within a Dataset - attempting to create a version with a name
-that already exists will result in a 409 Conflict error.
-
-Humanloop also deduplicates Datapoints. If you try to add a Datapoint that already
-exists, it will be ignored. If you intentionally want to add a duplicate Datapoint,
-you can add a unique identifier to the Datapoint's inputs such as `{_dedupe_id: }`.
-
-
-
-
-
-#### 🔌 Usage
-
-
--
-
-
--
-
-```python
-from humanloop import Humanloop
+```python
+from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.datasets.upsert(
- path="datasets/support-queries",
- datapoints=[
- {
- "messages": [
- {
- "role": "user",
- "content": "How do i manage my organizations API keys?\n",
- }
- ],
- "target": {
- "response": 'Hey, thanks for your questions. Here are steps for how to achieve: 1. Log in to the Humanloop Dashboard \n\n2. Click on "Organization Settings."\n If you do not see this option, you might need to contact your organization admin to gain the necessary permissions.\n\n3. Within the settings or organization settings, select the option labeled "API Keys" on the left. Here you will be able to view and manage your API keys.\n\n4. You will see a list of existing API keys. You can perform various actions, such as:\n - **Generate New API Key:** Click on the "Generate New Key" button if you need a new API key.\n - **Revoke an API Key:** If you need to disable an existing key, find the key in the list and click the "Revoke" or "Delete" button.\n - **Copy an API Key:** If you need to use an existing key, you can copy it to your clipboard by clicking the "Copy" button next to the key.\n\n5. **Save and Secure API Keys:** Make sure to securely store any new or existing API keys you are using. Treat them like passwords and do not share them publicly.\n\nIf you encounter any issues or need further assistance, it might be helpful to engage with an engineer or your IT department to ensure you have the necessary permissions and support.\n\nWould you need help with anything else?'
- },
- },
- {
- "messages": [
- {
- "role": "user",
- "content": "Hey, can do I use my code evaluator for monitoring my legal-copilot prompt?",
- }
- ],
- "target": {
- "response": "Hey, thanks for your questions. Here are steps for how to achieve: 1. Navigate to your Prompt dashboard. \n 2. Select the `Monitoring` button on the top right of the Prompt dashboard \n 3. Within the model select the Version of the Evaluator you want to turn on for monitoring. \n\nWould you need help with anything else?"
- },
- },
- ],
- version_name="Initial version",
- version_description="Add two new questions and answers",
+client.tools.remove_deployment(
+ id="tl_789ghi",
+ environment_id="staging",
)
```
@@ -4291,7 +4277,7 @@ client.datasets.upsert(
-
-**datapoints:** `typing.Sequence[CreateDatapointRequestParams]` — The Datapoints to create this Dataset version with. Modify the `action` field to determine how these Datapoints are used.
+**id:** `str` — Unique identifier for Tool.
@@ -4299,7 +4285,7 @@ client.datasets.upsert(
-
-**version_id:** `typing.Optional[str]` — ID of the specific Dataset version to base the created Version on. Only used when `action` is `"add"` or `"remove"`.
+**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
@@ -4307,71 +4293,69 @@ client.datasets.upsert(
-
-**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed Version to base the created Version on. Only used when `action` is `"add"` or `"remove"`.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**include_datapoints:** `typing.Optional[bool]` — If set to `true`, include all Datapoints in the response. Defaults to `false`. Consider using the paginated List Datapoints endpoint instead.
-
+
+client.tools.list_environments(...)
-
-**path:** `typing.Optional[str]` — Path of the Dataset, including the name. This locates the Dataset in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
-
-
-
+#### 📝 Description
-
-**id:** `typing.Optional[str]` — ID for an existing Dataset.
-
+
+-
+
+List all Environments and their deployed versions for the Tool.
+
+
+#### 🔌 Usage
+
-
-**action:** `typing.Optional[UpdateDatesetAction]`
+
+-
-The action to take with the provided Datapoints.
+```python
+from humanloop import Humanloop
- - If `"set"`, the created version will only contain the Datapoints provided in this request.
- - If `"add"`, the created version will contain the Datapoints provided in this request in addition to the Datapoints in the target version.
- - If `"remove"`, the created version will contain the Datapoints in the target version except for the Datapoints provided in this request.
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.tools.list_environments(
+ id="tl_789ghi",
+)
-If `"add"` or `"remove"`, one of the `version_id` or `environment` query parameters may be provided.
-
+```
-
-
--
-
-**attributes:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Additional fields to describe the Dataset. Helpful to separate Dataset versions from each other with details on how they were created or used.
-
+#### ⚙️ Parameters
+
-
-**version_name:** `typing.Optional[str]` — Unique name for the Dataset version. Version names must be unique for a given Dataset.
-
-
-
-
-
-**version_description:** `typing.Optional[str]` — Description of the version, e.g., the changes made in this version.
+**id:** `str` — Unique identifier for Tool.
@@ -4391,7 +4375,7 @@ If `"add"` or `"remove"`, one of the `version_id` or `environment` query paramet
-client.datasets.get(...)
+client.tools.update_monitoring(...)
-
@@ -4403,15 +4387,10 @@ If `"add"` or `"remove"`, one of the `version_id` or `environment` query paramet
-
-Retrieve the Dataset with the given ID.
-
-Unless `include_datapoints` is set to `true`, the response will not include
-the Datapoints.
-Use the List Datapoints endpoint (`GET /{id}/datapoints`) to efficiently
-retrieve Datapoints for a large Dataset.
+Activate and deactivate Evaluators for monitoring the Tool.
-By default, the deployed version of the Dataset is returned. Use the query parameters
-`version_id` or `environment` to target a specific version of the Dataset.
+An activated Evaluator will automatically be run on all new Logs
+within the Tool for monitoring purposes.
@@ -4431,10 +4410,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.datasets.get(
- id="ds_b0baF1ca7652",
- version_id="dsv_6L78pqrdFi2xa",
- include_datapoints=True,
+client.tools.update_monitoring(
+ id="tl_789ghi",
+ activate=[{"evaluator_version_id": "evv_1abc4308abd"}],
)
```
@@ -4451,15 +4429,7 @@ client.datasets.get(
-
-**id:** `str` — Unique identifier for Dataset.
-
-
-
-
-
--
-
-**version_id:** `typing.Optional[str]` — A specific Version ID of the Dataset to retrieve.
+**id:** `str`
@@ -4467,7 +4437,9 @@ client.datasets.get(
-
-**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
+**activate:** `typing.Optional[
+ typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]
+]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
@@ -4475,7 +4447,9 @@ client.datasets.get(
-
-**include_datapoints:** `typing.Optional[bool]` — If set to `true`, include all Datapoints in the response. Defaults to `false`. Consider using the paginated List Datapoints endpoint instead.
+**deactivate:** `typing.Optional[
+ typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]
+]` — Evaluators to deactivate. These will not be run on new Logs.
@@ -4495,24 +4469,10 @@ client.datasets.get(
-client.datasets.delete(...)
-
--
-
-#### 📝 Description
-
-
--
-
+
client.tools.get_environment_variables(...)
-
-Delete the Dataset with the given ID.
-
-
-
-
-
#### 🔌 Usage
@@ -4527,7 +4487,7 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.datasets.delete(
+client.tools.get_environment_variables(
id="id",
)
@@ -4545,7 +4505,7 @@ client.datasets.delete(
-
-**id:** `str` — Unique identifier for Dataset.
+**id:** `str` — Unique identifier for File.
@@ -4565,7 +4525,7 @@ client.datasets.delete(
-client.datasets.move(...)
+client.tools.add_environment_variable(...)
-
@@ -4577,7 +4537,7 @@ client.datasets.delete(
-
-Move the Dataset to a different path or change the name.
+Add an environment variable to a Tool.
@@ -4597,8 +4557,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.datasets.move(
+client.tools.add_environment_variable(
id="id",
+ request=[{"name": "name", "value": "value"}],
)
```
@@ -4615,15 +4576,7 @@ client.datasets.move(
-
-**id:** `str` — Unique identifier for Dataset.
-
-
-
-
-
--
-
-**path:** `typing.Optional[str]` — Path of the Dataset including the Dataset name, which is used as a unique identifier.
+**id:** `str` — Unique identifier for Tool.
@@ -4631,7 +4584,7 @@ client.datasets.move(
-
-**name:** `typing.Optional[str]` — Name of the Dataset, which is used as a unique identifier.
+**request:** `typing.Sequence[FileEnvironmentVariableRequestParams]`
@@ -4651,24 +4604,10 @@ client.datasets.move(
-client.datasets.list_datapoints(...)
-
--
-
-#### 📝 Description
-
-
--
-
+
client.tools.delete_environment_variable(...)
-
-List all Datapoints for the Dataset with the given ID.
-
-
-
-
-
#### 🔌 Usage
@@ -4683,15 +4622,10 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-response = client.datasets.list_datapoints(
- id="ds_b0baF1ca7652",
- size=1,
+client.tools.delete_environment_variable(
+ id="id",
+ name="name",
)
-for item in response:
- yield item
-# alternatively, you can paginate page-by-page
-for page in response.iter_pages():
- yield page
```
@@ -4707,31 +4641,7 @@ for page in response.iter_pages():
-
-**id:** `str` — Unique identifier for Dataset.
-
-
-
-
-
--
-
-**version_id:** `typing.Optional[str]` — A specific Version ID of the Dataset to retrieve.
-
-
-
-
-
--
-
-**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
-
-
-
-
-
--
-
-**page:** `typing.Optional[int]` — Page number for pagination.
+**id:** `str` — Unique identifier for File.
@@ -4739,7 +4649,7 @@ for page in response.iter_pages():
-
-**size:** `typing.Optional[int]` — Page size for pagination. Number of Datapoints to fetch.
+**name:** `str` — Name of the Environment Variable to delete.
@@ -4759,7 +4669,8 @@ for page in response.iter_pages():
-client.datasets.list_versions(...)
+## Datasets
+client.datasets.list(...)
-
@@ -4771,7 +4682,7 @@ for page in response.iter_pages():
-
-Get a list of the versions for a Dataset.
+List all Datasets.
@@ -4791,9 +4702,14 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.datasets.list_versions(
- id="ds_b0baF1ca7652",
+response = client.datasets.list(
+ size=1,
)
+for item in response:
+    print(item)
+# alternatively, you can paginate page-by-page
+for page in response.iter_pages():
+    print(page)
```
@@ -4809,7 +4725,7 @@ client.datasets.list_versions(
-
-**id:** `str` — Unique identifier for Dataset.
+**page:** `typing.Optional[int]` — Page offset for pagination.
@@ -4817,7 +4733,39 @@ client.datasets.list_versions(
-
-**include_datapoints:** `typing.Optional[ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints]` — If set to 'latest_saved', include datapoints for the latest saved version. Alternatively, 'latest_committed' (deprecated) includes datapoints for the latest committed version only.
+**size:** `typing.Optional[int]` — Page size for pagination. Number of Datasets to fetch.
+
+
+
+
+
+-
+
+**name:** `typing.Optional[str]` — Case-insensitive filter for Dataset name.
+
+
+
+
+
+-
+
+**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Dataset. This filter matches against both email address and name of users.
+
+
+
+
+
+-
+
+**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Datasets by.
+
+
+
+
+
+-
+
+**order:** `typing.Optional[SortOrder]` — Direction to sort by.
@@ -4837,7 +4785,7 @@ client.datasets.list_versions(
-client.datasets.delete_dataset_version(...)
+client.datasets.upsert(...)
-
@@ -4849,7 +4797,23 @@ client.datasets.list_versions(
-
-Delete a version of the Dataset.
+Create a Dataset or update it with a new version if it already exists.
+
+Datasets are identified by the `ID` or their `path`. The datapoints determine the versions of the Dataset.
+
+By default, the new Dataset version will be set to the list of Datapoints provided in
+the request. You can also create a new version by adding or removing Datapoints from an existing version
+by specifying `action` as `add` or `remove` respectively. In this case, you may specify
+the `version_id` or `environment` query parameters to identify the existing version to base
+the new version on. If neither is provided, the latest created version will be used.
+
+You can provide `version_name` and `version_description` to identify and describe your versions.
+Version names must be unique within a Dataset - attempting to create a version with a name
+that already exists will result in a 409 Conflict error.
+
+Humanloop also deduplicates Datapoints. If you try to add a Datapoint that already
+exists, it will be ignored. If you intentionally want to add a duplicate Datapoint,
+you can add a unique identifier to the Datapoint's inputs such as `{_dedupe_id: }`.
@@ -4869,9 +4833,34 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.datasets.delete_dataset_version(
- id="id",
- version_id="version_id",
+client.datasets.upsert(
+ path="datasets/support-queries",
+ datapoints=[
+ {
+ "messages": [
+ {
+ "role": "user",
+ "content": "How do i manage my organizations API keys?\n",
+ }
+ ],
+ "target": {
+ "response": 'Hey, thanks for your questions. Here are steps for how to achieve: 1. Log in to the Humanloop Dashboard \n\n2. Click on "Organization Settings."\n If you do not see this option, you might need to contact your organization admin to gain the necessary permissions.\n\n3. Within the settings or organization settings, select the option labeled "API Keys" on the left. Here you will be able to view and manage your API keys.\n\n4. You will see a list of existing API keys. You can perform various actions, such as:\n - **Generate New API Key:** Click on the "Generate New Key" button if you need a new API key.\n - **Revoke an API Key:** If you need to disable an existing key, find the key in the list and click the "Revoke" or "Delete" button.\n - **Copy an API Key:** If you need to use an existing key, you can copy it to your clipboard by clicking the "Copy" button next to the key.\n\n5. **Save and Secure API Keys:** Make sure to securely store any new or existing API keys you are using. Treat them like passwords and do not share them publicly.\n\nIf you encounter any issues or need further assistance, it might be helpful to engage with an engineer or your IT department to ensure you have the necessary permissions and support.\n\nWould you need help with anything else?'
+ },
+ },
+ {
+ "messages": [
+ {
+ "role": "user",
+ "content": "Hey, can do I use my code evaluator for monitoring my legal-copilot prompt?",
+ }
+ ],
+ "target": {
+ "response": "Hey, thanks for your questions. Here are steps for how to achieve: 1. Navigate to your Prompt dashboard. \n 2. Select the `Monitoring` button on the top right of the Prompt dashboard \n 3. Within the model select the Version of the Evaluator you want to turn on for monitoring. \n\nWould you need help with anything else?"
+ },
+ },
+ ],
+ version_name="Initial version",
+ version_description="Add two new questions and answers",
)
```
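+
+To append to an existing version rather than replace it, a sketch using `action="add"` (datapoint contents illustrative):
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(api_key="YOUR_API_KEY")
+
+client.datasets.upsert(
+    path="datasets/support-queries",
+    action="add",
+    datapoints=[
+        {
+            "messages": [{"role": "user", "content": "How do I rotate an API key?"}],
+            "target": {"response": "Revoke the old key and generate a new one under API Keys."},
+        }
+    ],
+    version_name="Add key-rotation question",
+)
+```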
@@ -4888,7 +4877,7 @@ client.datasets.delete_dataset_version(
-
-**id:** `str` — Unique identifier for Dataset.
+**datapoints:** `typing.Sequence[CreateDatapointRequestParams]` — The Datapoints to create this Dataset version with. Modify the `action` field to determine how these Datapoints are used.
@@ -4896,7 +4885,7 @@ client.datasets.delete_dataset_version(
-
-**version_id:** `str` — Unique identifier for the specific version of the Dataset.
+**version_id:** `typing.Optional[str]` — ID of the specific Dataset version to base the created Version on. Only used when `action` is `"add"` or `"remove"`.
@@ -4904,70 +4893,47 @@ client.datasets.delete_dataset_version(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed Version to base the created Version on. Only used when `action` is `"add"` or `"remove"`.
-
-
-
-
-
-
-
-client.datasets.update_dataset_version(...)
-
-#### 📝 Description
-
-
--
+**include_datapoints:** `typing.Optional[bool]` — If set to `true`, include all Datapoints in the response. Defaults to `false`. Consider using the paginated List Datapoints endpoint instead.
+
+
+
-
-Update the name or description of the Dataset version.
-
-
+**path:** `typing.Optional[str]` — Path of the Dataset, including the name. This locates the Dataset in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
-#### 🔌 Usage
-
-
-
--
-
-```python
-from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.datasets.update_dataset_version(
- id="id",
- version_id="version_id",
-)
-
-```
-
-
+**id:** `typing.Optional[str]` — ID for an existing Dataset.
+
-#### ⚙️ Parameters
-
-
-
--
+**action:** `typing.Optional[UpdateDatesetAction]`
-**id:** `str` — Unique identifier for Dataset.
+The action to take with the provided Datapoints.
+
+ - If `"set"`, the created version will only contain the Datapoints provided in this request.
+ - If `"add"`, the created version will contain the Datapoints provided in this request in addition to the Datapoints in the target version.
+ - If `"remove"`, the created version will contain the Datapoints in the target version except for the Datapoints provided in this request.
+
+If `"add"` or `"remove"`, one of the `version_id` or `environment` query parameters may be provided.
@@ -4975,7 +4941,7 @@ client.datasets.update_dataset_version(
-
-**version_id:** `str` — Unique identifier for the specific version of the Dataset.
+**attributes:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Additional fields to describe the Dataset. Helpful to separate Dataset versions from each other with details on how they were created or used.
@@ -4983,7 +4949,7 @@ client.datasets.update_dataset_version(
-
-**name:** `typing.Optional[str]` — Name of the version.
+**version_name:** `typing.Optional[str]` — Unique name for the Dataset version. Version names must be unique for a given Dataset.
@@ -4991,7 +4957,7 @@ client.datasets.update_dataset_version(
-
-**description:** `typing.Optional[str]` — Description of the version.
+**version_description:** `typing.Optional[str]` — Description of the version, e.g., the changes made in this version.
@@ -5011,7 +4977,7 @@ client.datasets.update_dataset_version(
-client.datasets.upload_csv(...)
+client.datasets.get(...)
-
@@ -5023,17 +4989,15 @@ client.datasets.update_dataset_version(
-
-Add Datapoints from a CSV file to a Dataset.
-
-This will create a new version of the Dataset with the Datapoints from the CSV file.
+Retrieve the Dataset with the given ID.
-If either `version_id` or `environment` is provided, the new version will be based on the specified version,
-with the Datapoints from the CSV file added to the existing Datapoints in the version.
-If neither `version_id` nor `environment` is provided, the new version will be based on the version
-of the Dataset that is deployed to the default Environment.
+Unless `include_datapoints` is set to `true`, the response will not include
+the Datapoints.
+Use the List Datapoints endpoint (`GET /{id}/datapoints`) to efficiently
+retrieve Datapoints for a large Dataset.
-You can optionally provide a name and description for the new version using `version_name`
-and `version_description` parameters.
+By default, the deployed version of the Dataset is returned. Use the query parameters
+`version_id` or `environment` to target a specific version of the Dataset.
@@ -5053,8 +5017,10 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.datasets.upload_csv(
- id="id",
+client.datasets.get(
+ id="ds_b0baF1ca7652",
+ version_id="dsv_6L78pqrdFi2xa",
+ include_datapoints=True,
)
```
@@ -5071,25 +5037,7 @@ client.datasets.upload_csv(
-
-**id:** `str` — Unique identifier for the Dataset
-
-
-
-
-
--
-
-**file:** `from __future__ import annotations
-
-core.File` — See core.File for more documentation
-
-
-
-
-
--
-
-**version_id:** `typing.Optional[str]` — ID of the specific Dataset version to base the created Version on.
+**id:** `str` — Unique identifier for Dataset.
@@ -5097,7 +5045,7 @@ core.File` — See core.File for more documentation
-
-**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed Version to base the created Version on.
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Dataset to retrieve.
@@ -5105,7 +5053,7 @@ core.File` — See core.File for more documentation
-
-**version_name:** `typing.Optional[str]` — Name for the new Dataset version.
+**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
@@ -5113,7 +5061,7 @@ core.File` — See core.File for more documentation
-
-**version_description:** `typing.Optional[str]` — Description for the new Dataset version.
+**include_datapoints:** `typing.Optional[bool]` — If set to `true`, include all Datapoints in the response. Defaults to `false`. Consider using the paginated List Datapoints endpoint instead.
@@ -5133,7 +5081,7 @@ core.File` — See core.File for more documentation
-client.datasets.set_deployment(...)
+client.datasets.delete(...)
-
@@ -5145,9 +5093,7 @@ core.File` — See core.File for more documentation
-
-Deploy Dataset to Environment.
-
-Set the deployed version for the specified Environment.
+Delete the Dataset with the given ID.
@@ -5167,10 +5113,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.datasets.set_deployment(
- id="ds_b0baF1ca7652",
- environment_id="staging",
- version_id="dsv_6L78pqrdFi2xa",
+client.datasets.delete(
+ id="id",
)
```
@@ -5195,22 +5139,6 @@ client.datasets.set_deployment(
-
-**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
-
-
-
-
-
--
-
-**version_id:** `str` — Unique identifier for the specific version of the Dataset.
-
-
-
-
-
--
-
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -5223,7 +5151,7 @@ client.datasets.set_deployment(
-client.datasets.remove_deployment(...)
+client.datasets.move(...)
-
@@ -5235,9 +5163,7 @@ client.datasets.set_deployment(
-
-Remove deployed Dataset from Environment.
-
-Remove the deployed version for the specified Environment.
+Move the Dataset to a different path or change the name.
@@ -5257,9 +5183,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.datasets.remove_deployment(
- id="ds_b0baF1ca7652",
- environment_id="staging",
+client.datasets.move(
+ id="id",
)
```
@@ -5284,7 +5209,15 @@ client.datasets.remove_deployment(
-
-**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
+**path:** `typing.Optional[str]` — Path of the Dataset including the Dataset name, which is used as a unique identifier.
+
+
+
+
+
+-
+
+**name:** `typing.Optional[str]` — Name of the Dataset, which is used as a unique identifier.
@@ -5304,7 +5237,7 @@ client.datasets.remove_deployment(
-client.datasets.list_environments(...)
+client.datasets.list_datapoints(...)
-
@@ -5316,7 +5249,7 @@ client.datasets.remove_deployment(
-
-List all Environments and their deployed versions for the Dataset.
+List all Datapoints for the Dataset with the given ID.
@@ -5336,9 +5269,15 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.datasets.list_environments(
- id="id",
+response = client.datasets.list_datapoints(
+ id="ds_b0baF1ca7652",
+ size=1,
)
+for item in response:
+    print(item)
+# alternatively, you can paginate page-by-page
+for page in response.iter_pages():
+    print(page)
```
@@ -5362,6 +5301,38 @@ client.datasets.list_environments(
-
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Dataset to retrieve.
+
+
+
+
+
+-
+
+**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
+
+
+
+
+
+-
+
+**page:** `typing.Optional[int]` — Page number for pagination.
+
+
+
+
+
+-
+
+**size:** `typing.Optional[int]` — Page size for pagination. Number of Datapoints to fetch.
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -5374,8 +5345,7 @@ client.datasets.list_environments(
-## Evaluators
-client.evaluators.log(...)
+client.datasets.list_versions(...)
-
@@ -5387,9 +5357,7 @@ client.datasets.list_environments(
-
-Submit Evaluator judgment for an existing Log.
-
-Creates a new Log. The evaluated Log will be set as the parent of the created Log.
+Get a list of the versions for a Dataset.
@@ -5409,8 +5377,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.evaluators.log(
- parent_id="parent_id",
+client.datasets.list_versions(
+ id="ds_b0baF1ca7652",
)
```
@@ -5427,7 +5395,7 @@ client.evaluators.log(
-
-**parent_id:** `str` — Identifier of the evaluated Log. The newly created Log will have this one set as parent.
+**id:** `str` — Unique identifier for Dataset.
@@ -5435,7 +5403,7 @@ client.evaluators.log(
-
-**version_id:** `typing.Optional[str]` — ID of the Evaluator version to log against.
+**include_datapoints:** `typing.Optional[ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints]` — If set to 'latest_saved', include datapoints for the latest saved version. Alternatively, 'latest_committed' (deprecated) includes datapoints for the latest committed version only.
@@ -5443,103 +5411,70 @@ client.evaluators.log(
-
-**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
-
-
--
-
-**path:** `typing.Optional[str]` — Path of the Evaluator, including the name. This locates the Evaluator in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
-
-
--
-**id:** `typing.Optional[str]` — ID for an existing Evaluator.
-
+
+client.datasets.delete_dataset_version(...)
-
-**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
-
-
-
+#### 📝 Description
-
-**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
-
-
-
-
-
-**output:** `typing.Optional[str]` — Generated output from the LLM. Only populated for LLM Evaluator Logs.
-
+Delete a version of the Dataset.
-
-
--
-
-**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
-
+#### 🔌 Usage
+
-
-**error:** `typing.Optional[str]` — Error message if the log is an error.
-
-
-
-
-
-**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
-
-
-
+```python
+from humanloop import Humanloop
-
--
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.datasets.delete_dataset_version(
+ id="id",
+ version_id="version_id",
+)
-**stdout:** `typing.Optional[str]` — Captured log and debug statements.
-
+```
-
-
--
-
-**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider. Only populated for LLM Evaluator Logs.
-
+#### ⚙️ Parameters
+
-
-**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received the provider. Only populated for LLM Evaluator Logs.
-
-
-
-
-
-**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
+**id:** `str` — Unique identifier for Dataset.
@@ -5547,7 +5482,7 @@ client.evaluators.log(
-
-**source:** `typing.Optional[str]` — Identifies where the model was called from.
+**version_id:** `str` — Unique identifier for the specific version of the Dataset.
@@ -5555,71 +5490,70 @@ client.evaluators.log(
-
-**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
+
+client.datasets.update_dataset_version(...)
-
-**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
-
-
-
+#### 📝 Description
-
-**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
-
-
-
-
-
-**user:** `typing.Optional[str]` — End-user ID related to the Log.
-
+Update the name or description of the Dataset version.
+
+
+
+#### 🔌 Usage
-
-**create_evaluator_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to.
-
-
-
-
-
-**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
-
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.datasets.update_dataset_version(
+ id="id",
+ version_id="version_id",
+)
+
+```
+
+
+#### ⚙️ Parameters
+
-
-**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
-
-
-
-
-
-**output_message:** `typing.Optional[ChatMessageParams]` — The message returned by the LLM. Only populated for LLM Evaluator Logs.
+**id:** `str` — Unique identifier for Dataset.
@@ -5627,7 +5561,7 @@ client.evaluators.log(
-
-**judgment:** `typing.Optional[CreateEvaluatorLogRequestJudgmentParams]` — Evaluator assessment of the Log.
+**version_id:** `str` — Unique identifier for the specific version of the Dataset.
@@ -5635,7 +5569,7 @@ client.evaluators.log(
-
-**marked_completed:** `typing.Optional[bool]` — Whether the Log has been manually marked as completed by a user.
+**name:** `typing.Optional[str]` — Name of the version.
@@ -5643,7 +5577,7 @@ client.evaluators.log(
-
-**spec:** `typing.Optional[CreateEvaluatorLogRequestSpecParams]`
+**description:** `typing.Optional[str]` — Description of the version.
@@ -5663,7 +5597,7 @@ client.evaluators.log(
-client.evaluators.list(...)
+client.datasets.upload_csv(...)
-
@@ -5675,7 +5609,17 @@ client.evaluators.log(
-
-Get a list of all Evaluators.
+Add Datapoints from a CSV file to a Dataset.
+
+This will create a new version of the Dataset with the Datapoints from the CSV file.
+
+If either `version_id` or `environment` is provided, the new version will be based on the specified version,
+with the Datapoints from the CSV file added to the existing Datapoints in the version.
+If neither `version_id` nor `environment` is provided, the new version will be based on the version
+of the Dataset that is deployed to the default Environment.
+
+You can optionally provide a name and description for the new version using `version_name`
+and `version_description` parameters.
@@ -5695,14 +5639,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-response = client.evaluators.list(
- size=1,
+client.datasets.upload_csv(
+ id="id",
)
-for item in response:
- yield item
-# alternatively, you can paginate page-by-page
-for page in response.iter_pages():
- yield page
```
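+
+A hedged sketch of passing the `file` parameter (the CSV filename and version name are placeholders; Fern-generated `core.File` parameters generally accept a binary file object):
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+    api_key="YOUR_API_KEY",
+)
+# Upload a local CSV as a new Dataset version; values are illustrative.
+with open("datapoints.csv", "rb") as f:
+    client.datasets.upload_csv(
+        id="ds_b0baF1ca7652",
+        file=f,
+        version_name="csv-import-v1",
+    )
+```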
@@ -5718,7 +5657,7 @@ for page in response.iter_pages():
-
-**page:** `typing.Optional[int]` — Page offset for pagination.
+**id:** `str` — Unique identifier for the Dataset
@@ -5726,7 +5665,9 @@ for page in response.iter_pages():
-
-**size:** `typing.Optional[int]` — Page size for pagination. Number of Evaluators to fetch.
+**file:** `core.File` — See core.File for more documentation
@@ -5734,7 +5675,7 @@ for page in response.iter_pages():
-
-**name:** `typing.Optional[str]` — Case-insensitive filter for Evaluator name.
+**version_id:** `typing.Optional[str]` — ID of the specific Dataset version to base the created Version on.
@@ -5742,7 +5683,7 @@ for page in response.iter_pages():
-
-**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Evaluator. This filter matches against both email address and name of users.
+**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed Version to base the created Version on.
@@ -5750,7 +5691,7 @@ for page in response.iter_pages():
-
-**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Evaluators by
+**version_name:** `typing.Optional[str]` — Name for the new Dataset version.
@@ -5758,7 +5699,7 @@ for page in response.iter_pages():
-
-**order:** `typing.Optional[SortOrder]` — Direction to sort by.
+**version_description:** `typing.Optional[str]` — Description for the new Dataset version.
@@ -5778,7 +5719,7 @@ for page in response.iter_pages():
-client.evaluators.upsert(...)
+client.datasets.set_deployment(...)
-
@@ -5790,13 +5731,9 @@ for page in response.iter_pages():
-
-Create an Evaluator or update it with a new version if it already exists.
-
-Evaluators are identified by the `ID` or their `path`. The spec provided determines the version of the Evaluator.
+Deploy Dataset to Environment.
-You can provide `version_name` and `version_description` to identify and describe your versions.
-Version names must be unique within an Evaluator - attempting to create a version with a name
-that already exists will result in a 409 Conflict error.
+Set the deployed version for the specified Environment.
@@ -5816,19 +5753,13 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.evaluators.upsert(
- path="Shared Evaluators/Accuracy Evaluator",
- spec={
- "arguments_type": "target_required",
- "return_type": "number",
- "evaluator_type": "python",
- "code": "def evaluate(answer, target):\n return 0.5",
- },
- version_name="simple-evaluator",
- version_description="Simple evaluator that returns 0.5",
-)
-
-```
+client.datasets.set_deployment(
+ id="ds_b0baF1ca7652",
+ environment_id="staging",
+ version_id="dsv_6L78pqrdFi2xa",
+)
+
+```
@@ -5842,23 +5773,7 @@ client.evaluators.upsert(
-
-**spec:** `EvaluatorRequestSpecParams`
-
-
-
-
-
--
-
-**path:** `typing.Optional[str]` — Path of the Evaluator, including the name. This locates the Evaluator in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
-
-
-
-
-
--
-
-**id:** `typing.Optional[str]` — ID for an existing Evaluator.
+**id:** `str` — Unique identifier for Dataset.
@@ -5866,7 +5781,7 @@ client.evaluators.upsert(
-
-**version_name:** `typing.Optional[str]` — Unique name for the Evaluator version. Version names must be unique for a given Evaluator.
+**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
@@ -5874,7 +5789,7 @@ client.evaluators.upsert(
-
-**version_description:** `typing.Optional[str]` — Description of the version, e.g., the changes made in this version.
+**version_id:** `str` — Unique identifier for the specific version of the Dataset.
@@ -5894,7 +5809,7 @@ client.evaluators.upsert(
-client.evaluators.get(...)
+client.datasets.remove_deployment(...)
-
@@ -5906,10 +5821,9 @@ client.evaluators.upsert(
-
-Retrieve the Evaluator with the given ID.
+Remove deployed Dataset from Environment.
-By default, the deployed version of the Evaluator is returned. Use the query parameters
-`version_id` or `environment` to target a specific version of the Evaluator.
+Remove the deployed version for the specified Environment.
@@ -5929,8 +5843,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.evaluators.get(
- id="ev_890bcd",
+client.datasets.remove_deployment(
+ id="ds_b0baF1ca7652",
+ environment_id="staging",
)
```
@@ -5947,15 +5862,7 @@ client.evaluators.get(
-
-**id:** `str` — Unique identifier for Evaluator.
-
-
-
-
-
--
-
-**version_id:** `typing.Optional[str]` — A specific Version ID of the Evaluator to retrieve.
+**id:** `str` — Unique identifier for Dataset.
@@ -5963,7 +5870,7 @@ client.evaluators.get(
-
-**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
+**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
@@ -5983,7 +5890,7 @@ client.evaluators.get(
-client.evaluators.delete(...)
+client.datasets.list_environments(...)
-
@@ -5995,7 +5902,7 @@ client.evaluators.get(
-
-Delete the Evaluator with the given ID.
+List all Environments and their deployed versions for the Dataset.
@@ -6015,8 +5922,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.evaluators.delete(
- id="ev_890bcd",
+client.datasets.list_environments(
+ id="id",
)
```
@@ -6033,7 +5940,7 @@ client.evaluators.delete(
-
-**id:** `str` — Unique identifier for Evaluator.
+**id:** `str` — Unique identifier for Dataset.
@@ -6053,7 +5960,8 @@ client.evaluators.delete(
-client.evaluators.move(...)
+## Evaluators
+client.evaluators.log(...)
-
@@ -6065,7 +5973,9 @@ client.evaluators.delete(
-
-Move the Evaluator to a different path or change the name.
+Submit Evaluator judgment for an existing Log.
+
+Creates a new Log. The evaluated Log will be set as the parent of the created Log.
@@ -6085,9 +5995,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.evaluators.move(
- id="ev_890bcd",
- path="new directory/new name",
+client.evaluators.log(
+ parent_id="parent_id",
)
```
@@ -6104,7 +6013,7 @@ client.evaluators.move(
-
-**id:** `str` — Unique identifier for Evaluator.
+**parent_id:** `str` — Identifier of the evaluated Log. The newly created Log will have this one set as parent.
@@ -6112,7 +6021,7 @@ client.evaluators.move(
-
-**path:** `typing.Optional[str]` — Path of the Evaluator including the Evaluator name, which is used as a unique identifier.
+**version_id:** `typing.Optional[str]` — ID of the Evaluator version to log against.
@@ -6120,7 +6029,7 @@ client.evaluators.move(
-
-**name:** `typing.Optional[str]` — Name of the Evaluator, which is used as a unique identifier.
+**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
@@ -6128,69 +6037,79 @@ client.evaluators.move(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**path:** `typing.Optional[str]` — Path of the Evaluator, including the name. This locates the Evaluator in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
-
-
+
+-
+**id:** `typing.Optional[str]` — ID for an existing Evaluator.
+
-
-client.evaluators.list_versions(...)
-
-#### 📝 Description
+**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
+
+
+
-
+**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
+
+
+
+
-
-Get a list of all the versions of an Evaluator.
-
-
+**output:** `typing.Optional[str]` — Generated output from the LLM. Only populated for LLM Evaluator Logs.
+
-#### 🔌 Usage
-
-
+**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
+
+
+
+
-
-```python
-from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.evaluators.list_versions(
- id="ev_890bcd",
-)
-
-```
+**error:** `typing.Optional[str]` — Error message if the log is an error.
+
+
+
+-
+
+**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
+
-#### ⚙️ Parameters
-
-
+**stdout:** `typing.Optional[str]` — Captured log and debug statements.
+
+
+
+
-
-**id:** `str` — Unique identifier for the Evaluator.
+**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to the provider. Only populated for LLM Evaluator Logs.
@@ -6198,7 +6117,7 @@ client.evaluators.list_versions(
-
-**evaluator_aggregates:** `typing.Optional[bool]` — Whether to include Evaluator aggregate results for the versions in the response
+**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received from the provider. Only populated for LLM Evaluator Logs.
@@ -6206,70 +6125,103 @@ client.evaluators.list_versions(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
+
+
+-
+
+**source:** `typing.Optional[str]` — Identifies where the model was called from.
+
+
+-
+**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
+
-
-client.evaluators.delete_evaluator_version(...)
-
-#### 📝 Description
+**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Changing the status from specified back to unspecified is undefined behavior.
+
+
+
-
+**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+
+
+
-
-Delete a version of the Evaluator.
+**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
+
+
+
+-
+
+**user:** `typing.Optional[str]` — End-user ID related to the Log.
+
-#### 🔌 Usage
-
-
+**create_evaluator_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated with.
+
+
+
+
-
-```python
-from humanloop import Humanloop
+**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
+
+
+
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.evaluators.delete_evaluator_version(
- id="id",
- version_id="version_id",
-)
+
+-
-```
+**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+
+
+-
+
+**output_message:** `typing.Optional[ChatMessageParams]` — The message returned by the LLM. Only populated for LLM Evaluator Logs.
+
-#### ⚙️ Parameters
-
-
+**judgment:** `typing.Optional[CreateEvaluatorLogRequestJudgmentParams]` — Evaluator assessment of the Log.
+
+
+
+
-
-**id:** `str` — Unique identifier for Evaluator.
+**marked_completed:** `typing.Optional[bool]` — Whether the Log has been manually marked as completed by a user.
@@ -6277,7 +6229,7 @@ client.evaluators.delete_evaluator_version(
-
-**version_id:** `str` — Unique identifier for the specific version of the Evaluator.
+**spec:** `typing.Optional[CreateEvaluatorLogRequestSpecParams]`
@@ -6297,7 +6249,7 @@ client.evaluators.delete_evaluator_version(
-client.evaluators.update_evaluator_version(...)
+client.evaluators.list(...)
-
@@ -6309,7 +6261,7 @@ client.evaluators.delete_evaluator_version(
-
-Update the name or description of the Evaluator version.
+Get a list of all Evaluators.
@@ -6329,10 +6281,14 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.evaluators.update_evaluator_version(
- id="id",
- version_id="version_id",
+response = client.evaluators.list(
+ size=1,
)
+for item in response:
+    print(item)
+# alternatively, you can paginate page-by-page
+for page in response.iter_pages():
+    print(page)
```
@@ -6348,7 +6304,7 @@ client.evaluators.update_evaluator_version(
-
-**id:** `str` — Unique identifier for Evaluator.
+**page:** `typing.Optional[int]` — Page offset for pagination.
@@ -6356,7 +6312,7 @@ client.evaluators.update_evaluator_version(
-
-**version_id:** `str` — Unique identifier for the specific version of the Evaluator.
+**size:** `typing.Optional[int]` — Page size for pagination. Number of Evaluators to fetch.
@@ -6364,7 +6320,7 @@ client.evaluators.update_evaluator_version(
-
-**name:** `typing.Optional[str]` — Name of the version.
+**name:** `typing.Optional[str]` — Case-insensitive filter for Evaluator name.
@@ -6372,7 +6328,23 @@ client.evaluators.update_evaluator_version(
-
-**description:** `typing.Optional[str]` — Description of the version.
+**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Evaluator. This filter matches against both email address and name of users.
+
+
+
+
+
+-
+
+**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Evaluators by
+
+
+
+
+
+-
+
+**order:** `typing.Optional[SortOrder]` — Direction to sort by.
@@ -6392,7 +6364,7 @@ client.evaluators.update_evaluator_version(
-client.evaluators.set_deployment(...)
+client.evaluators.upsert(...)
-
@@ -6404,10 +6376,13 @@ client.evaluators.update_evaluator_version(
-
-Deploy Evaluator to an Environment.
+Create an Evaluator or update it with a new version if it already exists.
-Set the deployed version for the specified Environment. This Evaluator
-will be used for calls made to the Evaluator in this Environment.
+Evaluators are identified by the `ID` or their `path`. The spec provided determines the version of the Evaluator.
+
+You can provide `version_name` and `version_description` to identify and describe your versions.
+Version names must be unique within an Evaluator - attempting to create a version with a name
+that already exists will result in a 409 Conflict error.
@@ -6427,10 +6402,16 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.evaluators.set_deployment(
- id="ev_890bcd",
- environment_id="staging",
- version_id="evv_012def",
+client.evaluators.upsert(
+ path="Shared Evaluators/Accuracy Evaluator",
+ spec={
+ "arguments_type": "target_required",
+ "return_type": "number",
+ "evaluator_type": "python",
+ "code": "def evaluate(answer, target):\n return 0.5",
+ },
+ version_name="simple-evaluator",
+ version_description="Simple evaluator that returns 0.5",
)
```
@@ -6447,7 +6428,7 @@ client.evaluators.set_deployment(
-
-**id:** `str` — Unique identifier for Evaluator.
+**spec:** `EvaluatorRequestSpecParams`
@@ -6455,7 +6436,7 @@ client.evaluators.set_deployment(
-
-**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
+**path:** `typing.Optional[str]` — Path of the Evaluator, including the name. This locates the Evaluator in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
@@ -6463,7 +6444,23 @@ client.evaluators.set_deployment(
-
-**version_id:** `str` — Unique identifier for the specific version of the Evaluator.
+**id:** `typing.Optional[str]` — ID for an existing Evaluator.
+
+
+
+
+
+-
+
+**version_name:** `typing.Optional[str]` — Unique name for the Evaluator version. Version names must be unique for a given Evaluator.
+
+
+
+
+
+-
+
+**version_description:** `typing.Optional[str]` — Description of the version, e.g., the changes made in this version.
@@ -6483,7 +6480,7 @@ client.evaluators.set_deployment(
-client.evaluators.remove_deployment(...)
+client.evaluators.get(...)
-
@@ -6495,10 +6492,10 @@ client.evaluators.set_deployment(
-
-Remove deployed Evaluator from the Environment.
+Retrieve the Evaluator with the given ID.
-Remove the deployed version for the specified Environment. This Evaluator
-will no longer be used for calls made to the Evaluator in this Environment.
+By default, the deployed version of the Evaluator is returned. Use the query parameters
+`version_id` or `environment` to target a specific version of the Evaluator.
@@ -6518,9 +6515,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.evaluators.remove_deployment(
+client.evaluators.get(
id="ev_890bcd",
- environment_id="staging",
)
```
@@ -6545,7 +6541,15 @@ client.evaluators.remove_deployment(
-
-**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Evaluator to retrieve.
+
+
+
+
+
+-
+
+**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
@@ -6565,7 +6569,7 @@ client.evaluators.remove_deployment(
-client.evaluators.list_environments(...)
+client.evaluators.delete(...)
-
@@ -6577,7 +6581,7 @@ client.evaluators.remove_deployment(
-
-List all Environments and their deployed versions for the Evaluator.
+Delete the Evaluator with the given ID.
@@ -6597,7 +6601,7 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.evaluators.list_environments(
+client.evaluators.delete(
id="ev_890bcd",
)
@@ -6635,7 +6639,7 @@ client.evaluators.list_environments(
-client.evaluators.update_monitoring(...)
+client.evaluators.move(...)
-
@@ -6647,10 +6651,7 @@ client.evaluators.list_environments(
-
-Activate and deactivate Evaluators for monitoring the Evaluator.
-
-An activated Evaluator will automatically be run on all new Logs
-within the Evaluator for monitoring purposes.
+Move the Evaluator to a different path or change the name.
@@ -6670,8 +6671,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.evaluators.update_monitoring(
- id="id",
+client.evaluators.move(
+ id="ev_890bcd",
+ path="new directory/new name",
)
```
@@ -6688,7 +6690,7 @@ client.evaluators.update_monitoring(
-
-**id:** `str`
+**id:** `str` — Unique identifier for Evaluator.
@@ -6696,9 +6698,7 @@ client.evaluators.update_monitoring(
-
-**activate:** `typing.Optional[
- typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]
-]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+**path:** `typing.Optional[str]` — Path of the Evaluator including the Evaluator name, which is used as a unique identifier.
@@ -6706,9 +6706,7 @@ client.evaluators.update_monitoring(
-
-**deactivate:** `typing.Optional[
- typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]
-]` — Evaluators to deactivate. These will not be run on new Logs.
+**name:** `typing.Optional[str]` — Name of the Evaluator, which is used as a unique identifier.
@@ -6728,8 +6726,7 @@ client.evaluators.update_monitoring(
-## Flows
-client.flows.log(...)
+client.evaluators.list_versions(...)
-
@@ -6741,13 +6738,7 @@ client.evaluators.update_monitoring(
-
-Log to a Flow.
-
-You can use query parameters `version_id`, or `environment`, to target
-an existing version of the Flow. Otherwise, the default deployed version will be chosen.
-
-If you create the Flow Log with a `log_status` of `incomplete`, you should later update it to `complete`
-in order to trigger Evaluators.
+Get a list of all the versions of an Evaluator.
@@ -6762,40 +6753,13 @@ in order to trigger Evaluators.
-
```python
-import datetime
-
from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.log(
- id="fl_6o701g4jmcanPVHxdqD0O",
- flow={
- "attributes": {
- "prompt": {
- "template": "You are a helpful assistant helping with medical anamnesis",
- "model": "gpt-4o",
- "temperature": 0.8,
- },
- "tool": {
- "name": "retrieval_tool_v3",
- "description": "Retrieval tool for MedQA.",
- "source_code": "def retrieval_tool(question: str) -> str:\n pass\n",
- },
- }
- },
- inputs={
- "question": "Patient with a history of diabetes and hypertension presents with chest pain and shortness of breath."
- },
- output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.",
- log_status="incomplete",
- start_time=datetime.datetime.fromisoformat(
- "2024-07-08 19:40:35+00:00",
- ),
- end_time=datetime.datetime.fromisoformat(
- "2024-07-08 19:40:39+00:00",
- ),
+client.evaluators.list_versions(
+ id="ev_890bcd",
)
```
@@ -6812,7 +6776,7 @@ client.flows.log(
-
-**version_id:** `typing.Optional[str]` — A specific Version ID of the Flow to log to.
+**id:** `str` — Unique identifier for the Evaluator.
@@ -6820,7 +6784,7 @@ client.flows.log(
-
-**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
+**evaluator_aggregates:** `typing.Optional[bool]` — Whether to include Evaluator aggregate results for the versions in the response
@@ -6828,31 +6792,70 @@ client.flows.log(
-
-**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — List of chat messages that were used as an input to the Flow.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+client.evaluators.delete_evaluator_version(...)
-
-**output_message:** `typing.Optional[ChatMessageParams]` — The output message returned by this Flow.
-
+#### 📝 Description
+
+
+-
+
+
+-
+
+Delete a version of the Evaluator.
+
+
+#### 🔌 Usage
+
-
-**run_id:** `typing.Optional[str]` — Unique identifier for the Run to associate the Log to.
-
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.evaluators.delete_evaluator_version(
+ id="id",
+ version_id="version_id",
+)
+
+```
+
+
+#### ⚙️ Parameters
+
-
-**path:** `typing.Optional[str]` — Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+
+-
+
+**id:** `str` — Unique identifier for Evaluator.
@@ -6860,7 +6863,7 @@ client.flows.log(
-
-**id:** `typing.Optional[str]` — ID for an existing Flow.
+**version_id:** `str` — Unique identifier for the specific version of the Evaluator.
@@ -6868,15 +6871,70 @@ client.flows.log(
-
-**start_time:** `typing.Optional[dt.datetime]` — The start time of the Trace. Will be updated if a child Log with an earlier start time is added.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+client.evaluators.update_evaluator_version(...)
-
-**end_time:** `typing.Optional[dt.datetime]` — The end time of the Trace. Will be updated if a child Log with a later end time is added.
+#### 📝 Description
+
+
+-
+
+
+-
+
+Update the name or description of the Evaluator version.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.evaluators.update_evaluator_version(
+ id="id",
+ version_id="version_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Evaluator.
@@ -6884,7 +6942,7 @@ client.flows.log(
-
-**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+**version_id:** `str` — Unique identifier for the specific version of the Evaluator.
@@ -6892,7 +6950,7 @@ client.flows.log(
-
-**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
+**name:** `typing.Optional[str]` — Name of the version.
@@ -6900,7 +6958,3044 @@ client.flows.log(
-
-**error:** `typing.Optional[str]` — Error message if the log is an error.
+**description:** `typing.Optional[str]` — Description of the version.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.evaluators.set_deployment(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Deploy Evaluator to an Environment.
+
+Set the deployed version for the specified Environment. This Evaluator
+will be used for calls made to the Evaluator in this Environment.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.evaluators.set_deployment(
+ id="ev_890bcd",
+ environment_id="staging",
+ version_id="evv_012def",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Evaluator.
+
+
+
+
+
+-
+
+**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
+
+
+
+
+
+-
+
+**version_id:** `str` — Unique identifier for the specific version of the Evaluator.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.evaluators.remove_deployment(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Remove deployed Evaluator from the Environment.
+
+Remove the deployed version for the specified Environment. This Evaluator
+will no longer be used for calls made to the Evaluator in this Environment.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.evaluators.remove_deployment(
+ id="ev_890bcd",
+ environment_id="staging",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Evaluator.
+
+
+
+
+
+-
+
+**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.evaluators.list_environments(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+List all Environments and their deployed versions for the Evaluator.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.evaluators.list_environments(
+ id="ev_890bcd",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Evaluator.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.evaluators.update_monitoring(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Activate and deactivate Evaluators for monitoring the Evaluator.
+
+An activated Evaluator will automatically be run on all new Logs
+within the Evaluator for monitoring purposes.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.evaluators.update_monitoring(
+ id="id",
+)
+
+```
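+
+A sketch with the `activate` and `deactivate` parameters populated; the IDs are placeholders and the item shape mirrors the `flows.update_monitoring` example later in this reference:
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+    api_key="YOUR_API_KEY",
+)
+# Turn one monitoring Evaluator on and another off; IDs are placeholders.
+client.evaluators.update_monitoring(
+    id="ev_890bcd",
+    activate=[{"evaluator_version_id": "evv_012def"}],
+    deactivate=[{"evaluator_version_id": "evv_345ghi"}],
+)
+```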
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str`
+
+
+
+
+
+-
+
+**activate:** `typing.Optional[
+ typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]
+]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+
+
+
+
+
+-
+
+**deactivate:** `typing.Optional[
+ typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]
+]` — Evaluators to deactivate. These will not be run on new Logs.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## Flows
+client.flows.log(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Log to a Flow.
+
+You can use query parameters `version_id`, or `environment`, to target
+an existing version of the Flow. Otherwise, the default deployed version will be chosen.
+
+If you create the Flow Log with a `log_status` of `incomplete`, you should later update it to `complete`
+in order to trigger Evaluators.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+import datetime
+
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.log(
+ id="fl_6o701g4jmcanPVHxdqD0O",
+ flow={
+ "attributes": {
+ "prompt": {
+ "template": "You are a helpful assistant helping with medical anamnesis",
+ "model": "gpt-4o",
+ "temperature": 0.8,
+ },
+ "tool": {
+ "name": "retrieval_tool_v3",
+ "description": "Retrieval tool for MedQA.",
+ "source_code": "def retrieval_tool(question: str) -> str:\n pass\n",
+ },
+ }
+ },
+ inputs={
+ "question": "Patient with a history of diabetes and hypertension presents with chest pain and shortness of breath."
+ },
+ output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.",
+ log_status="incomplete",
+ start_time=datetime.datetime.fromisoformat(
+ "2024-07-08 21:40:35+00:00",
+ ),
+ end_time=datetime.datetime.fromisoformat(
+ "2024-07-08 21:40:39+00:00",
+ ),
+)
+
+```
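+
+Because the Log above is created with `log_status="incomplete"`, a later call to `flows.update_log` (documented below) is needed to mark it complete and trigger Evaluators; the `log_id` value here is a placeholder for the ID returned by `flows.log`:
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+    api_key="YOUR_API_KEY",
+)
+# Mark the earlier incomplete Flow Log as complete so monitoring
+# Evaluators run. The log_id is a placeholder for the created Log's ID.
+client.flows.update_log(
+    log_id="log_abc123",
+    output="The patient is likely experiencing a myocardial infarction.",
+    log_status="complete",
+)
+```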
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Flow to log to.
+
+
+
+
+
+-
+
+**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
+
+
+
+
+
+-
+
+**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — List of chat messages that were used as an input to the Flow.
+
+
+
+
+
+-
+
+**output_message:** `typing.Optional[ChatMessageParams]` — The output message returned by this Flow.
+
+
+
+
+
+-
+
+**run_id:** `typing.Optional[str]` — Unique identifier for the Run to associate the Log to.
+
+
+
+
+
+-
+
+**path:** `typing.Optional[str]` — Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+
+
+
+
+-
+
+**id:** `typing.Optional[str]` — ID for an existing Flow.
+
+
+
+
+
+-
+
+**start_time:** `typing.Optional[dt.datetime]` — The start time of the Trace. Will be updated if a child Log with an earlier start time is added.
+
+
+
+
+
+-
+
+**end_time:** `typing.Optional[dt.datetime]` — The end time of the Trace. Will be updated if a child Log with a later end time is added.
+
+
+
+
+
+-
+
+**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+
+
+
+
+-
+
+**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
+
+
+
+
+
+-
+
+**error:** `typing.Optional[str]` — Error message if the log is an error.
+
+
+
+
+
+-
+
+**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
+
+
+
+
+
+-
+
+**stdout:** `typing.Optional[str]` — Captured log and debug statements.
+
+
+
+
+
+-
+
+**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to the provider.
+
+
+
+
+
+-
+
+**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received from the provider.
+
+
+
+
+
+-
+
+**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
+
+
+
+
+
+-
+
+**source:** `typing.Optional[str]` — Identifies where the model was called from.
+
+
+
+
+
+-
+
+**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
+
+
+
+
+
+-
+
+**log_status:** `typing.Optional[LogStatus]` — Status of the Flow Log. When a Log is updated from `incomplete` to `complete`, it becomes available to Monitoring Evaluators. Flow Logs cannot have an unspecified status: they must start as `incomplete` to allow children to be added. Provide `complete` if you don't intend to add children to the trace.
+
+
+
+
+
+-
+
+**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+
+
+
+
+-
+
+**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
+
+
+
+
+
+-
+
+**user:** `typing.Optional[str]` — End-user ID related to the Log.
+
+
+
+
+
+-
+
+**flow_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated with.
+
+
+
+
+
+-
+
+**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
+
+
+
+
+
+-
+
+**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+
+
+
+
+-
+
+**flow:** `typing.Optional[FlowKernelRequestParams]` — Flow used to generate the Trace.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.update_log(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Update the status, inputs, output of a Flow Log.
+
+Marking a Flow Log as complete will trigger any monitoring Evaluators to run.
+Inputs and output (or error) must be provided in order to mark it as complete.
+
+The end_time log attribute will be set to match the time the log is marked as complete.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.update_log(
+ log_id="medqa_experiment_0001",
+ inputs={
+ "question": "Patient with a history of diabetes and normal tension presents with chest pain and shortness of breath."
+ },
+ output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.",
+ log_status="complete",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**log_id:** `str` — Unique identifier of the Flow Log.
+
+
+
+
+
+-
+
+**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — List of chat messages that were used as an input to the Flow.
+
+
+
+
+
+-
+
+**output_message:** `typing.Optional[ChatMessageParams]` — The output message returned by this Flow.
+
+
+
+
+
+-
+
+**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the Flow Log.
+
+
+
+
+
+-
+
+**output:** `typing.Optional[str]` — The output of the Flow Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`.
+
+
+
+
+
+-
+
+**error:** `typing.Optional[str]` — The error message of the Flow Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`.
+
+
+
+
+
+-
+
+**log_status:** `typing.Optional[LogStatus]` — Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.get(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Retrieve the Flow with the given ID.
+
+By default, the deployed version of the Flow is returned. Use the query parameters
+`version_id` or `environment` to target a specific version of the Flow.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.get(
+ id="fl_6o701g4jmcanPVHxdqD0O",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Flow.
+
+
+
+
+
+-
+
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Flow to retrieve.
+
+
+
+
+
+-
+
+**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.delete(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Delete the Flow with the given ID.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.delete(
+ id="fl_6o701g4jmcanPVHxdqD0O",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Flow.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.move(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Move the Flow to a different path or change the name.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.move(
+ id="fl_6o701g4jmcanPVHxdqD0O",
+ path="new directory/new name",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Flow.
+
+
+
+
+
+-
+
+**path:** `typing.Optional[str]` — Path of the Flow including the Flow name, which is used as a unique identifier.
+
+
+
+
+
+-
+
+**name:** `typing.Optional[str]` — Name of the Flow.
+
+
+
+
+
+-
+
+**directory_id:** `typing.Optional[str]` — Unique identifier for the Directory to move Flow to. Starts with `dir_`.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.list(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Get a list of Flows.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+response = client.flows.list(
+ size=1,
+)
+for item in response:
+    print(item)
+# alternatively, you can paginate page-by-page
+for page in response.iter_pages():
+    print(page)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**page:** `typing.Optional[int]` — Page number for pagination.
+
+
+
+
+
+-
+
+**size:** `typing.Optional[int]` — Page size for pagination. Number of Flows to fetch.
+
+
+
+
+
+-
+
+**name:** `typing.Optional[str]` — Case-insensitive filter for Flow name.
+
+
+
+
+
+-
+
+**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Flow. This filter matches against both email address and name of users.
+
+
+
+
+
+-
+
+**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Flows by
+
+
+
+
+
+-
+
+**order:** `typing.Optional[SortOrder]` — Direction to sort by.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.upsert(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Create or update a Flow.
+
+Flows can also be identified by the `ID` or their `path`.
+
+You can provide `version_name` and `version_description` to identify and describe your versions.
+Version names must be unique within a Flow - attempting to create a version with a name
+that already exists will result in a 409 Conflict error.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.upsert(
+ path="Personal Projects/MedQA Flow",
+ attributes={
+ "prompt": {
+ "template": "You are a helpful medical assistant helping with medical anamnesis. Answer {{question}}",
+ "model": "gpt-4o",
+ "temperature": 0.8,
+ },
+ "tool": {
+ "name": "retrieval_tool_v3",
+ "description": "Retrieval tool for MedQA.",
+ "source_code": "def retrieval_tool(question: str) -> str:\n pass\n",
+ },
+ "version_name": "medqa-flow-v1",
+ "version_description": "Initial version",
+ },
+)
+
+```
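+
+Since a duplicate `version_name` returns a 409 Conflict, callers may want to handle it explicitly. A sketch under the assumption that the SDK surfaces HTTP failures through the Fern-generated `ApiError` in `humanloop.core.api_error`:
+
+```python
+from humanloop import Humanloop
+from humanloop.core.api_error import ApiError  # assumed Fern error type
+
+client = Humanloop(
+    api_key="YOUR_API_KEY",
+)
+try:
+    client.flows.upsert(
+        path="Personal Projects/MedQA Flow",
+        attributes={"prompt": {"model": "gpt-4o"}},
+        version_name="medqa-flow-v1",  # may already exist
+    )
+except ApiError as e:
+    if e.status_code == 409:
+        # The version name is taken for this Flow; choose a new name
+        # or reuse the existing version instead.
+        pass
+    else:
+        raise
+```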
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**attributes:** `typing.Dict[str, typing.Optional[typing.Any]]` — A key-value object identifying the Flow Version.
+
+
+
+
+
+-
+
+**path:** `typing.Optional[str]` — Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+
+
+
+
+-
+
+**id:** `typing.Optional[str]` — ID for an existing Flow.
+
+
+
+
+
+-
+
+**version_name:** `typing.Optional[str]` — Unique name for the Flow version. Version names must be unique for a given Flow.
+
+
+
+
+
+-
+
+**version_description:** `typing.Optional[str]` — Description of the version, e.g., the changes made in this version.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.list_versions(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Get a list of all the versions of a Flow.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.list_versions(
+ id="fl_6o701g4jmcanPVHxdqD0O",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Flow.
+
+
+
+
+
+-
+
+**evaluator_aggregates:** `typing.Optional[bool]` — Whether to include Evaluator aggregate results for the versions in the response
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.delete_flow_version(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Delete a version of the Flow.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.delete_flow_version(
+ id="id",
+ version_id="version_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Flow.
+
+
+
+
+
+-
+
+**version_id:** `str` — Unique identifier for the specific version of the Flow.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.update_flow_version(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Update the name or description of the Flow version.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.update_flow_version(
+ id="id",
+ version_id="version_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Flow.
+
+
+
+
+
+-
+
+**version_id:** `str` — Unique identifier for the specific version of the Flow.
+
+
+
+
+
+-
+
+**name:** `typing.Optional[str]` — Name of the version.
+
+
+
+
+
+-
+
+**description:** `typing.Optional[str]` — Description of the version.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.set_deployment(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Deploy Flow to an Environment.
+
+Set the deployed version for the specified Environment. This Flow
+will be used for calls made to the Flow in this Environment.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.set_deployment(
+ id="fl_6o701g4jmcanPVHxdqD0O",
+ environment_id="staging",
+ version_id="flv_6o701g4jmcanPVHxdqD0O",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Flow.
+
+
+
+
+
+-
+
+**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
+
+
+
+
+
+-
+
+**version_id:** `str` — Unique identifier for the specific version of the Flow.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.remove_deployment(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Remove deployed Flow from the Environment.
+
+Remove the deployed version for the specified Environment. This Flow
+will no longer be used for calls made to the Flow in this Environment.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.remove_deployment(
+ id="fl_6o701g4jmcanPVHxdqD0O",
+ environment_id="staging",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Flow.
+
+
+
+
+
+-
+
+**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.list_environments(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+List all Environments and their deployed versions for the Flow.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.list_environments(
+ id="fl_6o701g4jmcanPVHxdqD0O",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Flow.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.flows.update_monitoring(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Activate and deactivate Evaluators for monitoring the Flow.
+
+An activated Evaluator will automatically be run on all new "completed" Logs
+within the Flow for monitoring purposes.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.flows.update_monitoring(
+ id="fl_6o701g4jmcanPVHxdqD0O",
+ activate=[{"evaluator_version_id": "evv_1abc4308abd"}],
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str`
+
+
+
+
+
+-
+
+**activate:** `typing.Optional[
+ typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]
+]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+
+
+
+
+
+-
+
+**deactivate:** `typing.Optional[
+ typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]
+]` — Evaluators to deactivate. These will not be run on new Logs.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+## Agents
+client.agents.log(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Create an Agent Log.
+
+You can use the query parameters `version_id` or `environment` to target
+an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete`
+in order to trigger Evaluators.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.agents.log()
+
+```
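+
+To illustrate the `incomplete` → `complete` workflow described above, a hedged sketch; the path, messages, output, and the response attribute names (`agent_id` and `id`, assumed by analogy with the other Log responses) are illustrative:
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+    api_key="YOUR_API_KEY",
+)
+# Create the Log as `incomplete` so monitoring Evaluators wait for it.
+log = client.agents.log(
+    path="My Agents/Support Agent",
+    messages=[{"role": "user", "content": "Where is my order?"}],
+    log_status="incomplete",
+)
+# Later, mark the Log `complete` to trigger any monitoring Evaluators.
+client.agents.update_log(
+    id=log.agent_id,
+    log_id=log.id,
+    output="Your order shipped yesterday.",
+    log_status="complete",
+)
+```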
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Agent to log to.
+
+
+
+
+
+-
+
+**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
+
+
+
+
+
+-
+
+**run_id:** `typing.Optional[str]` — Unique identifier for the Run to associate the Log to.
+
+
+
+
+
+-
+
+**path:** `typing.Optional[str]` — Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+
+
+
+
+-
+
+**id:** `typing.Optional[str]` — ID for an existing Agent.
+
+
+
+
+
+-
+
+**output_message:** `typing.Optional[ChatMessageParams]` — The message returned by the provider.
+
+
+
+
+
+-
+
+**prompt_tokens:** `typing.Optional[int]` — Number of tokens in the prompt used to generate the output.
+
+
+
+
+
+-
+
+**reasoning_tokens:** `typing.Optional[int]` — Number of reasoning tokens used to generate the output.
+
+
+
+
+
+-
+
+**output_tokens:** `typing.Optional[int]` — Number of tokens in the output generated by the model.
+
+
+
+
+
+-
+
+**prompt_cost:** `typing.Optional[float]` — Cost in dollars associated with the tokens in the prompt.
+
+
+
+
+
+-
+
+**output_cost:** `typing.Optional[float]` — Cost in dollars associated with the tokens in the output.
+
+
+
+
+
+-
+
+**finish_reason:** `typing.Optional[str]` — Reason the generation finished.
+
+
+
+
+
+-
+
+**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — The messages passed to the provider chat endpoint.
+
+
+
+
+
+-
+
+**tool_choice:** `typing.Optional[AgentLogRequestToolChoiceParams]`
+
+Controls how the model uses tools. The following options are supported:
+- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+- `'required'` means the model must call one or more of the provided tools.
+- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function (see the sketch below).
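+
+A minimal sketch of the final, forced-function form; `get_weather` is a hypothetical tool name defined on your Agent:
+
+```python
+# Force the model to call one specific tool by name.
+tool_choice = {"type": "function", "function": {"name": "get_weather"}}
+```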
+
+
+
+
+
+-
+
+**agent:** `typing.Optional[AgentLogRequestAgentParams]`
+
+The agent configuration to use. Two formats are supported:
+- An `AgentKernelRequest` object containing the agent configuration
+- A string containing a serialized .agent file
+A new Agent version will be created if the provided details are new.
+
+
+
+
+
+-
+
+**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
+
+
+
+
+
+-
+
+**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
+
+
+
+
+
+-
+
+**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+
+
+
+
+-
+
+**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created.
+
+
+
+
+
+-
+
+**error:** `typing.Optional[str]` — Error message if the log is an error.
+
+
+
+
+
+-
+
+**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
+
+
+
+
+
+-
+
+**stdout:** `typing.Optional[str]` — Captured log and debug statements.
+
+
+
+
+
+-
+
+**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider.
+
+
+
+
+
+-
+
+**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received from the provider.
+
+
+
+
+
+-
+
+**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
+
+
+
+
+
+-
+
+**source:** `typing.Optional[str]` — Identifies where the model was called from.
+
+
+
+
+
+-
+
+**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
+
+
+
+
+
+-
+
+**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+
+
+
+
+-
+
+**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+
+
+
+
+-
+
+**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
+
+
+
+
+
+-
+
+**user:** `typing.Optional[str]` — End-user ID related to the Log.
+
+
+
+
+
+-
+
+**agent_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated with.
+
+
+
+
+
+-
+
+**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
+
+
+
+
+
+-
+
+**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.agents.update_log(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Update a Log.
+
+Update the details of a Log with the given ID.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.agents.update_log(
+ id="id",
+ log_id="log_id",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Agent.
+
+
+
+
+
+-
+
+**log_id:** `str` — Unique identifier for the Log.
+
+
+
+
+
+-
+
+**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — List of chat messages that were used as an input to the Agent.
+
+
+
+
+
+-
+
+**output_message:** `typing.Optional[ChatMessageParams]` — The output message returned by this Agent.
+
+
+
+
+
+-
+
+**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the Agent Log.
+
+
+
+
+
+-
+
+**output:** `typing.Optional[str]` — The output of the Agent Log. Provide None to unset the existing `output` value. Provide either this, `output_message` or `error`.
+
+
+
+
+
+-
+
+**error:** `typing.Optional[str]` — The error message of the Agent Log. Provide None to unset the existing `error` value. Provide either this, `output_message` or `output`.
+
+
+
+
+
+-
+
+**log_status:** `typing.Optional[LogStatus]` — Status of the Agent Log. When an Agent Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.agents.call_stream(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Call an Agent.
+
+Calling an Agent calls the model provider before logging
+the request, responses and metadata to Humanloop.
+
+You can use the query parameters `version_id` or `environment` to target
+an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+Instead of targeting an existing version explicitly, you can pass in
+Agent details in the request body. In this case, we will check if the details correspond
+to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+in the case where you are storing or deriving your Agent details in code.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+response = client.agents.call_stream()
+for chunk in response.data:
+ print(chunk)
+
+```
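+
+A slightly fuller hedged sketch, with a hypothetical Agent path and user message, printing each chunk as it arrives:
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+    api_key="YOUR_API_KEY",
+)
+response = client.agents.call_stream(
+    path="My Agents/Support Agent",
+    messages=[{"role": "user", "content": "Where is my order?"}],
+)
+for chunk in response.data:
+    print(chunk)
+```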
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Agent to log to.
+
+
+
+
+
+-
+
+**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
+
+
+
+
+
+-
+
+**path:** `typing.Optional[str]` — Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+
+
+
+
+-
+
+**id:** `typing.Optional[str]` — ID for an existing Agent.
+
+
+
+
+
+-
+
+**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — The messages passed to the provider chat endpoint.
+
+
+
+
+
+-
+
+**tool_choice:** `typing.Optional[AgentsCallStreamRequestToolChoiceParams]`
+
+Controls how the model uses tools. The following options are supported:
+- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+- `'required'` means the model must call one or more of the provided tools.
+- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+
+
+
+
+-
+
+**agent:** `typing.Optional[AgentsCallStreamRequestAgentParams]`
+
+The agent configuration to use. Two formats are supported:
+- An `AgentKernelRequest` object containing the agent configuration
+- A string containing a serialized .agent file
+A new Agent version will be created if the provided details are new.
+
+
+
+
+
+-
+
+**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
+
+
+
+
+
+-
+
+**source:** `typing.Optional[str]` — Identifies where the model was called from.
+
+
+
+
+
+-
+
+**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
+
+
+
+
+
+-
+
+**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
+
+
+
+
+
+-
+
+**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
+
+
+
+
+
+-
+
+**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+
+
+
+
+-
+
+**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+
+
+
+
+-
+
+**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
+
+
+
+
+
+-
+
+**user:** `typing.Optional[str]` — End-user ID related to the Log.
+
+
+
+
+
+-
+
+**agents_call_stream_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated with.
+
+
+
+
+
+-
+
+**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
+
+
+
+
+
+-
+
+**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+
+
+
+
+-
+
+**provider_api_keys:** `typing.Optional[ProviderApiKeysParams]` — API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+
+
+
+
+-
+
+**return_inputs:** `typing.Optional[bool]` — Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+
+
+
+
+-
+
+**include_trace_children:** `typing.Optional[bool]` — If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.agents.call(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Call an Agent.
+
+Calling an Agent calls the model provider before logging
+the request, responses and metadata to Humanloop.
+
+You can use the query parameters `version_id` or `environment` to target
+an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+Instead of targeting an existing version explicitly, you can pass in
+Agent details in the request body. In this case, we will check if the details correspond
+to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+in the case where you are storing or deriving your Agent details in code.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.agents.call()
+
+```
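+
+As described above, you can also pass the Agent details inline rather than targeting a version explicitly. A hedged sketch with a hypothetical configuration; a matching existing version is reused, otherwise a new version is created:
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+    api_key="YOUR_API_KEY",
+)
+client.agents.call(
+    path="My Agents/Support Agent",
+    agent={
+        "model": "gpt-4o",
+        "template": [
+            {"role": "system", "content": "You are a helpful support agent."},
+        ],
+    },
+    messages=[{"role": "user", "content": "Where is my order?"}],
+)
+```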
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Agent to log to.
+
+
+
+
+
+-
+
+**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to.
+
+
+
+
+
+-
+
+**path:** `typing.Optional[str]` — Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+
+
+
+
+-
+
+**id:** `typing.Optional[str]` — ID for an existing Agent.
+
+
+
+
+
+-
+
+**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — The messages passed to the provider chat endpoint.
+
+
+
+
+
+-
+
+**tool_choice:** `typing.Optional[AgentsCallRequestToolChoiceParams]`
+
+Controls how the model uses tools. The following options are supported:
+- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+- `'required'` means the model must call one or more of the provided tools.
+- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+
+
+
+
+-
+
+**agent:** `typing.Optional[AgentsCallRequestAgentParams]`
+
+The agent configuration to use. Two formats are supported:
+- An `AgentKernelRequest` object containing the agent configuration
+- A string containing a serialized .agent file
+A new Agent version will be created if the provided details are new.
+
+
+
+
+
+-
+
+**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
+
+
+
+
+
+-
+
+**source:** `typing.Optional[str]` — Identifies where the model was called from.
+
+
+
+
+
+-
+
+**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
+
+
+
+
+
+-
+
+**start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
+
+
+
+
+
+-
+
+**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended.
+
+
+
+
+
+-
+
+**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+
+
+
+
+-
+
+**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+
+
+
+
+-
+
+**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
+
+
+
+
+
+-
+
+**user:** `typing.Optional[str]` — End-user ID related to the Log.
+
+
+
+
+
+-
+
+**agents_call_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated with.
+
+
+
+
+
+-
+
+**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
+
+
+
+
+
+-
+
+**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+
+
+
+
+-
+
+**provider_api_keys:** `typing.Optional[ProviderApiKeysParams]` — API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+
+
+
+
+-
+
+**return_inputs:** `typing.Optional[bool]` — Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+
+
+
+
+-
+
+**include_trace_children:** `typing.Optional[bool]` — If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.agents.continue_stream(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Continue an incomplete Agent call.
+
+This endpoint allows continuing an existing incomplete Agent call, using the context
+from the previous interaction. The Agent will resume processing from where it left off.
+
+The original log must be in an incomplete state to be continued.
+
+The messages in the request will be appended
+to the original messages in the log.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+response = client.agents.continue_stream(
+ log_id="log_id",
+ messages=[{"role": "user"}],
+)
+for chunk in response.data:
+ print(chunk)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**log_id:** `str` — This identifies the Agent Log to continue.
+
+
+
+
+
+-
+
+**messages:** `typing.Sequence[ChatMessageParams]` — The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls.
+
+
+
+
+
+-
+
+**provider_api_keys:** `typing.Optional[ProviderApiKeysParams]` — API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+
+
+
+
+-
+
+**include_trace_children:** `typing.Optional[bool]` — If true, populate `trace_children` for the returned Agent Log. Defaults to false.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+
+client.agents.continue_(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Continue an incomplete Agent call.
+
+This endpoint allows continuing an existing incomplete Agent call, using the context
+from the previous interaction. The Agent will resume processing from where it left off.
+
+The original log must be in an incomplete state to be continued.
+
+The messages in the request will be appended
+to the original messages in the log.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.agents.continue_(
+ log_id="log_id",
+ messages=[{"role": "user"}],
+)
+
+```
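+
+Continuations often start with Tool results for the previous Assistant message's tool calls (see the `messages` parameter below). A hedged sketch with a hypothetical `tool_call_id` and payload:
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+    api_key="YOUR_API_KEY",
+)
+client.agents.continue_(
+    log_id="log_id",
+    messages=[
+        {
+            # Matches the tool call issued by the previous Assistant message.
+            "role": "tool",
+            "tool_call_id": "call_abc123",
+            "content": '{"temperature_c": 21}',
+        }
+    ],
+)
+```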
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**log_id:** `str` — This identifies the Agent Log to continue.
@@ -6908,7 +10003,7 @@ client.flows.log(
-
-**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds.
+**messages:** `typing.Sequence[ChatMessageParams]` — The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls.
@@ -6916,7 +10011,7 @@ client.flows.log(
-
-**stdout:** `typing.Optional[str]` — Captured log and debug statements.
+**provider_api_keys:** `typing.Optional[ProviderApiKeysParams]` — API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
@@ -6924,7 +10019,7 @@ client.flows.log(
-
-**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider.
+**include_trace_children:** `typing.Optional[bool]` — If true, populate `trace_children` for the returned Agent Log. Defaults to false.
@@ -6932,55 +10027,67 @@ client.flows.log(
-
-**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received the provider.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
--
-**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template.
-
+
+client.agents.list(...)
-
-**source:** `typing.Optional[str]` — Identifies where the model was called from.
-
-
-
+#### 📝 Description
-
-**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record.
-
-
-
-
-
-**log_status:** `typing.Optional[LogStatus]` — Status of the Flow Log. When a Log is updated from `incomplete` to `complete`, it becomes available to Monitoring Evaluators. Flow Logs cannot have an unspecified status: they must start as `incomplete` to allow children to be added. Provide `complete` if you don't intend to add children to the trace.
-
+Get a list of all Agents.
+
+
+#### 🔌 Usage
+
-
-**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
-
+
+-
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.agents.list()
+
+```
+
+
+#### ⚙️ Parameters
+
-
-**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace.
+
+-
+
+**page:** `typing.Optional[int]` — Page number for pagination.
@@ -6988,7 +10095,7 @@ client.flows.log(
-
-**user:** `typing.Optional[str]` — End-user ID related to the Log.
+**size:** `typing.Optional[int]` — Page size for pagination. Number of Agents to fetch.
@@ -6996,7 +10103,7 @@ client.flows.log(
-
-**flow_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated to.
+**name:** `typing.Optional[str]` — Case-insensitive filter for Agent name.
@@ -7004,7 +10111,7 @@ client.flows.log(
-
-**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop.
+**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users.
@@ -7012,7 +10119,7 @@ client.flows.log(
-
-**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Agents by.
@@ -7020,7 +10127,7 @@ client.flows.log(
-
-**flow:** `typing.Optional[FlowKernelRequestParams]` — Flow used to generate the Trace.
+**order:** `typing.Optional[SortOrder]` — Direction to sort by.
@@ -7040,7 +10147,7 @@ client.flows.log(
-client.flows.update_log(...)
+client.agents.upsert(...)
-
@@ -7052,12 +10159,14 @@ client.flows.log(
-
-Update the status, inputs, output of a Flow Log.
+Create an Agent or update it with a new version if it already exists.
-Marking a Flow Log as complete will trigger any monitoring Evaluators to run.
-Inputs and output (or error) must be provided in order to mark it as complete.
+Agents are identified by the `ID` or their `path`. The parameters (e.g. the template, temperature, model) and
+tools determine the versions of the Agent.
-The end_time log attribute will be set to match the time the log is marked as complete.
+You can provide `version_name` and `version_description` to identify and describe your versions.
+Version names must be unique within an Agent - attempting to create a version with a name
+that already exists will result in a 409 Conflict error.
@@ -7077,13 +10186,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.update_log(
- log_id="medqa_experiment_0001",
- inputs={
- "question": "Patient with a history of diabetes and normal tension presents with chest pain and shortness of breath."
- },
- output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.",
- log_status="complete",
+client.agents.upsert(
+ model="model",
)
```
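+
+A fuller hedged sketch, with hypothetical path, template, and version details; upserting again with identical parameters resolves to the existing version rather than creating a new one:
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+    api_key="YOUR_API_KEY",
+)
+client.agents.upsert(
+    path="My Agents/Support Agent",
+    model="gpt-4o",
+    template=[
+        {
+            "role": "system",
+            "content": "You are a support agent. Answer {{question}}.",
+        },
+    ],
+    temperature=0.7,
+    max_iterations=5,
+    version_name="support-agent-v1",
+    version_description="Initial version",
+)
+```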
@@ -7100,7 +10204,7 @@ client.flows.update_log(
-
-**log_id:** `str` — Unique identifier of the Flow Log.
+**model:** `str` — The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
@@ -7108,7 +10212,7 @@ client.flows.update_log(
-
-**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — List of chat messages that were used as an input to the Flow.
+**path:** `typing.Optional[str]` — Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
@@ -7116,7 +10220,7 @@ client.flows.update_log(
-
-**output_message:** `typing.Optional[ChatMessageParams]` — The output message returned by this Flow.
+**id:** `typing.Optional[str]` — ID for an existing Agent.
@@ -7124,7 +10228,7 @@ client.flows.update_log(
-
-**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the Flow Log.
+**endpoint:** `typing.Optional[ModelEndpoints]` — The provider model endpoint used.
@@ -7132,7 +10236,14 @@ client.flows.update_log(
-
-**output:** `typing.Optional[str]` — The output of the Flow Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`.
+**template:** `typing.Optional[AgentRequestTemplateParams]`
+
+The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+For completion models, provide a prompt template as a string.
+
+Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
@@ -7140,7 +10251,7 @@ client.flows.update_log(
-
-**error:** `typing.Optional[str]` — The error message of the Flow Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`.
+**template_language:** `typing.Optional[TemplateLanguage]` — The template language to use for rendering the template.
@@ -7148,7 +10259,7 @@ client.flows.update_log(
-
-**log_status:** `typing.Optional[LogStatus]` — Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs.
+**provider:** `typing.Optional[ModelProviders]` — The company providing the underlying model service.
@@ -7156,72 +10267,127 @@ client.flows.update_log(
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**max_tokens:** `typing.Optional[int]` — The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt.
+
+
+-
+
+**temperature:** `typing.Optional[float]` — What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+
+
+-
+**top_p:** `typing.Optional[float]` — An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+
-
-client.flows.get(...)
-
-#### 📝 Description
+**stop:** `typing.Optional[AgentRequestStopParams]` — The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+
+
+
-
+**presence_penalty:** `typing.Optional[float]` — Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+
+
+
+
-
-Retrieve the Flow with the given ID.
+**frequency_penalty:** `typing.Optional[float]` — Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+
+
+
-By default, the deployed version of the Flow is returned. Use the query parameters
-`version_id` or `environment` to target a specific version of the Flow.
+
+-
+
+**other:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Other parameter values to be passed to the provider call.
+
+
+
+-
+
+**seed:** `typing.Optional[int]` — If specified, the model will make a best effort to sample deterministically, but it is not guaranteed.
+
-#### 🔌 Usage
+
+-
+
+**response_format:** `typing.Optional[ResponseFormatParams]` — The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+
+
+
-
+**reasoning_effort:** `typing.Optional[AgentRequestReasoningEffortParams]` — Guidance on how many reasoning tokens it should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
+
+
+
+
-
-```python
-from humanloop import Humanloop
+**tools:** `typing.Optional[typing.Sequence[AgentRequestToolsItemParams]]`
+
+
+
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.flows.get(
- id="fl_6o701g4jmcanPVHxdqD0O",
-)
+
+-
-```
+**attributes:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used.
+
+
+
+-
+
+**max_iterations:** `typing.Optional[int]` — The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
+
-#### ⚙️ Parameters
+
+-
+
+**version_name:** `typing.Optional[str]` — Unique name for the Agent version. Each Agent can only have one version with a given name.
+
+
+
-
+**version_description:** `typing.Optional[str]` — Description of the Version.
+
+
+
+
-
-**id:** `str` — Unique identifier for Flow.
+**description:** `typing.Optional[str]` — Description of the Agent.
@@ -7229,7 +10395,7 @@ client.flows.get(
-
-**version_id:** `typing.Optional[str]` — A specific Version ID of the Flow to retrieve.
+**tags:** `typing.Optional[typing.Sequence[str]]` — List of tags associated with this Agent.
@@ -7237,7 +10403,7 @@ client.flows.get(
-
-**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
+**readme:** `typing.Optional[str]` — Long description of the Agent.
@@ -7257,7 +10423,7 @@ client.flows.get(
-client.flows.delete(...)
+client.agents.delete_agent_version(...)
-
@@ -7269,7 +10435,7 @@ client.flows.get(
-
-Delete the Flow with the given ID.
+Delete a version of the Agent.
@@ -7289,8 +10455,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.delete(
- id="fl_6o701g4jmcanPVHxdqD0O",
+client.agents.delete_agent_version(
+ id="id",
+ version_id="version_id",
)
```
@@ -7307,7 +10474,15 @@ client.flows.delete(
-
-**id:** `str` — Unique identifier for Flow.
+**id:** `str` — Unique identifier for Agent.
+
+
+
+
+
+-
+
+**version_id:** `str` — Unique identifier for the specific version of the Agent.
@@ -7327,7 +10502,7 @@ client.flows.delete(
-client.flows.move(...)
+client.agents.patch_agent_version(...)
-
@@ -7339,7 +10514,7 @@ client.flows.delete(
-
-Move the Flow to a different path or change the name.
+Update the name or description of the Agent version.
@@ -7359,9 +10534,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.move(
- id="fl_6o701g4jmcanPVHxdqD0O",
- path="new directory/new name",
+client.agents.patch_agent_version(
+ id="id",
+ version_id="version_id",
)
```
@@ -7378,7 +10553,7 @@ client.flows.move(
-
-**id:** `str` — Unique identifier for Flow.
+**id:** `str` — Unique identifier for Agent.
@@ -7386,7 +10561,7 @@ client.flows.move(
-
-**path:** `typing.Optional[str]` — Path of the Flow including the Flow name, which is used as a unique identifier.
+**version_id:** `str` — Unique identifier for the specific version of the Agent.
@@ -7394,7 +10569,7 @@ client.flows.move(
-
-**name:** `typing.Optional[str]` — Name of the Flow.
+**name:** `typing.Optional[str]` — Name of the version.
@@ -7402,7 +10577,7 @@ client.flows.move(
-
-**directory_id:** `typing.Optional[str]` — Unique identifier for the Directory to move Flow to. Starts with `dir_`.
+**description:** `typing.Optional[str]` — Description of the version.
@@ -7422,7 +10597,7 @@ client.flows.move(
-client.flows.list(...)
+client.agents.get(...)
-
@@ -7434,7 +10609,10 @@ client.flows.move(
-
-Get a list of Flows.
+Retrieve the Agent with the given ID.
+
+By default, the deployed version of the Agent is returned. Use the query parameters
+`version_id` or `environment` to target a specific version of the Agent.
@@ -7454,14 +10632,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-response = client.flows.list(
- size=1,
+client.agents.get(
+ id="id",
)
-for item in response:
- yield item
-# alternatively, you can paginate page-by-page
-for page in response.iter_pages():
- yield page
```
@@ -7469,55 +10642,101 @@ for page in response.iter_pages():
-#### ⚙️ Parameters
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**id:** `str` — Unique identifier for Agent.
+
+
+
+
+
+-
+
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Agent to retrieve.
+
+
+
+
+
+-
+
+**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
+
+
+
+client.agents.delete(...)
-
+#### 📝 Description
+
-
-**page:** `typing.Optional[int]` — Page number for pagination.
-
-
-
-
-
-**size:** `typing.Optional[int]` — Page size for pagination. Number of Flows to fetch.
-
+Delete the Agent with the given ID.
+
+
+#### 🔌 Usage
+
-
-**name:** `typing.Optional[str]` — Case-insensitive filter for Flow name.
-
-
-
-
-
-**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Flow. This filter matches against both email address and name of users.
-
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+ api_key="YOUR_API_KEY",
+)
+client.agents.delete(
+ id="id",
+)
+
+```
+
+
+
+#### ⚙️ Parameters
-
-**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Flows by
-
-
-
-
-
-**order:** `typing.Optional[SortOrder]` — Direction to sort by.
+**id:** `str` — Unique identifier for Agent.
@@ -7537,7 +10756,7 @@ for page in response.iter_pages():
-client.flows.upsert(...)
+client.agents.move(...)
-
@@ -7549,13 +10768,7 @@ for page in response.iter_pages():
-
-Create or update a Flow.
-
-Flows can also be identified by the `ID` or their `path`.
-
-You can provide `version_name` and `version_description` to identify and describe your versions.
-Version names must be unique within a Flow - attempting to create a version with a name
-that already exists will result in a 409 Conflict error.
+Move the Agent to a different path or change the name.
@@ -7575,22 +10788,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.upsert(
- path="Personal Projects/MedQA Flow",
- attributes={
- "prompt": {
- "template": "You are a helpful medical assistant helping with medical anamnesis. Answer {{question}}",
- "model": "gpt-4o",
- "temperature": 0.8,
- },
- "tool": {
- "name": "retrieval_tool_v3",
- "description": "Retrieval tool for MedQA.",
- "source_code": "def retrieval_tool(question: str) -> str:\n pass\n",
- },
- "version_name": "medqa-flow-v1",
- "version_description": "Initial version",
- },
+client.agents.move(
+ id="id",
)
```
@@ -7607,15 +10806,7 @@ client.flows.upsert(
-
-**attributes:** `typing.Dict[str, typing.Optional[typing.Any]]` — A key-value object identifying the Flow Version.
-
-
-
-
-
--
-
-**path:** `typing.Optional[str]` — Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+**id:** `str` — Unique identifier for Agent.
@@ -7623,7 +10814,7 @@ client.flows.upsert(
-
-**id:** `typing.Optional[str]` — ID for an existing Flow.
+**path:** `typing.Optional[str]` — Path of the Agent including the Agent name, which is used as a unique identifier.
@@ -7631,7 +10822,7 @@ client.flows.upsert(
-
-**version_name:** `typing.Optional[str]` — Unique name for the Flow version. Version names must be unique for a given Flow.
+**name:** `typing.Optional[str]` — Name of the Agent.
@@ -7639,7 +10830,7 @@ client.flows.upsert(
-
-**version_description:** `typing.Optional[str]` — Description of the version, e.g., the changes made in this version.
+**directory_id:** `typing.Optional[str]` — Unique identifier for the Directory to move the Agent to. Starts with `dir_`.
@@ -7659,7 +10850,7 @@ client.flows.upsert(
-client.flows.list_versions(...)
+client.agents.list_versions(...)
-
@@ -7671,7 +10862,7 @@ client.flows.upsert(
-
-Get a list of all the versions of a Flow.
+Get a list of all the versions of an Agent.
@@ -7691,8 +10882,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.list_versions(
- id="fl_6o701g4jmcanPVHxdqD0O",
+client.agents.list_versions(
+ id="id",
)
```
@@ -7709,7 +10900,7 @@ client.flows.list_versions(
-
-**id:** `str` — Unique identifier for Flow.
+**id:** `str` — Unique identifier for Agent.
@@ -7737,7 +10928,7 @@ client.flows.list_versions(
-client.flows.delete_flow_version(...)
+client.agents.set_deployment(...)
-
@@ -7749,7 +10940,10 @@ client.flows.list_versions(
-
-Delete a version of the Flow.
+Deploy Agent to an Environment.
+
+Set the deployed version for the specified Environment. This Agent
+will be used for calls made to the Agent in this Environment.
@@ -7769,8 +10963,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.delete_flow_version(
+client.agents.set_deployment(
id="id",
+ environment_id="environment_id",
version_id="version_id",
)
@@ -7788,7 +10983,7 @@ client.flows.delete_flow_version(
-
-**id:** `str` — Unique identifier for Flow.
+**id:** `str` — Unique identifier for Agent.
@@ -7796,7 +10991,15 @@ client.flows.delete_flow_version(
-
-**version_id:** `str` — Unique identifier for the specific version of the Flow.
+**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
+
+
+
+
+
+-
+
+**version_id:** `str` — Unique identifier for the specific version of the Agent.
@@ -7816,7 +11019,7 @@ client.flows.delete_flow_version(
-client.flows.update_flow_version(...)
+client.agents.remove_deployment(...)
-
@@ -7828,7 +11031,10 @@ client.flows.delete_flow_version(
-
-Update the name or description of the Flow version.
+Remove deployed Agent from the Environment.
+
+Remove the deployed version for the specified Environment. This Agent
+will no longer be used for calls made to the Agent in this Environment.
@@ -7848,9 +11054,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.update_flow_version(
+client.agents.remove_deployment(
id="id",
- version_id="version_id",
+ environment_id="environment_id",
)
```
@@ -7867,23 +11073,7 @@ client.flows.update_flow_version(
-
-**id:** `str` — Unique identifier for Flow.
-
-
-
-
-
--
-
-**version_id:** `str` — Unique identifier for the specific version of the Flow.
-
-
-
-
-
--
-
-**name:** `typing.Optional[str]` — Name of the version.
+**id:** `str` — Unique identifier for Agent.
@@ -7891,7 +11081,7 @@ client.flows.update_flow_version(
-
-**description:** `typing.Optional[str]` — Description of the version.
+**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
@@ -7911,7 +11101,7 @@ client.flows.update_flow_version(
-client.flows.set_deployment(...)
+client.agents.list_environments(...)
-
@@ -7923,10 +11113,7 @@ client.flows.update_flow_version(
-
-Deploy Flow to an Environment.
-
-Set the deployed version for the specified Environment. This Flow
-will be used for calls made to the Flow in this Environment.
+List all Environments and their deployed versions for the Agent.
@@ -7946,10 +11133,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.set_deployment(
- id="fl_6o701g4jmcanPVHxdqD0O",
- environment_id="staging",
- version_id="flv_6o701g4jmcanPVHxdqD0O",
+client.agents.list_environments(
+ id="id",
)
```
@@ -7966,23 +11151,7 @@ client.flows.set_deployment(
-
-**id:** `str` — Unique identifier for Flow.
-
-
-
-
-
--
-
-**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to.
-
-
-
-
-
--
-
-**version_id:** `str` — Unique identifier for the specific version of the Flow.
+**id:** `str` — Unique identifier for Agent.
@@ -8002,7 +11171,7 @@ client.flows.set_deployment(
-client.flows.remove_deployment(...)
+client.agents.update_monitoring(...)
-
@@ -8014,10 +11183,10 @@ client.flows.set_deployment(
-
-Remove deployed Flow from the Environment.
+Activate and deactivate Evaluators for monitoring the Agent.
-Remove the deployed version for the specified Environment. This Flow
-will no longer be used for calls made to the Flow in this Environment.
+An activated Evaluator will automatically be run on all new Logs
+within the Agent for monitoring purposes.
@@ -8037,9 +11206,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.remove_deployment(
- id="fl_6o701g4jmcanPVHxdqD0O",
- environment_id="staging",
+client.agents.update_monitoring(
+ id="id",
)
```
@@ -8056,7 +11224,7 @@ client.flows.remove_deployment(
-
-**id:** `str` — Unique identifier for Flow.
+**id:** `str`
@@ -8064,7 +11232,19 @@ client.flows.remove_deployment(
-
-**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from.
+**activate:** `typing.Optional[
+ typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]
+]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+
+
+
+
+
+-
+
+**deactivate:** `typing.Optional[
+ typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]
+]` — Evaluators to deactivate. These will not be run on new Logs.
@@ -8084,7 +11264,7 @@ client.flows.remove_deployment(
-client.flows.list_environments(...)
+client.agents.serialize(...)
-
@@ -8096,7 +11276,13 @@ client.flows.remove_deployment(
-
-List all Environments and their deployed versions for the Flow.
+Serialize an Agent to the .agent file format.
+
+Useful for storing the Agent with your code in a version control system,
+or for editing with an AI tool.
+
+By default, the deployed version of the Agent is returned. Use the query parameters
+`version_id` or `environment` to target a specific version of the Agent.
@@ -8116,8 +11302,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.list_environments(
- id="fl_6o701g4jmcanPVHxdqD0O",
+client.agents.serialize(
+ id="id",
)
```
@@ -8134,7 +11320,23 @@ client.flows.list_environments(
-
-**id:** `str` — Unique identifier for Flow.
+**id:** `str` — Unique identifier for Agent.
+
+
+
+
+
+-
+
+**version_id:** `typing.Optional[str]` — A specific Version ID of the Agent to retrieve.
+
+
+
+
+
+-
+
+**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from.
@@ -8154,7 +11356,7 @@ client.flows.list_environments(
-client.flows.update_monitoring(...)
+client.agents.deserialize(...)
-
@@ -8166,10 +11368,10 @@ client.flows.list_environments(
-
-Activate and deactivate Evaluators for monitoring the Flow.
+Deserialize an Agent from the .agent file format.
-An activated Evaluator will automatically be run on all new "completed" Logs
-within the Flow for monitoring purposes.
+This returns the subset of an Agent's attributes that defines the Agent
+version (e.g. `model`, `temperature`, etc.).
@@ -8189,9 +11391,8 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.flows.update_monitoring(
- id="fl_6o701g4jmcanPVHxdqD0O",
- activate=[{"evaluator_version_id": "evv_1abc4308abd"}],
+client.agents.deserialize(
+ agent="agent",
)
```
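+
+A hedged round-trip sketch combining `serialize` and `deserialize`, assuming `serialize` returns the raw `.agent` text and using a hypothetical Agent ID:
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+    api_key="YOUR_API_KEY",
+)
+# Export the deployed version, e.g. to commit alongside your code.
+serialized = client.agents.serialize(id="ag_1234567890")
+# Recover the version-defining attributes (model, temperature, etc.).
+kernel = client.agents.deserialize(agent=serialized)
+```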
@@ -8208,27 +11409,7 @@ client.flows.update_monitoring(
-
-**id:** `str`
-
-
-
-
-
--
-
-**activate:** `typing.Optional[
- typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]
-]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
-
-
-
-
-
--
-
-**deactivate:** `typing.Optional[
- typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]
-]` — Evaluators to deactivate. These will not be run on new Logs.
+**agent:** `str`
@@ -8702,6 +11883,14 @@ client.files.list_files()
-
+**directory:** `typing.Optional[str]` — Case-insensitive filter for directory name.
+
+
+
+
+
+-
+
**template:** `typing.Optional[bool]` — Filter to include only template files.
@@ -8742,6 +11931,14 @@ client.files.list_files()
-
+**include_content:** `typing.Optional[bool]` — Whether to include the serialized file content in the response. Currently only supported for Agents and Prompts.
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -8820,6 +12017,14 @@ client.files.retrieve_by_path(
-
+**include_content:** `typing.Optional[bool]` — Whether to include the serialized file content in the response. Currently only supported for Agents and Prompts.
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
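+
+A hedged sketch of the new flag, with a hypothetical file path:
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(
+    api_key="YOUR_API_KEY",
+)
+# Include the serialized content (currently Agents and Prompts only).
+file = client.files.retrieve_by_path(
+    path="My Agents/Support Agent",
+    include_content=True,
+)
+```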
@@ -10190,7 +13395,7 @@ for page in response.iter_pages():
-
-**include_trace_children:** `typing.Optional[bool]` — If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow Logs.
+**include_trace_children:** `typing.Optional[bool]` — If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow or Agent Logs.
diff --git a/src/humanloop/__init__.py b/src/humanloop/__init__.py
index 0c431892..2ad9d39e 100644
--- a/src/humanloop/__init__.py
+++ b/src/humanloop/__init__.py
@@ -1,16 +1,45 @@
# This file was auto-generated by Fern from our API Definition.
from .types import (
+ AgentCallResponse,
+ AgentCallResponseToolChoice,
+ AgentCallStreamResponse,
+ AgentCallStreamResponsePayload,
AgentConfigResponse,
+ AgentContinueResponse,
+ AgentContinueResponseToolChoice,
+ AgentContinueStreamResponse,
+ AgentContinueStreamResponsePayload,
+ AgentInlineTool,
+ AgentKernelRequest,
+ AgentKernelRequestReasoningEffort,
+ AgentKernelRequestStop,
+ AgentKernelRequestTemplate,
+ AgentKernelRequestToolsItem,
+ AgentLinkedFileRequest,
+ AgentLinkedFileResponse,
+ AgentLinkedFileResponseFile,
+ AgentLogResponse,
+ AgentLogResponseToolChoice,
+ AgentLogStreamResponse,
+ AgentResponse,
+ AgentResponseReasoningEffort,
+ AgentResponseStop,
+ AgentResponseTemplate,
+ AgentResponseToolsItem,
+ AnthropicRedactedThinkingContent,
+ AnthropicThinkingContent,
BaseModelsUserResponse,
BooleanEvaluatorStatsResponse,
ChatMessage,
ChatMessageContent,
ChatMessageContentItem,
+ ChatMessageThinkingItem,
ChatRole,
ChatToolType,
CodeEvaluatorRequest,
ConfigToolResponse,
+ CreateAgentLogResponse,
CreateDatapointRequest,
CreateDatapointRequestTargetValue,
CreateEvaluatorLogResponse,
@@ -55,10 +84,12 @@
EvaluatorReturnTypeEnum,
EvaluatorVersionId,
EvaluatorsRequest,
+ EventType,
ExternalEvaluatorRequest,
FeedbackType,
FileEnvironmentResponse,
FileEnvironmentResponseFile,
+ FileEnvironmentVariableRequest,
FileId,
FilePath,
FileRequest,
@@ -76,7 +107,9 @@
ImageUrl,
ImageUrlDetail,
InputResponse,
+ LinkedFileRequest,
LinkedToolResponse,
+ ListAgents,
ListDatasets,
ListEvaluators,
ListFlows,
@@ -85,6 +118,7 @@
LlmEvaluatorRequest,
LogResponse,
LogStatus,
+ LogStreamResponse,
ModelEndpoints,
ModelProviders,
MonitoringEvaluatorEnvironmentRequest,
@@ -93,15 +127,18 @@
MonitoringEvaluatorVersionRequest,
NumericEvaluatorStatsResponse,
ObservabilityStatus,
+ OnAgentCallEnum,
+ OpenAiReasoningEffort,
OverallStats,
+ PaginatedDataAgentResponse,
PaginatedDataEvaluationLogResponse,
PaginatedDataEvaluatorResponse,
PaginatedDataFlowResponse,
PaginatedDataLogResponse,
PaginatedDataPromptResponse,
PaginatedDataToolResponse,
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse,
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem,
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse,
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem,
PaginatedDatapointResponse,
PaginatedDatasetResponse,
PaginatedEvaluationResponse,
@@ -110,6 +147,7 @@
PlatformAccessEnum,
PopulateTemplateResponse,
PopulateTemplateResponsePopulatedTemplate,
+ PopulateTemplateResponseReasoningEffort,
PopulateTemplateResponseStop,
PopulateTemplateResponseTemplate,
ProjectSortBy,
@@ -118,15 +156,16 @@
PromptCallResponseToolChoice,
PromptCallStreamResponse,
PromptKernelRequest,
+ PromptKernelRequestReasoningEffort,
PromptKernelRequestStop,
PromptKernelRequestTemplate,
PromptLogResponse,
PromptLogResponseToolChoice,
PromptResponse,
+ PromptResponseReasoningEffort,
PromptResponseStop,
PromptResponseTemplate,
ProviderApiKeys,
- ReasoningEffort,
ResponseFormat,
ResponseFormatType,
RunStatsResponse,
@@ -139,6 +178,7 @@
TextEvaluatorStatsResponse,
TimeUnit,
ToolCall,
+ ToolCallResponse,
ToolChoice,
ToolFunction,
ToolKernelRequest,
@@ -162,7 +202,29 @@
VersionStatus,
)
from .errors import UnprocessableEntityError
-from . import datasets, directories, evaluations, evaluators, files, flows, logs, prompts, tools
+from . import agents, datasets, directories, evaluations, evaluators, files, flows, logs, prompts, tools
+from .agents import (
+ AgentLogRequestAgent,
+ AgentLogRequestAgentParams,
+ AgentLogRequestToolChoice,
+ AgentLogRequestToolChoiceParams,
+ AgentRequestReasoningEffort,
+ AgentRequestReasoningEffortParams,
+ AgentRequestStop,
+ AgentRequestStopParams,
+ AgentRequestTemplate,
+ AgentRequestTemplateParams,
+ AgentRequestToolsItem,
+ AgentRequestToolsItemParams,
+ AgentsCallRequestAgent,
+ AgentsCallRequestAgentParams,
+ AgentsCallRequestToolChoice,
+ AgentsCallRequestToolChoiceParams,
+ AgentsCallStreamRequestAgent,
+ AgentsCallStreamRequestAgentParams,
+ AgentsCallStreamRequestToolChoice,
+ AgentsCallStreamRequestToolChoiceParams,
+)
from .client import AsyncHumanloop, Humanloop
from .datasets import ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints
from .environment import HumanloopEnvironment
@@ -186,26 +248,63 @@
)
from .files import RetrieveByPathFilesRetrieveByPathPostResponse, RetrieveByPathFilesRetrieveByPathPostResponseParams
from .prompts import (
+ PromptLogRequestPrompt,
+ PromptLogRequestPromptParams,
PromptLogRequestToolChoice,
PromptLogRequestToolChoiceParams,
PromptLogUpdateRequestToolChoice,
PromptLogUpdateRequestToolChoiceParams,
+ PromptRequestReasoningEffort,
+ PromptRequestReasoningEffortParams,
PromptRequestStop,
PromptRequestStopParams,
PromptRequestTemplate,
PromptRequestTemplateParams,
+ PromptsCallRequestPrompt,
+ PromptsCallRequestPromptParams,
PromptsCallRequestToolChoice,
PromptsCallRequestToolChoiceParams,
+ PromptsCallStreamRequestPrompt,
+ PromptsCallStreamRequestPromptParams,
PromptsCallStreamRequestToolChoice,
PromptsCallStreamRequestToolChoiceParams,
)
from .requests import (
+ AgentCallResponseParams,
+ AgentCallResponseToolChoiceParams,
+ AgentCallStreamResponseParams,
+ AgentCallStreamResponsePayloadParams,
AgentConfigResponseParams,
+ AgentContinueResponseParams,
+ AgentContinueResponseToolChoiceParams,
+ AgentContinueStreamResponseParams,
+ AgentContinueStreamResponsePayloadParams,
+ AgentInlineToolParams,
+ AgentKernelRequestParams,
+ AgentKernelRequestReasoningEffortParams,
+ AgentKernelRequestStopParams,
+ AgentKernelRequestTemplateParams,
+ AgentKernelRequestToolsItemParams,
+ AgentLinkedFileRequestParams,
+ AgentLinkedFileResponseFileParams,
+ AgentLinkedFileResponseParams,
+ AgentLogResponseParams,
+ AgentLogResponseToolChoiceParams,
+ AgentLogStreamResponseParams,
+ AgentResponseParams,
+ AgentResponseReasoningEffortParams,
+ AgentResponseStopParams,
+ AgentResponseTemplateParams,
+ AgentResponseToolsItemParams,
+ AnthropicRedactedThinkingContentParams,
+ AnthropicThinkingContentParams,
BooleanEvaluatorStatsResponseParams,
ChatMessageContentItemParams,
ChatMessageContentParams,
ChatMessageParams,
+ ChatMessageThinkingItemParams,
CodeEvaluatorRequestParams,
+ CreateAgentLogResponseParams,
CreateDatapointRequestParams,
CreateDatapointRequestTargetValueParams,
CreateEvaluatorLogResponseParams,
@@ -245,6 +344,7 @@
ExternalEvaluatorRequestParams,
FileEnvironmentResponseFileParams,
FileEnvironmentResponseParams,
+ FileEnvironmentVariableRequestParams,
FileIdParams,
FilePathParams,
FileRequestParams,
@@ -258,7 +358,9 @@
ImageChatContentParams,
ImageUrlParams,
InputResponseParams,
+ LinkedFileRequestParams,
LinkedToolResponseParams,
+ ListAgentsParams,
ListDatasetsParams,
ListEvaluatorsParams,
ListFlowsParams,
@@ -266,24 +368,27 @@
ListToolsParams,
LlmEvaluatorRequestParams,
LogResponseParams,
+ LogStreamResponseParams,
MonitoringEvaluatorEnvironmentRequestParams,
MonitoringEvaluatorResponseParams,
MonitoringEvaluatorVersionRequestParams,
NumericEvaluatorStatsResponseParams,
OverallStatsParams,
+ PaginatedDataAgentResponseParams,
PaginatedDataEvaluationLogResponseParams,
PaginatedDataEvaluatorResponseParams,
PaginatedDataFlowResponseParams,
PaginatedDataLogResponseParams,
PaginatedDataPromptResponseParams,
PaginatedDataToolResponseParams,
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseParams,
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams,
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseParams,
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams,
PaginatedDatapointResponseParams,
PaginatedDatasetResponseParams,
PaginatedEvaluationResponseParams,
PopulateTemplateResponseParams,
PopulateTemplateResponsePopulatedTemplateParams,
+ PopulateTemplateResponseReasoningEffortParams,
PopulateTemplateResponseStopParams,
PopulateTemplateResponseTemplateParams,
PromptCallLogResponseParams,
@@ -291,11 +396,13 @@
PromptCallResponseToolChoiceParams,
PromptCallStreamResponseParams,
PromptKernelRequestParams,
+ PromptKernelRequestReasoningEffortParams,
PromptKernelRequestStopParams,
PromptKernelRequestTemplateParams,
PromptLogResponseParams,
PromptLogResponseToolChoiceParams,
PromptResponseParams,
+ PromptResponseReasoningEffortParams,
PromptResponseStopParams,
PromptResponseTemplateParams,
ProviderApiKeysParams,
@@ -307,6 +414,7 @@
TextChatContentParams,
TextEvaluatorStatsResponseParams,
ToolCallParams,
+ ToolCallResponseParams,
ToolChoiceParams,
ToolFunctionParams,
ToolKernelRequestParams,
@@ -329,8 +437,82 @@
__all__ = [
"AddEvaluatorsRequestEvaluatorsItem",
"AddEvaluatorsRequestEvaluatorsItemParams",
+ "AgentCallResponse",
+ "AgentCallResponseParams",
+ "AgentCallResponseToolChoice",
+ "AgentCallResponseToolChoiceParams",
+ "AgentCallStreamResponse",
+ "AgentCallStreamResponseParams",
+ "AgentCallStreamResponsePayload",
+ "AgentCallStreamResponsePayloadParams",
"AgentConfigResponse",
"AgentConfigResponseParams",
+ "AgentContinueResponse",
+ "AgentContinueResponseParams",
+ "AgentContinueResponseToolChoice",
+ "AgentContinueResponseToolChoiceParams",
+ "AgentContinueStreamResponse",
+ "AgentContinueStreamResponseParams",
+ "AgentContinueStreamResponsePayload",
+ "AgentContinueStreamResponsePayloadParams",
+ "AgentInlineTool",
+ "AgentInlineToolParams",
+ "AgentKernelRequest",
+ "AgentKernelRequestParams",
+ "AgentKernelRequestReasoningEffort",
+ "AgentKernelRequestReasoningEffortParams",
+ "AgentKernelRequestStop",
+ "AgentKernelRequestStopParams",
+ "AgentKernelRequestTemplate",
+ "AgentKernelRequestTemplateParams",
+ "AgentKernelRequestToolsItem",
+ "AgentKernelRequestToolsItemParams",
+ "AgentLinkedFileRequest",
+ "AgentLinkedFileRequestParams",
+ "AgentLinkedFileResponse",
+ "AgentLinkedFileResponseFile",
+ "AgentLinkedFileResponseFileParams",
+ "AgentLinkedFileResponseParams",
+ "AgentLogRequestAgent",
+ "AgentLogRequestAgentParams",
+ "AgentLogRequestToolChoice",
+ "AgentLogRequestToolChoiceParams",
+ "AgentLogResponse",
+ "AgentLogResponseParams",
+ "AgentLogResponseToolChoice",
+ "AgentLogResponseToolChoiceParams",
+ "AgentLogStreamResponse",
+ "AgentLogStreamResponseParams",
+ "AgentRequestReasoningEffort",
+ "AgentRequestReasoningEffortParams",
+ "AgentRequestStop",
+ "AgentRequestStopParams",
+ "AgentRequestTemplate",
+ "AgentRequestTemplateParams",
+ "AgentRequestToolsItem",
+ "AgentRequestToolsItemParams",
+ "AgentResponse",
+ "AgentResponseParams",
+ "AgentResponseReasoningEffort",
+ "AgentResponseReasoningEffortParams",
+ "AgentResponseStop",
+ "AgentResponseStopParams",
+ "AgentResponseTemplate",
+ "AgentResponseTemplateParams",
+ "AgentResponseToolsItem",
+ "AgentResponseToolsItemParams",
+ "AgentsCallRequestAgent",
+ "AgentsCallRequestAgentParams",
+ "AgentsCallRequestToolChoice",
+ "AgentsCallRequestToolChoiceParams",
+ "AgentsCallStreamRequestAgent",
+ "AgentsCallStreamRequestAgentParams",
+ "AgentsCallStreamRequestToolChoice",
+ "AgentsCallStreamRequestToolChoiceParams",
+ "AnthropicRedactedThinkingContent",
+ "AnthropicRedactedThinkingContentParams",
+ "AnthropicThinkingContent",
+ "AnthropicThinkingContentParams",
"AsyncHumanloop",
"BaseModelsUserResponse",
"BooleanEvaluatorStatsResponse",
@@ -341,11 +523,15 @@
"ChatMessageContentItemParams",
"ChatMessageContentParams",
"ChatMessageParams",
+ "ChatMessageThinkingItem",
+ "ChatMessageThinkingItemParams",
"ChatRole",
"ChatToolType",
"CodeEvaluatorRequest",
"CodeEvaluatorRequestParams",
"ConfigToolResponse",
+ "CreateAgentLogResponse",
+ "CreateAgentLogResponseParams",
"CreateDatapointRequest",
"CreateDatapointRequestParams",
"CreateDatapointRequestTargetValue",
@@ -438,6 +624,7 @@
"EvaluatorVersionId",
"EvaluatorVersionIdParams",
"EvaluatorsRequest",
+ "EventType",
"ExternalEvaluatorRequest",
"ExternalEvaluatorRequestParams",
"FeedbackType",
@@ -445,6 +632,8 @@
"FileEnvironmentResponseFile",
"FileEnvironmentResponseFileParams",
"FileEnvironmentResponseParams",
+ "FileEnvironmentVariableRequest",
+ "FileEnvironmentVariableRequestParams",
"FileId",
"FileIdParams",
"FilePath",
@@ -477,8 +666,12 @@
"ImageUrlParams",
"InputResponse",
"InputResponseParams",
+ "LinkedFileRequest",
+ "LinkedFileRequestParams",
"LinkedToolResponse",
"LinkedToolResponseParams",
+ "ListAgents",
+ "ListAgentsParams",
"ListDatasets",
"ListDatasetsParams",
"ListEvaluators",
@@ -495,6 +688,8 @@
"LogResponse",
"LogResponseParams",
"LogStatus",
+ "LogStreamResponse",
+ "LogStreamResponseParams",
"ModelEndpoints",
"ModelProviders",
"MonitoringEvaluatorEnvironmentRequest",
@@ -507,8 +702,12 @@
"NumericEvaluatorStatsResponse",
"NumericEvaluatorStatsResponseParams",
"ObservabilityStatus",
+ "OnAgentCallEnum",
+ "OpenAiReasoningEffort",
"OverallStats",
"OverallStatsParams",
+ "PaginatedDataAgentResponse",
+ "PaginatedDataAgentResponseParams",
"PaginatedDataEvaluationLogResponse",
"PaginatedDataEvaluationLogResponseParams",
"PaginatedDataEvaluatorResponse",
@@ -521,10 +720,10 @@
"PaginatedDataPromptResponseParams",
"PaginatedDataToolResponse",
"PaginatedDataToolResponseParams",
- "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse",
- "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseParams",
- "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem",
- "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams",
+ "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse",
+ "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseParams",
+ "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem",
+ "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams",
"PaginatedDatapointResponse",
"PaginatedDatapointResponseParams",
"PaginatedDatasetResponse",
@@ -538,6 +737,8 @@
"PopulateTemplateResponseParams",
"PopulateTemplateResponsePopulatedTemplate",
"PopulateTemplateResponsePopulatedTemplateParams",
+ "PopulateTemplateResponseReasoningEffort",
+ "PopulateTemplateResponseReasoningEffortParams",
"PopulateTemplateResponseStop",
"PopulateTemplateResponseStopParams",
"PopulateTemplateResponseTemplate",
@@ -553,10 +754,14 @@
"PromptCallStreamResponseParams",
"PromptKernelRequest",
"PromptKernelRequestParams",
+ "PromptKernelRequestReasoningEffort",
+ "PromptKernelRequestReasoningEffortParams",
"PromptKernelRequestStop",
"PromptKernelRequestStopParams",
"PromptKernelRequestTemplate",
"PromptKernelRequestTemplateParams",
+ "PromptLogRequestPrompt",
+ "PromptLogRequestPromptParams",
"PromptLogRequestToolChoice",
"PromptLogRequestToolChoiceParams",
"PromptLogResponse",
@@ -565,23 +770,30 @@
"PromptLogResponseToolChoiceParams",
"PromptLogUpdateRequestToolChoice",
"PromptLogUpdateRequestToolChoiceParams",
+ "PromptRequestReasoningEffort",
+ "PromptRequestReasoningEffortParams",
"PromptRequestStop",
"PromptRequestStopParams",
"PromptRequestTemplate",
"PromptRequestTemplateParams",
"PromptResponse",
"PromptResponseParams",
+ "PromptResponseReasoningEffort",
+ "PromptResponseReasoningEffortParams",
"PromptResponseStop",
"PromptResponseStopParams",
"PromptResponseTemplate",
"PromptResponseTemplateParams",
+ "PromptsCallRequestPrompt",
+ "PromptsCallRequestPromptParams",
"PromptsCallRequestToolChoice",
"PromptsCallRequestToolChoiceParams",
+ "PromptsCallStreamRequestPrompt",
+ "PromptsCallStreamRequestPromptParams",
"PromptsCallStreamRequestToolChoice",
"PromptsCallStreamRequestToolChoiceParams",
"ProviderApiKeys",
"ProviderApiKeysParams",
- "ReasoningEffort",
"ResponseFormat",
"ResponseFormatParams",
"ResponseFormatType",
@@ -604,6 +816,8 @@
"TimeUnit",
"ToolCall",
"ToolCallParams",
+ "ToolCallResponse",
+ "ToolCallResponseParams",
"ToolChoice",
"ToolChoiceParams",
"ToolFunction",
@@ -643,6 +857,7 @@
"VersionStatsResponseParams",
"VersionStatus",
"__version__",
+ "agents",
"datasets",
"directories",
"evaluations",
diff --git a/src/humanloop/agents/__init__.py b/src/humanloop/agents/__init__.py
new file mode 100644
index 00000000..ab2a2f9e
--- /dev/null
+++ b/src/humanloop/agents/__init__.py
@@ -0,0 +1,49 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from .types import (
+ AgentLogRequestAgent,
+ AgentLogRequestToolChoice,
+ AgentRequestReasoningEffort,
+ AgentRequestStop,
+ AgentRequestTemplate,
+ AgentRequestToolsItem,
+ AgentsCallRequestAgent,
+ AgentsCallRequestToolChoice,
+ AgentsCallStreamRequestAgent,
+ AgentsCallStreamRequestToolChoice,
+)
+from .requests import (
+ AgentLogRequestAgentParams,
+ AgentLogRequestToolChoiceParams,
+ AgentRequestReasoningEffortParams,
+ AgentRequestStopParams,
+ AgentRequestTemplateParams,
+ AgentRequestToolsItemParams,
+ AgentsCallRequestAgentParams,
+ AgentsCallRequestToolChoiceParams,
+ AgentsCallStreamRequestAgentParams,
+ AgentsCallStreamRequestToolChoiceParams,
+)
+
+__all__ = [
+ "AgentLogRequestAgent",
+ "AgentLogRequestAgentParams",
+ "AgentLogRequestToolChoice",
+ "AgentLogRequestToolChoiceParams",
+ "AgentRequestReasoningEffort",
+ "AgentRequestReasoningEffortParams",
+ "AgentRequestStop",
+ "AgentRequestStopParams",
+ "AgentRequestTemplate",
+ "AgentRequestTemplateParams",
+ "AgentRequestToolsItem",
+ "AgentRequestToolsItemParams",
+ "AgentsCallRequestAgent",
+ "AgentsCallRequestAgentParams",
+ "AgentsCallRequestToolChoice",
+ "AgentsCallRequestToolChoiceParams",
+ "AgentsCallStreamRequestAgent",
+ "AgentsCallStreamRequestAgentParams",
+ "AgentsCallStreamRequestToolChoice",
+ "AgentsCallStreamRequestToolChoiceParams",
+]
diff --git a/src/humanloop/agents/client.py b/src/humanloop/agents/client.py
new file mode 100644
index 00000000..5cc38277
--- /dev/null
+++ b/src/humanloop/agents/client.py
@@ -0,0 +1,3210 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ..core.client_wrapper import SyncClientWrapper
+from .raw_client import RawAgentsClient
+from ..requests.chat_message import ChatMessageParams
+from .requests.agent_log_request_tool_choice import AgentLogRequestToolChoiceParams
+from .requests.agent_log_request_agent import AgentLogRequestAgentParams
+import datetime as dt
+from ..types.log_status import LogStatus
+from ..core.request_options import RequestOptions
+from ..types.create_agent_log_response import CreateAgentLogResponse
+from ..types.log_response import LogResponse
+from .requests.agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoiceParams
+from .requests.agents_call_stream_request_agent import AgentsCallStreamRequestAgentParams
+from ..requests.provider_api_keys import ProviderApiKeysParams
+from ..types.agent_call_stream_response import AgentCallStreamResponse
+from .requests.agents_call_request_tool_choice import AgentsCallRequestToolChoiceParams
+from .requests.agents_call_request_agent import AgentsCallRequestAgentParams
+from ..types.agent_call_response import AgentCallResponse
+from ..types.agent_continue_stream_response import AgentContinueStreamResponse
+from ..types.agent_continue_response import AgentContinueResponse
+from ..types.project_sort_by import ProjectSortBy
+from ..types.sort_order import SortOrder
+from ..types.paginated_data_agent_response import PaginatedDataAgentResponse
+from ..types.model_endpoints import ModelEndpoints
+from .requests.agent_request_template import AgentRequestTemplateParams
+from ..types.template_language import TemplateLanguage
+from ..types.model_providers import ModelProviders
+from .requests.agent_request_stop import AgentRequestStopParams
+from ..requests.response_format import ResponseFormatParams
+from .requests.agent_request_reasoning_effort import AgentRequestReasoningEffortParams
+from .requests.agent_request_tools_item import AgentRequestToolsItemParams
+from ..types.agent_response import AgentResponse
+from ..types.list_agents import ListAgents
+from ..types.file_environment_response import FileEnvironmentResponse
+from ..requests.evaluator_activation_deactivation_request_activate_item import (
+ EvaluatorActivationDeactivationRequestActivateItemParams,
+)
+from ..requests.evaluator_activation_deactivation_request_deactivate_item import (
+ EvaluatorActivationDeactivationRequestDeactivateItemParams,
+)
+from ..types.agent_kernel_request import AgentKernelRequest
+from ..core.client_wrapper import AsyncClientWrapper
+from .raw_client import AsyncRawAgentsClient
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class AgentsClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._raw_client = RawAgentsClient(client_wrapper=client_wrapper)
+
+ @property
+ def with_raw_response(self) -> RawAgentsClient:
+ """
+ Retrieves a raw implementation of this client that returns raw responses.
+
+ Returns
+ -------
+ RawAgentsClient
+ """
+ return self._raw_client
+
+ def log(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ run_id: typing.Optional[str] = OMIT,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ output_message: typing.Optional[ChatMessageParams] = OMIT,
+ prompt_tokens: typing.Optional[int] = OMIT,
+ reasoning_tokens: typing.Optional[int] = OMIT,
+ output_tokens: typing.Optional[int] = OMIT,
+ prompt_cost: typing.Optional[float] = OMIT,
+ output_cost: typing.Optional[float] = OMIT,
+ finish_reason: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentLogRequestAgentParams] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ output: typing.Optional[str] = OMIT,
+ created_at: typing.Optional[dt.datetime] = OMIT,
+ error: typing.Optional[str] = OMIT,
+ provider_latency: typing.Optional[float] = OMIT,
+ stdout: typing.Optional[str] = OMIT,
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agent_log_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> CreateAgentLogResponse:
+ """
+ Create an Agent Log.
+
+    You can use query parameters `version_id` or `environment` to target
+ an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+ If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete`
+ in order to trigger Evaluators.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ run_id : typing.Optional[str]
+ Unique identifier for the Run to associate the Log to.
+
+ path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ output_message : typing.Optional[ChatMessageParams]
+ The message returned by the provider.
+
+ prompt_tokens : typing.Optional[int]
+ Number of tokens in the prompt used to generate the output.
+
+ reasoning_tokens : typing.Optional[int]
+ Number of reasoning tokens used to generate the output.
+
+ output_tokens : typing.Optional[int]
+ Number of tokens in the output generated by the model.
+
+ prompt_cost : typing.Optional[float]
+            Cost in dollars associated with the tokens in the prompt.
+
+ output_cost : typing.Optional[float]
+            Cost in dollars associated with the tokens in the output.
+
+ finish_reason : typing.Optional[str]
+ Reason the generation finished.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentLogRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentLogRequestAgentParams]
+ The agent configuration to use. Two formats are supported:
+ - An `AgentKernelRequest` object containing the agent configuration
+ - A string containing a serialized .agent file
+ A new Agent version will be created if the provided details are new.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ output : typing.Optional[str]
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+ created_at : typing.Optional[dt.datetime]
+ User defined timestamp for when the log was created.
+
+ error : typing.Optional[str]
+ Error message if the log is an error.
+
+ provider_latency : typing.Optional[float]
+ Duration of the logged event in seconds.
+
+ stdout : typing.Optional[str]
+ Captured log and debug statements.
+
+ provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw request sent to provider.
+
+ provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw response received from the provider.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agent_log_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ CreateAgentLogResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.log()
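+
+        A fuller call is sketched below; the path, message content, and
+        token counts are illustrative placeholders rather than defaults:
+
+        client.agents.log(
+            path="folder/my-agent",
+            messages=[{"role": "user", "content": "What's the weather?"}],
+            output="It's sunny today.",
+            prompt_tokens=12,
+            output_tokens=6,
+            log_status="complete",
+        )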
+ """
+ response = self._raw_client.log(
+ version_id=version_id,
+ environment=environment,
+ run_id=run_id,
+ path=path,
+ id=id,
+ output_message=output_message,
+ prompt_tokens=prompt_tokens,
+ reasoning_tokens=reasoning_tokens,
+ output_tokens=output_tokens,
+ prompt_cost=prompt_cost,
+ output_cost=output_cost,
+ finish_reason=finish_reason,
+ messages=messages,
+ tool_choice=tool_choice,
+ agent=agent,
+ start_time=start_time,
+ end_time=end_time,
+ output=output,
+ created_at=created_at,
+ error=error,
+ provider_latency=provider_latency,
+ stdout=stdout,
+ provider_request=provider_request,
+ provider_response=provider_response,
+ inputs=inputs,
+ source=source,
+ metadata=metadata,
+ log_status=log_status,
+ source_datapoint_id=source_datapoint_id,
+ trace_parent_id=trace_parent_id,
+ user=user,
+ agent_log_request_environment=agent_log_request_environment,
+ save=save,
+ log_id=log_id,
+ request_options=request_options,
+ )
+ return response.data
+
+ def update_log(
+ self,
+ id: str,
+ log_id: str,
+ *,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ output_message: typing.Optional[ChatMessageParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ output: typing.Optional[str] = OMIT,
+ error: typing.Optional[str] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> LogResponse:
+ """
+ Update a Log.
+
+ Update the details of a Log with the given ID.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ log_id : str
+ Unique identifier for the Log.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            List of chat messages that were used as an input to the Agent.
+
+ output_message : typing.Optional[ChatMessageParams]
+            The output message returned by this Agent.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the Agent Log.
+
+ output : typing.Optional[str]
+            The output of the Agent Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`.
+
+ error : typing.Optional[str]
+            The error message of the Agent Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`.
+
+ log_status : typing.Optional[LogStatus]
+            Status of the Agent Log. When an Agent Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ LogResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.update_log(
+ id="id",
+ log_id="log_id",
+ )
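+
+        To finish a Log previously created with `log_status="incomplete"`
+        (a minimal sketch; the IDs are placeholders):
+
+        client.agents.update_log(
+            id="ag_123",
+            log_id="log_456",
+            output="Final answer",
+            log_status="complete",
+        )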
+ """
+ response = self._raw_client.update_log(
+ id,
+ log_id,
+ messages=messages,
+ output_message=output_message,
+ inputs=inputs,
+ output=output,
+ error=error,
+ log_status=log_status,
+ request_options=request_options,
+ )
+ return response.data
+
+ def call_stream(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentsCallStreamRequestAgentParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agents_call_stream_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ return_inputs: typing.Optional[bool] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.Iterator[AgentCallStreamResponse]:
+ """
+ Call an Agent.
+
+ Calling an Agent calls the model provider before logging
+ the request, responses and metadata to Humanloop.
+
+        You can use query parameters `version_id` or `environment` to target
+        an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+        Instead of targeting an existing version explicitly, you can pass in
+        Agent details in the request body. In this case, we will check whether the details correspond
+        to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+        when you are storing or deriving your Agent details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentsCallStreamRequestAgentParams]
+ The agent configuration to use. Two formats are supported:
+ - An `AgentKernelRequest` object containing the agent configuration
+ - A string containing a serialized .agent file
+ A new Agent version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agents_call_stream_request_environment : typing.Optional[str]
+            The name of the Environment the Log is associated with.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Yields
+ ------
+ typing.Iterator[AgentCallStreamResponse]
+
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ response = client.agents.call_stream()
+ for chunk in response:
+            print(chunk)
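+
+        To target a specific Agent by path while streaming (a sketch; the
+        path and message values are placeholders):
+
+        response = client.agents.call_stream(
+            path="folder/my-agent",
+            messages=[{"role": "user", "content": "Hi"}],
+        )
+        for chunk in response:
+            print(chunk)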
+ """
+ with self._raw_client.call_stream(
+ version_id=version_id,
+ environment=environment,
+ path=path,
+ id=id,
+ messages=messages,
+ tool_choice=tool_choice,
+ agent=agent,
+ inputs=inputs,
+ source=source,
+ metadata=metadata,
+ start_time=start_time,
+ end_time=end_time,
+ log_status=log_status,
+ source_datapoint_id=source_datapoint_id,
+ trace_parent_id=trace_parent_id,
+ user=user,
+ agents_call_stream_request_environment=agents_call_stream_request_environment,
+ save=save,
+ log_id=log_id,
+ provider_api_keys=provider_api_keys,
+ return_inputs=return_inputs,
+ include_trace_children=include_trace_children,
+ request_options=request_options,
+ ) as r:
+ yield from r.data
+
+ def call(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentsCallRequestAgentParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agents_call_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ return_inputs: typing.Optional[bool] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentCallResponse:
+ """
+ Call an Agent.
+
+ Calling an Agent calls the model provider before logging
+ the request, responses and metadata to Humanloop.
+
+        You can use query parameters `version_id` or `environment` to target
+        an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+        Instead of targeting an existing version explicitly, you can pass in
+        Agent details in the request body. In this case, we will check whether the details correspond
+        to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+        when you are storing or deriving your Agent details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentsCallRequestAgentParams]
+ The agent configuration to use. Two formats are supported:
+ - An `AgentKernelRequest` object containing the agent configuration
+ - A string containing a serialized .agent file
+ A new Agent version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agents_call_request_environment : typing.Optional[str]
+            The name of the Environment the Log is associated with.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentCallResponse
+
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.call()
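+
+        Passing inline Agent details instead of targeting an existing
+        version (a sketch; the configuration values are illustrative):
+
+        client.agents.call(
+            path="folder/my-agent",
+            messages=[{"role": "user", "content": "Hi"}],
+            agent={"model": "gpt-4o", "max_iterations": 3},
+        )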
+ """
+ response = self._raw_client.call(
+ version_id=version_id,
+ environment=environment,
+ path=path,
+ id=id,
+ messages=messages,
+ tool_choice=tool_choice,
+ agent=agent,
+ inputs=inputs,
+ source=source,
+ metadata=metadata,
+ start_time=start_time,
+ end_time=end_time,
+ log_status=log_status,
+ source_datapoint_id=source_datapoint_id,
+ trace_parent_id=trace_parent_id,
+ user=user,
+ agents_call_request_environment=agents_call_request_environment,
+ save=save,
+ log_id=log_id,
+ provider_api_keys=provider_api_keys,
+ return_inputs=return_inputs,
+ include_trace_children=include_trace_children,
+ request_options=request_options,
+ )
+ return response.data
+
+ def continue_stream(
+ self,
+ *,
+ log_id: str,
+ messages: typing.Sequence[ChatMessageParams],
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.Iterator[AgentContinueStreamResponse]:
+ """
+ Continue an incomplete Agent call.
+
+ This endpoint allows continuing an existing incomplete Agent call, using the context
+ from the previous interaction. The Agent will resume processing from where it left off.
+
+ The original log must be in an incomplete state to be continued.
+
+ The messages in the request will be appended
+ to the original messages in the log.
+
+ Parameters
+ ----------
+ log_id : str
+ This identifies the Agent Log to continue.
+
+ messages : typing.Sequence[ChatMessageParams]
+            The additional messages with which to continue the Agent Log. Often, these should start with Tool messages containing the results of the previous Assistant message's tool calls.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Yields
+ ------
+ typing.Iterator[AgentContinueStreamResponse]
+
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ response = client.agents.continue_stream(
+ log_id="log_id",
+ messages=[{"role": "user"}],
+ )
+ for chunk in response:
+            print(chunk)
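+
+        The continuation messages typically carry tool results; a sketch
+        with placeholder IDs and content:
+
+        response = client.agents.continue_stream(
+            log_id="log_456",
+            messages=[
+                {"role": "tool", "tool_call_id": "call_1", "content": "42"}
+            ],
+        )
+        for chunk in response:
+            print(chunk)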
+ """
+ with self._raw_client.continue_stream(
+ log_id=log_id,
+ messages=messages,
+ provider_api_keys=provider_api_keys,
+ include_trace_children=include_trace_children,
+ request_options=request_options,
+ ) as r:
+ yield from r.data
+
+ def continue_(
+ self,
+ *,
+ log_id: str,
+ messages: typing.Sequence[ChatMessageParams],
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentContinueResponse:
+ """
+ Continue an incomplete Agent call.
+
+ This endpoint allows continuing an existing incomplete Agent call, using the context
+ from the previous interaction. The Agent will resume processing from where it left off.
+
+ The original log must be in an incomplete state to be continued.
+
+ The messages in the request will be appended
+ to the original messages in the log.
+
+ Parameters
+ ----------
+ log_id : str
+ This identifies the Agent Log to continue.
+
+ messages : typing.Sequence[ChatMessageParams]
+            The additional messages with which to continue the Agent Log. Often, these should start with Tool messages containing the results of the previous Assistant message's tool calls.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentContinueResponse
+
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.continue_(
+ log_id="log_id",
+ messages=[{"role": "user"}],
+ )
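+
+        The blocking variant accepts the same tool-result messages; here
+        `include_trace_children` is also set (placeholder values shown):
+
+        client.agents.continue_(
+            log_id="log_456",
+            messages=[
+                {"role": "tool", "tool_call_id": "call_1", "content": "42"}
+            ],
+            include_trace_children=True,
+        )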
+ """
+ response = self._raw_client.continue_(
+ log_id=log_id,
+ messages=messages,
+ provider_api_keys=provider_api_keys,
+ include_trace_children=include_trace_children,
+ request_options=request_options,
+ )
+ return response.data
+
+ def list(
+ self,
+ *,
+ page: typing.Optional[int] = None,
+ size: typing.Optional[int] = None,
+ name: typing.Optional[str] = None,
+ user_filter: typing.Optional[str] = None,
+ sort_by: typing.Optional[ProjectSortBy] = None,
+ order: typing.Optional[SortOrder] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> PaginatedDataAgentResponse:
+ """
+ Get a list of all Agents.
+
+ Parameters
+ ----------
+ page : typing.Optional[int]
+ Page number for pagination.
+
+ size : typing.Optional[int]
+ Page size for pagination. Number of Agents to fetch.
+
+ name : typing.Optional[str]
+ Case-insensitive filter for Agent name.
+
+ user_filter : typing.Optional[str]
+ Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users.
+
+ sort_by : typing.Optional[ProjectSortBy]
+            Field to sort Agents by.
+
+ order : typing.Optional[SortOrder]
+ Direction to sort by.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ PaginatedDataAgentResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.list()
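+
+        Filtering and sorting (a sketch; the filter and sort values are
+        examples only):
+
+        client.agents.list(
+            size=10,
+            name="support",
+            sort_by="created_at",
+            order="desc",
+        )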
+ """
+ response = self._raw_client.list(
+ page=page,
+ size=size,
+ name=name,
+ user_filter=user_filter,
+ sort_by=sort_by,
+ order=order,
+ request_options=request_options,
+ )
+ return response.data
+
+ def upsert(
+ self,
+ *,
+ model: str,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ endpoint: typing.Optional[ModelEndpoints] = OMIT,
+ template: typing.Optional[AgentRequestTemplateParams] = OMIT,
+ template_language: typing.Optional[TemplateLanguage] = OMIT,
+ provider: typing.Optional[ModelProviders] = OMIT,
+ max_tokens: typing.Optional[int] = OMIT,
+ temperature: typing.Optional[float] = OMIT,
+ top_p: typing.Optional[float] = OMIT,
+ stop: typing.Optional[AgentRequestStopParams] = OMIT,
+ presence_penalty: typing.Optional[float] = OMIT,
+ frequency_penalty: typing.Optional[float] = OMIT,
+ other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ seed: typing.Optional[int] = OMIT,
+ response_format: typing.Optional[ResponseFormatParams] = OMIT,
+ reasoning_effort: typing.Optional[AgentRequestReasoningEffortParams] = OMIT,
+ tools: typing.Optional[typing.Sequence[AgentRequestToolsItemParams]] = OMIT,
+ attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ max_iterations: typing.Optional[int] = OMIT,
+ version_name: typing.Optional[str] = OMIT,
+ version_description: typing.Optional[str] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ tags: typing.Optional[typing.Sequence[str]] = OMIT,
+ readme: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Create an Agent or update it with a new version if it already exists.
+
+        Agents are identified by the `ID` or their `path`. The parameters (e.g. the template, temperature, model) and
+        tools determine the versions of the Agent.
+
+ You can provide `version_name` and `version_description` to identify and describe your versions.
+ Version names must be unique within an Agent - attempting to create a version with a name
+ that already exists will result in a 409 Conflict error.
+
+ Parameters
+ ----------
+ model : str
+ The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+
+ path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ endpoint : typing.Optional[ModelEndpoints]
+ The provider model endpoint used.
+
+ template : typing.Optional[AgentRequestTemplateParams]
+ The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+ For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+ For completion models, provide a prompt template as a string.
+
+ Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+
+ template_language : typing.Optional[TemplateLanguage]
+ The template language to use for rendering the template.
+
+ provider : typing.Optional[ModelProviders]
+ The company providing the underlying model service.
+
+ max_tokens : typing.Optional[int]
+            The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt.
+
+ temperature : typing.Optional[float]
+            What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+
+ top_p : typing.Optional[float]
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+
+ stop : typing.Optional[AgentRequestStopParams]
+ The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+
+ presence_penalty : typing.Optional[float]
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+
+ frequency_penalty : typing.Optional[float]
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+
+ other : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Other parameter values to be passed to the provider call.
+
+ seed : typing.Optional[int]
+            If specified, the model will make a best effort to sample deterministically, but it is not guaranteed.
+
+ response_format : typing.Optional[ResponseFormatParams]
+ The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+
+ reasoning_effort : typing.Optional[AgentRequestReasoningEffortParams]
+            Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
+
+ tools : typing.Optional[typing.Sequence[AgentRequestToolsItemParams]]
+
+ attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used.
+
+ max_iterations : typing.Optional[int]
+ The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
+
+ version_name : typing.Optional[str]
+            Unique name for the Agent version. Each Agent can only have one version with a given name.
+
+ version_description : typing.Optional[str]
+ Description of the Version.
+
+ description : typing.Optional[str]
+            Description of the Agent.
+
+ tags : typing.Optional[typing.Sequence[str]]
+            List of tags associated with this Agent.
+
+ readme : typing.Optional[str]
+            Long description of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.upsert(
+ model="model",
+ )
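+
+        A fuller upsert with an inline chat template (a sketch; the path,
+        template, and version name are illustrative):
+
+        client.agents.upsert(
+            path="folder/my-agent",
+            model="gpt-4o",
+            template=[
+                {"role": "system", "content": "You are a helpful agent."}
+            ],
+            temperature=0.7,
+            max_iterations=5,
+            version_name="v1",
+        )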
+ """
+ response = self._raw_client.upsert(
+ model=model,
+ path=path,
+ id=id,
+ endpoint=endpoint,
+ template=template,
+ template_language=template_language,
+ provider=provider,
+ max_tokens=max_tokens,
+ temperature=temperature,
+ top_p=top_p,
+ stop=stop,
+ presence_penalty=presence_penalty,
+ frequency_penalty=frequency_penalty,
+ other=other,
+ seed=seed,
+ response_format=response_format,
+ reasoning_effort=reasoning_effort,
+ tools=tools,
+ attributes=attributes,
+ max_iterations=max_iterations,
+ version_name=version_name,
+ version_description=version_description,
+ description=description,
+ tags=tags,
+ readme=readme,
+ request_options=request_options,
+ )
+ return response.data
+
+ def delete_agent_version(
+ self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> None:
+ """
+ Delete a version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.delete_agent_version(
+ id="id",
+ version_id="version_id",
+ )
+ """
+ response = self._raw_client.delete_agent_version(id, version_id, request_options=request_options)
+ return response.data
+
+ def patch_agent_version(
+ self,
+ id: str,
+ version_id: str,
+ *,
+ name: typing.Optional[str] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Update the name or description of the Agent version.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ name : typing.Optional[str]
+ Name of the version.
+
+ description : typing.Optional[str]
+ Description of the version.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.patch_agent_version(
+ id="id",
+ version_id="version_id",
+ )
+ """
+ response = self._raw_client.patch_agent_version(
+ id, version_id, name=name, description=description, request_options=request_options
+ )
+ return response.data
+
+ def get(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Retrieve the Agent with the given ID.
+
+ By default, the deployed version of the Agent is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.get(
+ id="id",
+ )
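+
+        To fetch the version deployed to a named Environment instead of
+        the default (the environment name is a placeholder):
+
+        client.agents.get(
+            id="ag_123",
+            environment="production",
+        )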
+ """
+ response = self._raw_client.get(
+ id, version_id=version_id, environment=environment, request_options=request_options
+ )
+ return response.data
+
+ def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
+ """
+ Delete the Agent with the given ID.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.delete(
+ id="id",
+ )
+ """
+ response = self._raw_client.delete(id, request_options=request_options)
+ return response.data
+
+ def move(
+ self,
+ id: str,
+ *,
+ path: typing.Optional[str] = OMIT,
+ name: typing.Optional[str] = OMIT,
+ directory_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Move the Agent to a different path or change the name.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ path : typing.Optional[str]
+            Path of the Agent including the Agent name, which is used as a unique identifier.
+
+ name : typing.Optional[str]
+            Name of the Agent.
+
+ directory_id : typing.Optional[str]
+            Unique identifier for the Directory to move the Agent to. Starts with `dir_`.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.move(
+ id="id",
+ )
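+
+        Moving and renaming in one call (the path is a placeholder):
+
+        client.agents.move(
+            id="ag_123",
+            path="new-folder/new-name",
+        )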
+ """
+ response = self._raw_client.move(
+ id, path=path, name=name, directory_id=directory_id, request_options=request_options
+ )
+ return response.data
+
+ def list_versions(
+ self,
+ id: str,
+ *,
+ evaluator_aggregates: typing.Optional[bool] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> ListAgents:
+ """
+        Get a list of all the versions of an Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ evaluator_aggregates : typing.Optional[bool]
+            Whether to include Evaluator aggregate results for the versions in the response.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ ListAgents
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.list_versions(
+ id="id",
+ )
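+
+        Including Evaluator aggregates in the response (a sketch):
+
+        client.agents.list_versions(
+            id="ag_123",
+            evaluator_aggregates=True,
+        )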
+ """
+ response = self._raw_client.list_versions(
+ id, evaluator_aggregates=evaluator_aggregates, request_options=request_options
+ )
+ return response.data
+
+ def set_deployment(
+ self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> AgentResponse:
+ """
+ Deploy Agent to an Environment.
+
+ Set the deployed version for the specified Environment. This Agent
+ will be used for calls made to the Agent in this Environment.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ environment_id : str
+ Unique identifier for the Environment to deploy the Version to.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.set_deployment(
+ id="id",
+ environment_id="environment_id",
+ version_id="version_id",
+ )
+ """
+ response = self._raw_client.set_deployment(
+ id, environment_id, version_id=version_id, request_options=request_options
+ )
+ return response.data
+
+ def remove_deployment(
+ self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> None:
+ """
+ Remove deployed Agent from the Environment.
+
+ Remove the deployed version for the specified Environment. This Agent
+ will no longer be used for calls made to the Agent in this Environment.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ environment_id : str
+ Unique identifier for the Environment to remove the deployment from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.remove_deployment(
+ id="id",
+ environment_id="environment_id",
+ )
+ """
+ response = self._raw_client.remove_deployment(id, environment_id, request_options=request_options)
+ return response.data
+
+ def list_environments(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> typing.List[FileEnvironmentResponse]:
+ """
+ List all Environments and their deployed versions for the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.List[FileEnvironmentResponse]
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.list_environments(
+ id="id",
+ )
+ """
+ response = self._raw_client.list_environments(id, request_options=request_options)
+ return response.data
+
+ def update_monitoring(
+ self,
+ id: str,
+ *,
+ activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT,
+ deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Activate and deactivate Evaluators for monitoring the Agent.
+
+ An activated Evaluator will automatically be run on all new Logs
+ within the Agent for monitoring purposes.
+
+ Parameters
+ ----------
+ id : str
+
+ activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]]
+ Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+
+ deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]]
+ Evaluators to deactivate. These will not be run on new Logs.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.update_monitoring(
+ id="id",
+ )
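+
+ A sketch of activating one Evaluator version and deactivating another;
+ the item shape and IDs are assumptions shown for illustration:
+
+ client.agents.update_monitoring(
+     id="ag_1234567890",
+     activate=[{"evaluator_version_id": "evv_1234567890"}],
+     deactivate=[{"evaluator_version_id": "evv_0987654321"}],
+ )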
+ """
+ response = self._raw_client.update_monitoring(
+ id, activate=activate, deactivate=deactivate, request_options=request_options
+ )
+ return response.data
+
+ def serialize(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> str:
+ """
+ Serialize an Agent to the .agent file format.
+
+ Useful for storing the Agent with your code in a version control system,
+ or for editing with an AI tool.
+
+ By default, the deployed version of the Agent is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ str
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.serialize(
+ id="id",
+ )
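+
+ For example, to snapshot the deployed version into a file for version
+ control (the ID and filename are illustrative placeholders):
+
+ raw = client.agents.serialize(id="ag_1234567890")
+ with open("support_agent.agent", "w") as f:
+     f.write(raw)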
+ """
+ response = self._raw_client.serialize(
+ id, version_id=version_id, environment=environment, request_options=request_options
+ )
+ return response.data
+
+ def deserialize(self, *, agent: str, request_options: typing.Optional[RequestOptions] = None) -> AgentKernelRequest:
+ """
+ Deserialize an Agent from the .agent file format.
+
+ This returns a subset of the attributes required by an Agent.
+ This subset defines the Agent version (e.g. `model`, `temperature`, etc.).
+
+ Parameters
+ ----------
+ agent : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentKernelRequest
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.agents.deserialize(
+ agent="agent",
+ )
+ """
+ response = self._raw_client.deserialize(agent=agent, request_options=request_options)
+ return response.data
+
+
+class AsyncAgentsClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._raw_client = AsyncRawAgentsClient(client_wrapper=client_wrapper)
+
+ @property
+ def with_raw_response(self) -> AsyncRawAgentsClient:
+ """
+ Retrieves a raw implementation of this client that returns raw responses.
+
+ Returns
+ -------
+ AsyncRawAgentsClient
+ """
+ return self._raw_client
+
+ async def log(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ run_id: typing.Optional[str] = OMIT,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ output_message: typing.Optional[ChatMessageParams] = OMIT,
+ prompt_tokens: typing.Optional[int] = OMIT,
+ reasoning_tokens: typing.Optional[int] = OMIT,
+ output_tokens: typing.Optional[int] = OMIT,
+ prompt_cost: typing.Optional[float] = OMIT,
+ output_cost: typing.Optional[float] = OMIT,
+ finish_reason: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentLogRequestAgentParams] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ output: typing.Optional[str] = OMIT,
+ created_at: typing.Optional[dt.datetime] = OMIT,
+ error: typing.Optional[str] = OMIT,
+ provider_latency: typing.Optional[float] = OMIT,
+ stdout: typing.Optional[str] = OMIT,
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agent_log_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> CreateAgentLogResponse:
+ """
+ Create an Agent Log.
+
+ You can use query parameters `version_id`, or `environment`, to target
+ an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+ If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete`
+ in order to trigger Evaluators.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ run_id : typing.Optional[str]
+ Unique identifier for the Run to associate the Log to.
+
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ output_message : typing.Optional[ChatMessageParams]
+ The message returned by the provider.
+
+ prompt_tokens : typing.Optional[int]
+ Number of tokens in the prompt used to generate the output.
+
+ reasoning_tokens : typing.Optional[int]
+ Number of reasoning tokens used to generate the output.
+
+ output_tokens : typing.Optional[int]
+ Number of tokens in the output generated by the model.
+
+ prompt_cost : typing.Optional[float]
+ Cost in dollars associated with the tokens in the prompt.
+
+ output_cost : typing.Optional[float]
+ Cost in dollars associated with the tokens in the output.
+
+ finish_reason : typing.Optional[str]
+ Reason the generation finished.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentLogRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Agent.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Agent.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentLogRequestAgentParams]
+ The agent configuration to use. Two formats are supported:
+ - An `AgentKernelRequest` object containing the agent configuration
+ - A string containing a serialized .agent file
+ A new Agent version will be created if the provided details are new.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ output : typing.Optional[str]
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+ created_at : typing.Optional[dt.datetime]
+ User defined timestamp for when the log was created.
+
+ error : typing.Optional[str]
+ Error message if the log is an error.
+
+ provider_latency : typing.Optional[float]
+ Duration of the logged event in seconds.
+
+ stdout : typing.Optional[str]
+ Captured log and debug statements.
+
+ provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw request sent to provider.
+
+ provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw response received from the provider.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agent_log_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated with.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ CreateAgentLogResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.log()
+
+
+ asyncio.run(main())
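+
+ A fuller sketch of the incomplete-then-complete flow described above;
+ the path, message, and the `id` attribute on the response are
+ illustrative assumptions:
+
+ async def log_and_complete() -> None:
+     log = await client.agents.log(
+         path="projects/support-agent",
+         messages=[{"role": "user", "content": "Hi"}],
+         log_status="incomplete",
+     )
+     # Later, mark the Log complete so monitoring Evaluators run:
+     await client.agents.update_log(
+         id="ag_1234567890",
+         log_id=log.id,
+         log_status="complete",
+     )
+
+ asyncio.run(log_and_complete())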
+ """
+ response = await self._raw_client.log(
+ version_id=version_id,
+ environment=environment,
+ run_id=run_id,
+ path=path,
+ id=id,
+ output_message=output_message,
+ prompt_tokens=prompt_tokens,
+ reasoning_tokens=reasoning_tokens,
+ output_tokens=output_tokens,
+ prompt_cost=prompt_cost,
+ output_cost=output_cost,
+ finish_reason=finish_reason,
+ messages=messages,
+ tool_choice=tool_choice,
+ agent=agent,
+ start_time=start_time,
+ end_time=end_time,
+ output=output,
+ created_at=created_at,
+ error=error,
+ provider_latency=provider_latency,
+ stdout=stdout,
+ provider_request=provider_request,
+ provider_response=provider_response,
+ inputs=inputs,
+ source=source,
+ metadata=metadata,
+ log_status=log_status,
+ source_datapoint_id=source_datapoint_id,
+ trace_parent_id=trace_parent_id,
+ user=user,
+ agent_log_request_environment=agent_log_request_environment,
+ save=save,
+ log_id=log_id,
+ request_options=request_options,
+ )
+ return response.data
+
+ async def update_log(
+ self,
+ id: str,
+ log_id: str,
+ *,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ output_message: typing.Optional[ChatMessageParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ output: typing.Optional[str] = OMIT,
+ error: typing.Optional[str] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> LogResponse:
+ """
+ Update a Log.
+
+ Update the details of a Log with the given ID.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ log_id : str
+ Unique identifier for the Log.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ List of chat messages that were used as an input to the Agent.
+
+ output_message : typing.Optional[ChatMessageParams]
+ The output message returned by this Agent.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the Agent Log.
+
+ output : typing.Optional[str]
+ The output of the Agent Log. Provide None to unset the existing `output` value. Provide either this, `output_message` or `error`.
+
+ error : typing.Optional[str]
+ The error message of the Agent Log. Provide None to unset the existing `error` value. Provide either this, `output_message` or `output`.
+
+ log_status : typing.Optional[LogStatus]
+ Status of the Agent Log. When an Agent Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ LogResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.update_log(
+ id="id",
+ log_id="log_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.update_log(
+ id,
+ log_id,
+ messages=messages,
+ output_message=output_message,
+ inputs=inputs,
+ output=output,
+ error=error,
+ log_status=log_status,
+ request_options=request_options,
+ )
+ return response.data
+
+ async def call_stream(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentsCallStreamRequestAgentParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agents_call_stream_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ return_inputs: typing.Optional[bool] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.AsyncIterator[AgentCallStreamResponse]:
+ """
+ Call an Agent.
+
+ Calling an Agent calls the model provider before logging
+ the request, responses and metadata to Humanloop.
+
+ You can use query parameters `version_id`, or `environment`, to target
+ an existing version of the Agent. Otherwise the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can pass in
+ Agent details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Agent details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Agent.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Agent.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentsCallStreamRequestAgentParams]
+ The agent configuration to use. Two formats are supported:
+ - An `AgentKernelRequest` object containing the agent configuration
+ - A string containing a serialized .agent file
+ A new Agent version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agents_call_stream_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated with.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Yields
+ ------
+ typing.AsyncIterator[AgentCallStreamResponse]
+
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+     # call_stream returns an async iterator, so it is not awaited:
+     response = client.agents.call_stream()
+     async for chunk in response:
+         print(chunk)
+
+
+ asyncio.run(main())
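+
+ A fuller sketch targeting an Agent by path (the path and message are
+ illustrative placeholders):
+
+ async def ask() -> None:
+     stream = client.agents.call_stream(
+         path="projects/support-agent",
+         messages=[{"role": "user", "content": "Hi"}],
+     )
+     async for chunk in stream:
+         print(chunk)
+
+ asyncio.run(ask())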
+ """
+ async with self._raw_client.call_stream(
+ version_id=version_id,
+ environment=environment,
+ path=path,
+ id=id,
+ messages=messages,
+ tool_choice=tool_choice,
+ agent=agent,
+ inputs=inputs,
+ source=source,
+ metadata=metadata,
+ start_time=start_time,
+ end_time=end_time,
+ log_status=log_status,
+ source_datapoint_id=source_datapoint_id,
+ trace_parent_id=trace_parent_id,
+ user=user,
+ agents_call_stream_request_environment=agents_call_stream_request_environment,
+ save=save,
+ log_id=log_id,
+ provider_api_keys=provider_api_keys,
+ return_inputs=return_inputs,
+ include_trace_children=include_trace_children,
+ request_options=request_options,
+ ) as r:
+ async for data in r.data:
+ yield data
+
+ async def call(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentsCallRequestAgentParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agents_call_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ return_inputs: typing.Optional[bool] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentCallResponse:
+ """
+ Call an Agent.
+
+ Calling an Agent calls the model provider before logging
+ the request, responses and metadata to Humanloop.
+
+ You can use query parameters `version_id`, or `environment`, to target
+ an existing version of the Agent. Otherwise the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can pass in
+ Agent details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Agent details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Agent.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Agent.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentsCallRequestAgentParams]
+ The agent configuration to use. Two formats are supported:
+ - An `AgentKernelRequest` object containing the agent configuration
+ - A string containing a serialized .agent file
+ A new Agent version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agents_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated with.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentCallResponse
+
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.call()
+
+
+ asyncio.run(main())
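+
+ A fuller sketch (the path and message are illustrative placeholders):
+
+ async def ask() -> None:
+     response = await client.agents.call(
+         path="projects/support-agent",
+         messages=[{"role": "user", "content": "Hi"}],
+         return_inputs=False,
+     )
+
+ asyncio.run(ask())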
+ """
+ response = await self._raw_client.call(
+ version_id=version_id,
+ environment=environment,
+ path=path,
+ id=id,
+ messages=messages,
+ tool_choice=tool_choice,
+ agent=agent,
+ inputs=inputs,
+ source=source,
+ metadata=metadata,
+ start_time=start_time,
+ end_time=end_time,
+ log_status=log_status,
+ source_datapoint_id=source_datapoint_id,
+ trace_parent_id=trace_parent_id,
+ user=user,
+ agents_call_request_environment=agents_call_request_environment,
+ save=save,
+ log_id=log_id,
+ provider_api_keys=provider_api_keys,
+ return_inputs=return_inputs,
+ include_trace_children=include_trace_children,
+ request_options=request_options,
+ )
+ return response.data
+
+ async def continue_stream(
+ self,
+ *,
+ log_id: str,
+ messages: typing.Sequence[ChatMessageParams],
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.AsyncIterator[AgentContinueStreamResponse]:
+ """
+ Continue an incomplete Agent call.
+
+ This endpoint allows continuing an existing incomplete Agent call, using the context
+ from the previous interaction. The Agent will resume processing from where it left off.
+
+ The original log must be in an incomplete state to be continued.
+
+ The messages in the request will be appended
+ to the original messages in the log.
+
+ Parameters
+ ----------
+ log_id : str
+ This identifies the Agent Log to continue.
+
+ messages : typing.Sequence[ChatMessageParams]
+ The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Yields
+ ------
+ typing.AsyncIterator[AgentContinueStreamResponse]
+
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+     # continue_stream returns an async iterator, so it is not awaited:
+     response = client.agents.continue_stream(
+         log_id="log_id",
+         messages=[{"role": "user"}],
+     )
+     async for chunk in response:
+         print(chunk)
+
+
+ asyncio.run(main())
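+
+ As noted above, continuations often begin with Tool results for the
+ previous Assistant turn; the message shape and IDs below are
+ illustrative assumptions:
+
+ async def resume() -> None:
+     stream = client.agents.continue_stream(
+         log_id="log_1234567890",
+         messages=[
+             {
+                 "role": "tool",
+                 "tool_call_id": "call_abc123",
+                 "content": '{"temperature_c": 21}',
+             }
+         ],
+     )
+     async for chunk in stream:
+         print(chunk)
+
+ asyncio.run(resume())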
+ """
+ async with self._raw_client.continue_stream(
+ log_id=log_id,
+ messages=messages,
+ provider_api_keys=provider_api_keys,
+ include_trace_children=include_trace_children,
+ request_options=request_options,
+ ) as r:
+ async for data in r.data:
+ yield data
+
+ async def continue_(
+ self,
+ *,
+ log_id: str,
+ messages: typing.Sequence[ChatMessageParams],
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentContinueResponse:
+ """
+ Continue an incomplete Agent call.
+
+ This endpoint allows continuing an existing incomplete Agent call, using the context
+ from the previous interaction. The Agent will resume processing from where it left off.
+
+ The original log must be in an incomplete state to be continued.
+
+ The messages in the request will be appended
+ to the original messages in the log.
+
+ Parameters
+ ----------
+ log_id : str
+ This identifies the Agent Log to continue.
+
+ messages : typing.Sequence[ChatMessageParams]
+ The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentContinueResponse
+
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.continue_(
+ log_id="log_id",
+ messages=[{"role": "user"}],
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.continue_(
+ log_id=log_id,
+ messages=messages,
+ provider_api_keys=provider_api_keys,
+ include_trace_children=include_trace_children,
+ request_options=request_options,
+ )
+ return response.data
+
+ async def list(
+ self,
+ *,
+ page: typing.Optional[int] = None,
+ size: typing.Optional[int] = None,
+ name: typing.Optional[str] = None,
+ user_filter: typing.Optional[str] = None,
+ sort_by: typing.Optional[ProjectSortBy] = None,
+ order: typing.Optional[SortOrder] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> PaginatedDataAgentResponse:
+ """
+ Get a list of all Agents.
+
+ Parameters
+ ----------
+ page : typing.Optional[int]
+ Page number for pagination.
+
+ size : typing.Optional[int]
+ Page size for pagination. Number of Agents to fetch.
+
+ name : typing.Optional[str]
+ Case-insensitive filter for Agent name.
+
+ user_filter : typing.Optional[str]
+ Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users.
+
+ sort_by : typing.Optional[ProjectSortBy]
+ Field to sort Agents by.
+
+ order : typing.Optional[SortOrder]
+ Direction to sort by.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ PaginatedDataAgentResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.list()
+
+
+ asyncio.run(main())
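+
+ For example, to fetch the first page of Agents matching a name filter
+ (the filter value and the `records` attribute are assumptions):
+
+ async def first_page() -> None:
+     page = await client.agents.list(page=1, size=10, name="support")
+     for agent in page.records:
+         print(agent.path)
+
+ asyncio.run(first_page())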
+ """
+ response = await self._raw_client.list(
+ page=page,
+ size=size,
+ name=name,
+ user_filter=user_filter,
+ sort_by=sort_by,
+ order=order,
+ request_options=request_options,
+ )
+ return response.data
+
+ async def upsert(
+ self,
+ *,
+ model: str,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ endpoint: typing.Optional[ModelEndpoints] = OMIT,
+ template: typing.Optional[AgentRequestTemplateParams] = OMIT,
+ template_language: typing.Optional[TemplateLanguage] = OMIT,
+ provider: typing.Optional[ModelProviders] = OMIT,
+ max_tokens: typing.Optional[int] = OMIT,
+ temperature: typing.Optional[float] = OMIT,
+ top_p: typing.Optional[float] = OMIT,
+ stop: typing.Optional[AgentRequestStopParams] = OMIT,
+ presence_penalty: typing.Optional[float] = OMIT,
+ frequency_penalty: typing.Optional[float] = OMIT,
+ other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ seed: typing.Optional[int] = OMIT,
+ response_format: typing.Optional[ResponseFormatParams] = OMIT,
+ reasoning_effort: typing.Optional[AgentRequestReasoningEffortParams] = OMIT,
+ tools: typing.Optional[typing.Sequence[AgentRequestToolsItemParams]] = OMIT,
+ attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ max_iterations: typing.Optional[int] = OMIT,
+ version_name: typing.Optional[str] = OMIT,
+ version_description: typing.Optional[str] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ tags: typing.Optional[typing.Sequence[str]] = OMIT,
+ readme: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Create an Agent or update it with a new version if it already exists.
+
+ Agents are identified by `id` or `path`. The parameters (i.e. the template, temperature, model, etc.)
+ and tools determine the versions of the Agent.
+
+ You can provide `version_name` and `version_description` to identify and describe your versions.
+ Version names must be unique within an Agent - attempting to create a version with a name
+ that already exists will result in a 409 Conflict error.
+
+ Parameters
+ ----------
+ model : str
+ The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ endpoint : typing.Optional[ModelEndpoints]
+ The provider model endpoint used.
+
+ template : typing.Optional[AgentRequestTemplateParams]
+ The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+ For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+ For completion models, provide a prompt template as a string.
+
+ Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+
+ template_language : typing.Optional[TemplateLanguage]
+ The template language to use for rendering the template.
+
+ provider : typing.Optional[ModelProviders]
+ The company providing the underlying model service.
+
+ max_tokens : typing.Optional[int]
+ The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt.
+
+ temperature : typing.Optional[float]
+ What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+
+ top_p : typing.Optional[float]
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+
+ stop : typing.Optional[AgentRequestStopParams]
+ The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+
+ presence_penalty : typing.Optional[float]
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+
+ frequency_penalty : typing.Optional[float]
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+
+ other : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Other parameter values to be passed to the provider call.
+
+ seed : typing.Optional[int]
+ If specified, model will make a best effort to sample deterministically, but it is not guaranteed.
+
+ response_format : typing.Optional[ResponseFormatParams]
+ The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+
+ reasoning_effort : typing.Optional[AgentRequestReasoningEffortParams]
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
+
+ tools : typing.Optional[typing.Sequence[AgentRequestToolsItemParams]]
+
+ attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used.
+
+ max_iterations : typing.Optional[int]
+ The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
+
+ version_name : typing.Optional[str]
+ Unique name for the Agent version. Each Agent can only have one version with a given name.
+
+ version_description : typing.Optional[str]
+ Description of the Version.
+
+ description : typing.Optional[str]
+ Description of the Agent.
+
+ tags : typing.Optional[typing.Sequence[str]]
+ List of tags associated with this Agent.
+
+ readme : typing.Optional[str]
+ Long description of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.upsert(
+ model="model",
+ )
+
+
+ asyncio.run(main())
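+
+ A fuller sketch creating a named version by path; every value below is
+ an illustrative placeholder:
+
+ async def create_version() -> None:
+     await client.agents.upsert(
+         path="projects/support-agent",
+         model="gpt-4o",
+         template=[
+             {
+                 "role": "system",
+                 "content": "You are a support agent for {{product}}.",
+             }
+         ],
+         temperature=0.2,
+         max_iterations=5,
+         version_name="v1-low-temperature",
+     )
+
+ asyncio.run(create_version())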
+ """
+ response = await self._raw_client.upsert(
+ model=model,
+ path=path,
+ id=id,
+ endpoint=endpoint,
+ template=template,
+ template_language=template_language,
+ provider=provider,
+ max_tokens=max_tokens,
+ temperature=temperature,
+ top_p=top_p,
+ stop=stop,
+ presence_penalty=presence_penalty,
+ frequency_penalty=frequency_penalty,
+ other=other,
+ seed=seed,
+ response_format=response_format,
+ reasoning_effort=reasoning_effort,
+ tools=tools,
+ attributes=attributes,
+ max_iterations=max_iterations,
+ version_name=version_name,
+ version_description=version_description,
+ description=description,
+ tags=tags,
+ readme=readme,
+ request_options=request_options,
+ )
+ return response.data
+
+ async def delete_agent_version(
+ self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> None:
+ """
+ Delete a version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.delete_agent_version(
+ id="id",
+ version_id="version_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.delete_agent_version(id, version_id, request_options=request_options)
+ return response.data
+
+ async def patch_agent_version(
+ self,
+ id: str,
+ version_id: str,
+ *,
+ name: typing.Optional[str] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Update the name or description of the Agent version.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ name : typing.Optional[str]
+ Name of the version.
+
+ description : typing.Optional[str]
+ Description of the version.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.patch_agent_version(
+ id="id",
+ version_id="version_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.patch_agent_version(
+ id, version_id, name=name, description=description, request_options=request_options
+ )
+ return response.data
+
+ async def get(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Retrieve the Agent with the given ID.
+
+ By default, the deployed version of the Agent is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.get(
+ id="id",
+ )
+
+
+ asyncio.run(main())
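+
+ To fetch the version deployed to a specific Environment instead of the
+ default (the ID and Environment name are illustrative placeholders):
+
+ async def get_production() -> None:
+     agent = await client.agents.get(
+         id="ag_1234567890",
+         environment="production",
+     )
+
+ asyncio.run(get_production())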
+ """
+ response = await self._raw_client.get(
+ id, version_id=version_id, environment=environment, request_options=request_options
+ )
+ return response.data
+
+ async def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
+ """
+ Delete the Agent with the given ID.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.delete(
+ id="id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.delete(id, request_options=request_options)
+ return response.data
+
+ async def move(
+ self,
+ id: str,
+ *,
+ path: typing.Optional[str] = OMIT,
+ name: typing.Optional[str] = OMIT,
+ directory_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Move the Agent to a different path or change the name.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ path : typing.Optional[str]
+ Path of the Agent including the Agent name, which is used as a unique identifier.
+
+ name : typing.Optional[str]
+ Name of the Agent.
+
+ directory_id : typing.Optional[str]
+ Unique identifier for the Directory to move the Agent to. Starts with `dir_`.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.move(
+ id="id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.move(
+ id, path=path, name=name, directory_id=directory_id, request_options=request_options
+ )
+ return response.data
+
+ async def list_versions(
+ self,
+ id: str,
+ *,
+ evaluator_aggregates: typing.Optional[bool] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> ListAgents:
+ """
+ Get a list of all the versions of an Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ evaluator_aggregates : typing.Optional[bool]
+ Whether to include Evaluator aggregate results for the versions in the response.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ ListAgents
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.list_versions(
+ id="id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.list_versions(
+ id, evaluator_aggregates=evaluator_aggregates, request_options=request_options
+ )
+ return response.data
+
+ async def set_deployment(
+ self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> AgentResponse:
+ """
+ Deploy Agent to an Environment.
+
+ Set the deployed version for the specified Environment. This Agent
+ will be used for calls made to the Agent in this Environment.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ environment_id : str
+ Unique identifier for the Environment to deploy the Version to.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.set_deployment(
+ id="id",
+ environment_id="environment_id",
+ version_id="version_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.set_deployment(
+ id, environment_id, version_id=version_id, request_options=request_options
+ )
+ return response.data
+
+ async def remove_deployment(
+ self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> None:
+ """
+ Remove deployed Agent from the Environment.
+
+ Remove the deployed version for the specified Environment. This Agent
+ will no longer be used for calls made to the Agent in this Environment.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ environment_id : str
+ Unique identifier for the Environment to remove the deployment from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.remove_deployment(
+ id="id",
+ environment_id="environment_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.remove_deployment(id, environment_id, request_options=request_options)
+ return response.data
+
+ async def list_environments(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> typing.List[FileEnvironmentResponse]:
+ """
+ List all Environments and their deployed versions for the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.List[FileEnvironmentResponse]
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.list_environments(
+ id="id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.list_environments(id, request_options=request_options)
+ return response.data
+
+ async def update_monitoring(
+ self,
+ id: str,
+ *,
+ activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT,
+ deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AgentResponse:
+ """
+ Activate and deactivate Evaluators for monitoring the Agent.
+
+ An activated Evaluator will automatically be run on all new Logs
+ within the Agent for monitoring purposes.
+
+ Parameters
+ ----------
+ id : str
+
+ activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]]
+ Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+
+ deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]]
+ Evaluators to deactivate. These will not be run on new Logs.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.update_monitoring(
+ id="id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.update_monitoring(
+ id, activate=activate, deactivate=deactivate, request_options=request_options
+ )
+ return response.data
+
+ async def serialize(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> str:
+ """
+ Serialize an Agent to the .agent file format.
+
+ Useful for storing the Agent with your code in a version control system,
+ or for editing with an AI tool.
+
+ By default, the deployed version of the Agent is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ str
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.serialize(
+ id="id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.serialize(
+ id, version_id=version_id, environment=environment, request_options=request_options
+ )
+ return response.data
+
+ async def deserialize(
+ self, *, agent: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> AgentKernelRequest:
+ """
+ Deserialize an Agent from the .agent file format.
+
+ This returns a subset of the attributes required by an Agent.
+ This subset defines the Agent version (e.g. `model`, `temperature`, etc.).
+
+ Parameters
+ ----------
+ agent : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentKernelRequest
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.agents.deserialize(
+ agent="agent",
+ )
+
+
+ asyncio.run(main())
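+
+ For example, round-tripping through the .agent format (the ID is an
+ illustrative placeholder):
+
+ async def round_trip() -> None:
+     raw = await client.agents.serialize(id="ag_1234567890")
+     kernel = await client.agents.deserialize(agent=raw)
+
+ asyncio.run(round_trip())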
+ """
+ response = await self._raw_client.deserialize(agent=agent, request_options=request_options)
+ return response.data
diff --git a/src/humanloop/agents/raw_client.py b/src/humanloop/agents/raw_client.py
new file mode 100644
index 00000000..226f3c35
--- /dev/null
+++ b/src/humanloop/agents/raw_client.py
@@ -0,0 +1,3891 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ..core.client_wrapper import SyncClientWrapper
+from ..requests.chat_message import ChatMessageParams
+from .requests.agent_log_request_tool_choice import AgentLogRequestToolChoiceParams
+from .requests.agent_log_request_agent import AgentLogRequestAgentParams
+import datetime as dt
+from ..types.log_status import LogStatus
+from ..core.request_options import RequestOptions
+from ..core.http_response import HttpResponse
+from ..types.create_agent_log_response import CreateAgentLogResponse
+from ..core.serialization import convert_and_respect_annotation_metadata
+from ..core.unchecked_base_model import construct_type
+from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.http_validation_error import HttpValidationError
+from json.decoder import JSONDecodeError
+from ..core.api_error import ApiError
+from ..types.log_response import LogResponse
+from ..core.jsonable_encoder import jsonable_encoder
+from .requests.agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoiceParams
+from .requests.agents_call_stream_request_agent import AgentsCallStreamRequestAgentParams
+from ..requests.provider_api_keys import ProviderApiKeysParams
+from ..types.agent_call_stream_response import AgentCallStreamResponse
+import httpx_sse
+import contextlib
+from .requests.agents_call_request_tool_choice import AgentsCallRequestToolChoiceParams
+from .requests.agents_call_request_agent import AgentsCallRequestAgentParams
+from ..types.agent_call_response import AgentCallResponse
+from ..types.agent_continue_stream_response import AgentContinueStreamResponse
+from ..types.agent_continue_response import AgentContinueResponse
+from ..types.project_sort_by import ProjectSortBy
+from ..types.sort_order import SortOrder
+from ..types.paginated_data_agent_response import PaginatedDataAgentResponse
+from ..types.model_endpoints import ModelEndpoints
+from .requests.agent_request_template import AgentRequestTemplateParams
+from ..types.template_language import TemplateLanguage
+from ..types.model_providers import ModelProviders
+from .requests.agent_request_stop import AgentRequestStopParams
+from ..requests.response_format import ResponseFormatParams
+from .requests.agent_request_reasoning_effort import AgentRequestReasoningEffortParams
+from .requests.agent_request_tools_item import AgentRequestToolsItemParams
+from ..types.agent_response import AgentResponse
+from ..types.list_agents import ListAgents
+from ..types.file_environment_response import FileEnvironmentResponse
+from ..requests.evaluator_activation_deactivation_request_activate_item import (
+ EvaluatorActivationDeactivationRequestActivateItemParams,
+)
+from ..requests.evaluator_activation_deactivation_request_deactivate_item import (
+ EvaluatorActivationDeactivationRequestDeactivateItemParams,
+)
+from ..types.agent_kernel_request import AgentKernelRequest
+from ..core.client_wrapper import AsyncClientWrapper
+from ..core.http_response import AsyncHttpResponse
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class RawAgentsClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ def log(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ run_id: typing.Optional[str] = OMIT,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ output_message: typing.Optional[ChatMessageParams] = OMIT,
+ prompt_tokens: typing.Optional[int] = OMIT,
+ reasoning_tokens: typing.Optional[int] = OMIT,
+ output_tokens: typing.Optional[int] = OMIT,
+ prompt_cost: typing.Optional[float] = OMIT,
+ output_cost: typing.Optional[float] = OMIT,
+ finish_reason: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentLogRequestAgentParams] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ output: typing.Optional[str] = OMIT,
+ created_at: typing.Optional[dt.datetime] = OMIT,
+ error: typing.Optional[str] = OMIT,
+ provider_latency: typing.Optional[float] = OMIT,
+ stdout: typing.Optional[str] = OMIT,
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agent_log_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[CreateAgentLogResponse]:
+ """
+ Create an Agent Log.
+
+    You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+ If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete`
+ in order to trigger Evaluators.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ run_id : typing.Optional[str]
+ Unique identifier for the Run to associate the Log to.
+
+ path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ output_message : typing.Optional[ChatMessageParams]
+ The message returned by the provider.
+
+ prompt_tokens : typing.Optional[int]
+ Number of tokens in the prompt used to generate the output.
+
+ reasoning_tokens : typing.Optional[int]
+ Number of reasoning tokens used to generate the output.
+
+ output_tokens : typing.Optional[int]
+ Number of tokens in the output generated by the model.
+
+        prompt_cost : typing.Optional[float]
+            Cost in dollars associated with the tokens in the prompt.
+
+        output_cost : typing.Optional[float]
+            Cost in dollars associated with the tokens in the output.
+
+ finish_reason : typing.Optional[str]
+ Reason the generation finished.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentLogRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentLogRequestAgentParams]
+ The agent configuration to use. Two formats are supported:
+ - An `AgentKernelRequest` object containing the agent configuration
+ - A string containing a serialized .agent file
+ A new Agent version will be created if the provided details are new.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ output : typing.Optional[str]
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+ created_at : typing.Optional[dt.datetime]
+ User defined timestamp for when the log was created.
+
+ error : typing.Optional[str]
+ Error message if the log is an error.
+
+ provider_latency : typing.Optional[float]
+ Duration of the logged event in seconds.
+
+ stdout : typing.Optional[str]
+ Captured log and debug statements.
+
+ provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw request sent to provider.
+
+ provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw response received from the provider.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agent_log_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[CreateAgentLogResponse]
+ Successful Response
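+
+        Examples
+        --------
+        A minimal hand-written sketch (not auto-generated): log a single Agent
+        output by path. The path and output values are illustrative, and the
+        raw client is reached through the same private `_raw_client` attribute
+        the high-level client in this SDK uses.
+
+        from humanloop import Humanloop
+
+        client = Humanloop(
+            api_key="YOUR_API_KEY",
+        )
+        response = client.agents._raw_client.log(
+            path="sample/agent",
+            output="Hello, world!",
+        )
+        print(response.data)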
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "agents/log",
+ method="POST",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ json={
+ "run_id": run_id,
+ "path": path,
+ "id": id,
+ "output_message": convert_and_respect_annotation_metadata(
+ object_=output_message, annotation=ChatMessageParams, direction="write"
+ ),
+ "prompt_tokens": prompt_tokens,
+ "reasoning_tokens": reasoning_tokens,
+ "output_tokens": output_tokens,
+ "prompt_cost": prompt_cost,
+ "output_cost": output_cost,
+ "finish_reason": finish_reason,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "tool_choice": convert_and_respect_annotation_metadata(
+ object_=tool_choice, annotation=AgentLogRequestToolChoiceParams, direction="write"
+ ),
+ "agent": convert_and_respect_annotation_metadata(
+ object_=agent, annotation=AgentLogRequestAgentParams, direction="write"
+ ),
+ "start_time": start_time,
+ "end_time": end_time,
+ "output": output,
+ "created_at": created_at,
+ "error": error,
+ "provider_latency": provider_latency,
+ "stdout": stdout,
+ "provider_request": provider_request,
+ "provider_response": provider_response,
+ "inputs": inputs,
+ "source": source,
+ "metadata": metadata,
+ "log_status": log_status,
+ "source_datapoint_id": source_datapoint_id,
+ "trace_parent_id": trace_parent_id,
+ "user": user,
+ "environment": agent_log_request_environment,
+ "save": save,
+ "log_id": log_id,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ CreateAgentLogResponse,
+ construct_type(
+ type_=CreateAgentLogResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def update_log(
+ self,
+ id: str,
+ log_id: str,
+ *,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ output_message: typing.Optional[ChatMessageParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ output: typing.Optional[str] = OMIT,
+ error: typing.Optional[str] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[LogResponse]:
+ """
+ Update a Log.
+
+ Update the details of a Log with the given ID.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ log_id : str
+ Unique identifier for the Log.
+
+        messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            List of chat messages that were used as an input to the Agent.
+
+        output_message : typing.Optional[ChatMessageParams]
+            The output message returned by this Agent.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the Agent Log.
+
+        output : typing.Optional[str]
+            The output of the Agent Log. Provide None to unset the existing `output` value. Provide either this, `output_message` or `error`.
+
+        error : typing.Optional[str]
+            The error message of the Agent Log. Provide None to unset the existing `error` value. Provide either this, `output_message` or `output`.
+
+        log_status : typing.Optional[LogStatus]
+            Status of the Agent Log. When an Agent Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[LogResponse]
+ Successful Response
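+
+        Examples
+        --------
+        A hand-written sketch (not auto-generated): mark a previously created
+        incomplete Log as complete so that monitoring Evaluators run. The IDs
+        are illustrative.
+
+        from humanloop import Humanloop
+
+        client = Humanloop(
+            api_key="YOUR_API_KEY",
+        )
+        response = client.agents._raw_client.update_log(
+            id="ag_123",
+            log_id="log_456",
+            log_status="complete",
+        )
+        print(response.data)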
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/log/{jsonable_encoder(log_id)}",
+ method="PATCH",
+ json={
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "output_message": convert_and_respect_annotation_metadata(
+ object_=output_message, annotation=ChatMessageParams, direction="write"
+ ),
+ "inputs": inputs,
+ "output": output,
+ "error": error,
+ "log_status": log_status,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ LogResponse,
+ construct_type(
+ type_=LogResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ @contextlib.contextmanager
+ def call_stream(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentsCallStreamRequestAgentParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agents_call_stream_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ return_inputs: typing.Optional[bool] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.Iterator[HttpResponse[typing.Iterator[AgentCallStreamResponse]]]:
+ """
+ Call an Agent.
+
+ Calling an Agent calls the model provider before logging
+ the request, responses and metadata to Humanloop.
+
+        You can use the query parameters `version_id` or `environment` to target
+        an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+        Instead of targeting an existing version explicitly, you can pass in
+        Agent details in the request body. In this case, we will check if the details correspond
+        to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+        if you are storing or deriving your Agent details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentsCallStreamRequestAgentParams]
+ The agent configuration to use. Two formats are supported:
+ - An `AgentKernelRequest` object containing the agent configuration
+ - A string containing a serialized .agent file
+ A new Agent version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agents_call_stream_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Yields
+ ------
+ typing.Iterator[HttpResponse[typing.Iterator[AgentCallStreamResponse]]]
+
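+        Examples
+        --------
+        A hand-written sketch (not auto-generated): because this method is a
+        context manager that yields an `HttpResponse` wrapping an iterator,
+        consume the chunks inside the `with` block. Path and message values
+        are illustrative.
+
+        from humanloop import Humanloop
+
+        client = Humanloop(
+            api_key="YOUR_API_KEY",
+        )
+        with client.agents._raw_client.call_stream(
+            path="sample/agent",
+            messages=[{"role": "user", "content": "Hello"}],
+        ) as response:
+            for chunk in response.data:
+                print(chunk)
+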
+ """
+ with self._client_wrapper.httpx_client.stream(
+ "agents/call",
+ method="POST",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ json={
+ "path": path,
+ "id": id,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "tool_choice": convert_and_respect_annotation_metadata(
+ object_=tool_choice, annotation=AgentsCallStreamRequestToolChoiceParams, direction="write"
+ ),
+ "agent": convert_and_respect_annotation_metadata(
+ object_=agent, annotation=AgentsCallStreamRequestAgentParams, direction="write"
+ ),
+ "inputs": inputs,
+ "source": source,
+ "metadata": metadata,
+ "start_time": start_time,
+ "end_time": end_time,
+ "log_status": log_status,
+ "source_datapoint_id": source_datapoint_id,
+ "trace_parent_id": trace_parent_id,
+ "user": user,
+ "environment": agents_call_stream_request_environment,
+ "save": save,
+ "log_id": log_id,
+ "provider_api_keys": convert_and_respect_annotation_metadata(
+ object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+ ),
+ "return_inputs": return_inputs,
+ "include_trace_children": include_trace_children,
+ "stream": True,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ ) as _response:
+
+ def stream() -> HttpResponse[typing.Iterator[AgentCallStreamResponse]]:
+ try:
+ if 200 <= _response.status_code < 300:
+
+                    def _iter():
+                        _event_source = httpx_sse.EventSource(_response)
+                        for _sse in _event_source.iter_sse():
+                            if _sse.data is None:
+                                return
+                            try:
+                                # Parse each SSE payload into the typed stream
+                                # response rather than calling the raw string.
+                                yield typing.cast(
+                                    AgentCallStreamResponse,
+                                    construct_type(
+                                        type_=AgentCallStreamResponse,  # type: ignore
+                                        object_=_sse.json(),
+                                    ),
+                                )
+                            except Exception:
+                                pass
+                        return
+
+ return HttpResponse(response=_response, data=_iter())
+ _response.read()
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ yield stream()
+
+ def call(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentsCallRequestAgentParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agents_call_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ return_inputs: typing.Optional[bool] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[AgentCallResponse]:
+ """
+ Call an Agent.
+
+ Calling an Agent calls the model provider before logging
+ the request, responses and metadata to Humanloop.
+
+        You can use the query parameters `version_id` or `environment` to target
+        an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+        Instead of targeting an existing version explicitly, you can pass in
+        Agent details in the request body. In this case, we will check if the details correspond
+        to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+        if you are storing or deriving your Agent details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentsCallRequestAgentParams]
+ The agent configuration to use. Two formats are supported:
+ - An `AgentKernelRequest` object containing the agent configuration
+ - A string containing a serialized .agent file
+ A new Agent version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agents_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[AgentCallResponse]
+
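+        Examples
+        --------
+        A minimal hand-written sketch (not auto-generated): call the deployed
+        Agent version by path and read the typed response body from `.data`.
+        Path and message values are illustrative.
+
+        from humanloop import Humanloop
+
+        client = Humanloop(
+            api_key="YOUR_API_KEY",
+        )
+        response = client.agents._raw_client.call(
+            path="sample/agent",
+            messages=[{"role": "user", "content": "What is the capital of France?"}],
+        )
+        print(response.data)
+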
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "agents/call",
+ method="POST",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ json={
+ "path": path,
+ "id": id,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "tool_choice": convert_and_respect_annotation_metadata(
+ object_=tool_choice, annotation=AgentsCallRequestToolChoiceParams, direction="write"
+ ),
+ "agent": convert_and_respect_annotation_metadata(
+ object_=agent, annotation=AgentsCallRequestAgentParams, direction="write"
+ ),
+ "inputs": inputs,
+ "source": source,
+ "metadata": metadata,
+ "start_time": start_time,
+ "end_time": end_time,
+ "log_status": log_status,
+ "source_datapoint_id": source_datapoint_id,
+ "trace_parent_id": trace_parent_id,
+ "user": user,
+ "environment": agents_call_request_environment,
+ "save": save,
+ "log_id": log_id,
+ "provider_api_keys": convert_and_respect_annotation_metadata(
+ object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+ ),
+ "return_inputs": return_inputs,
+ "include_trace_children": include_trace_children,
+ "stream": False,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentCallResponse,
+ construct_type(
+ type_=AgentCallResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ @contextlib.contextmanager
+ def continue_stream(
+ self,
+ *,
+ log_id: str,
+ messages: typing.Sequence[ChatMessageParams],
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.Iterator[HttpResponse[typing.Iterator[AgentContinueStreamResponse]]]:
+ """
+ Continue an incomplete Agent call.
+
+ This endpoint allows continuing an existing incomplete Agent call, using the context
+ from the previous interaction. The Agent will resume processing from where it left off.
+
+ The original log must be in an incomplete state to be continued.
+
+ The messages in the request will be appended
+ to the original messages in the log.
+
+ Parameters
+ ----------
+ log_id : str
+ This identifies the Agent Log to continue.
+
+ messages : typing.Sequence[ChatMessageParams]
+            The additional messages with which to continue the Agent Log. Often, these should start with Tool messages containing the results of the previous Assistant message's tool calls.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Yields
+ ------
+ typing.Iterator[HttpResponse[typing.Iterator[AgentContinueStreamResponse]]]
+
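+        Examples
+        --------
+        A hand-written sketch (not auto-generated): resume an incomplete Agent
+        Log and stream the continuation. The Log ID and message are
+        illustrative; tool-result messages are the more typical payload here.
+
+        from humanloop import Humanloop
+
+        client = Humanloop(
+            api_key="YOUR_API_KEY",
+        )
+        with client.agents._raw_client.continue_stream(
+            log_id="log_456",
+            messages=[{"role": "user", "content": "Please continue."}],
+        ) as response:
+            for chunk in response.data:
+                print(chunk)
+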
+ """
+ with self._client_wrapper.httpx_client.stream(
+ "agents/continue",
+ method="POST",
+ json={
+ "log_id": log_id,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "provider_api_keys": convert_and_respect_annotation_metadata(
+ object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+ ),
+ "include_trace_children": include_trace_children,
+ "stream": True,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ ) as _response:
+
+ def stream() -> HttpResponse[typing.Iterator[AgentContinueStreamResponse]]:
+ try:
+ if 200 <= _response.status_code < 300:
+
+                    def _iter():
+                        _event_source = httpx_sse.EventSource(_response)
+                        for _sse in _event_source.iter_sse():
+                            if _sse.data is None:
+                                return
+                            try:
+                                # Parse each SSE payload into the typed stream
+                                # response rather than calling the raw string.
+                                yield typing.cast(
+                                    AgentContinueStreamResponse,
+                                    construct_type(
+                                        type_=AgentContinueStreamResponse,  # type: ignore
+                                        object_=_sse.json(),
+                                    ),
+                                )
+                            except Exception:
+                                pass
+                        return
+
+ return HttpResponse(response=_response, data=_iter())
+ _response.read()
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ yield stream()
+
+ def continue_(
+ self,
+ *,
+ log_id: str,
+ messages: typing.Sequence[ChatMessageParams],
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[AgentContinueResponse]:
+ """
+ Continue an incomplete Agent call.
+
+ This endpoint allows continuing an existing incomplete Agent call, using the context
+ from the previous interaction. The Agent will resume processing from where it left off.
+
+ The original log must be in an incomplete state to be continued.
+
+ The messages in the request will be appended
+ to the original messages in the log.
+
+ Parameters
+ ----------
+ log_id : str
+ This identifies the Agent Log to continue.
+
+ messages : typing.Sequence[ChatMessageParams]
+            The additional messages with which to continue the Agent Log. Often, these should start with Tool messages containing the results of the previous Assistant message's tool calls.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[AgentContinueResponse]
+
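+        Examples
+        --------
+        A hand-written sketch (not auto-generated): continue an incomplete
+        Agent Log in one blocking call. The Log ID and message are
+        illustrative.
+
+        from humanloop import Humanloop
+
+        client = Humanloop(
+            api_key="YOUR_API_KEY",
+        )
+        response = client.agents._raw_client.continue_(
+            log_id="log_456",
+            messages=[{"role": "user", "content": "Please continue."}],
+        )
+        print(response.data)
+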
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "agents/continue",
+ method="POST",
+ json={
+ "log_id": log_id,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "provider_api_keys": convert_and_respect_annotation_metadata(
+ object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+ ),
+ "include_trace_children": include_trace_children,
+ "stream": False,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentContinueResponse,
+ construct_type(
+ type_=AgentContinueResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def list(
+ self,
+ *,
+ page: typing.Optional[int] = None,
+ size: typing.Optional[int] = None,
+ name: typing.Optional[str] = None,
+ user_filter: typing.Optional[str] = None,
+ sort_by: typing.Optional[ProjectSortBy] = None,
+ order: typing.Optional[SortOrder] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[PaginatedDataAgentResponse]:
+ """
+ Get a list of all Agents.
+
+ Parameters
+ ----------
+ page : typing.Optional[int]
+ Page number for pagination.
+
+ size : typing.Optional[int]
+ Page size for pagination. Number of Agents to fetch.
+
+ name : typing.Optional[str]
+ Case-insensitive filter for Agent name.
+
+ user_filter : typing.Optional[str]
+ Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users.
+
+ sort_by : typing.Optional[ProjectSortBy]
+            Field to sort Agents by.
+
+ order : typing.Optional[SortOrder]
+ Direction to sort by.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[PaginatedDataAgentResponse]
+ Successful Response
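+
+        Examples
+        --------
+        A hand-written sketch (not auto-generated): fetch the first page of
+        Agents. The page size is illustrative, and iterating `records` assumes
+        the usual shape of the paginated response.
+
+        from humanloop import Humanloop
+
+        client = Humanloop(
+            api_key="YOUR_API_KEY",
+        )
+        response = client.agents._raw_client.list(size=10)
+        for agent in response.data.records:
+            print(agent.path)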
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "agents",
+ method="GET",
+ params={
+ "page": page,
+ "size": size,
+ "name": name,
+ "user_filter": user_filter,
+ "sort_by": sort_by,
+ "order": order,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ PaginatedDataAgentResponse,
+ construct_type(
+ type_=PaginatedDataAgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def upsert(
+ self,
+ *,
+ model: str,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ endpoint: typing.Optional[ModelEndpoints] = OMIT,
+ template: typing.Optional[AgentRequestTemplateParams] = OMIT,
+ template_language: typing.Optional[TemplateLanguage] = OMIT,
+ provider: typing.Optional[ModelProviders] = OMIT,
+ max_tokens: typing.Optional[int] = OMIT,
+ temperature: typing.Optional[float] = OMIT,
+ top_p: typing.Optional[float] = OMIT,
+ stop: typing.Optional[AgentRequestStopParams] = OMIT,
+ presence_penalty: typing.Optional[float] = OMIT,
+ frequency_penalty: typing.Optional[float] = OMIT,
+ other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ seed: typing.Optional[int] = OMIT,
+ response_format: typing.Optional[ResponseFormatParams] = OMIT,
+ reasoning_effort: typing.Optional[AgentRequestReasoningEffortParams] = OMIT,
+ tools: typing.Optional[typing.Sequence[AgentRequestToolsItemParams]] = OMIT,
+ attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ max_iterations: typing.Optional[int] = OMIT,
+ version_name: typing.Optional[str] = OMIT,
+ version_description: typing.Optional[str] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ tags: typing.Optional[typing.Sequence[str]] = OMIT,
+ readme: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[AgentResponse]:
+ """
+ Create an Agent or update it with a new version if it already exists.
+
+        Agents are identified by the `ID` or their `path`. The parameters (e.g. the template, temperature, model, etc.) and
+        tools determine the versions of the Agent.
+
+ You can provide `version_name` and `version_description` to identify and describe your versions.
+ Version names must be unique within an Agent - attempting to create a version with a name
+ that already exists will result in a 409 Conflict error.
+
+ Parameters
+ ----------
+ model : str
+ The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+
+ path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ endpoint : typing.Optional[ModelEndpoints]
+ The provider model endpoint used.
+
+ template : typing.Optional[AgentRequestTemplateParams]
+ The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+ For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+ For completion models, provide a prompt template as a string.
+
+ Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+
+ template_language : typing.Optional[TemplateLanguage]
+ The template language to use for rendering the template.
+
+ provider : typing.Optional[ModelProviders]
+ The company providing the underlying model service.
+
+        max_tokens : typing.Optional[int]
+            The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt.
+
+        temperature : typing.Optional[float]
+            What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+
+ top_p : typing.Optional[float]
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+
+ stop : typing.Optional[AgentRequestStopParams]
+ The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+
+ presence_penalty : typing.Optional[float]
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+
+ frequency_penalty : typing.Optional[float]
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+
+ other : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Other parameter values to be passed to the provider call.
+
+ seed : typing.Optional[int]
+ If specified, model will make a best effort to sample deterministically, but it is not guaranteed.
+
+ response_format : typing.Optional[ResponseFormatParams]
+ The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+
+ reasoning_effort : typing.Optional[AgentRequestReasoningEffortParams]
+            Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
+
+        tools : typing.Optional[typing.Sequence[AgentRequestToolsItemParams]]
+            The tools available to the Agent.
+
+        attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used.
+
+        max_iterations : typing.Optional[int]
+            The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
+
+        version_name : typing.Optional[str]
+            Unique name for the Agent version. Each Agent can only have one version with a given name.
+
+        version_description : typing.Optional[str]
+            Description of the Version.
+
+        description : typing.Optional[str]
+            Description of the Agent.
+
+        tags : typing.Optional[typing.Sequence[str]]
+            List of tags associated with this Agent.
+
+        readme : typing.Optional[str]
+            Long description of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[AgentResponse]
+ Successful Response
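+
+        Examples
+        --------
+        A hand-written sketch (not auto-generated): create or version an Agent
+        from code. The path, model, and template values are illustrative.
+
+        from humanloop import Humanloop
+
+        client = Humanloop(
+            api_key="YOUR_API_KEY",
+        )
+        response = client.agents._raw_client.upsert(
+            path="sample/agent",
+            model="gpt-4o",
+            template=[
+                {"role": "system", "content": "You are a helpful assistant."}
+            ],
+            max_iterations=5,
+        )
+        print(response.data)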
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "agents",
+ method="POST",
+ json={
+ "path": path,
+ "id": id,
+ "model": model,
+ "endpoint": endpoint,
+ "template": convert_and_respect_annotation_metadata(
+ object_=template, annotation=AgentRequestTemplateParams, direction="write"
+ ),
+ "template_language": template_language,
+ "provider": provider,
+ "max_tokens": max_tokens,
+ "temperature": temperature,
+ "top_p": top_p,
+ "stop": convert_and_respect_annotation_metadata(
+ object_=stop, annotation=AgentRequestStopParams, direction="write"
+ ),
+ "presence_penalty": presence_penalty,
+ "frequency_penalty": frequency_penalty,
+ "other": other,
+ "seed": seed,
+ "response_format": convert_and_respect_annotation_metadata(
+ object_=response_format, annotation=ResponseFormatParams, direction="write"
+ ),
+ "reasoning_effort": convert_and_respect_annotation_metadata(
+ object_=reasoning_effort, annotation=AgentRequestReasoningEffortParams, direction="write"
+ ),
+ "tools": convert_and_respect_annotation_metadata(
+ object_=tools, annotation=typing.Sequence[AgentRequestToolsItemParams], direction="write"
+ ),
+ "attributes": attributes,
+ "max_iterations": max_iterations,
+ "version_name": version_name,
+ "version_description": version_description,
+ "description": description,
+ "tags": tags,
+ "readme": readme,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def delete_agent_version(
+ self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> HttpResponse[None]:
+ """
+ Delete a version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[None]
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return HttpResponse(response=_response, data=None)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def patch_agent_version(
+ self,
+ id: str,
+ version_id: str,
+ *,
+ name: typing.Optional[str] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[AgentResponse]:
+ """
+ Update the name or description of the Agent version.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ name : typing.Optional[str]
+ Name of the version.
+
+ description : typing.Optional[str]
+ Description of the version.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[AgentResponse]
+ Successful Response
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}",
+ method="PATCH",
+ json={
+ "name": name,
+ "description": description,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def get(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[AgentResponse]:
+ """
+ Retrieve the Agent with the given ID.
+
+ By default, the deployed version of the Agent is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[AgentResponse]
+ Successful Response
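+
+        Examples
+        --------
+        A hand-written sketch (not auto-generated): retrieve the version of an
+        Agent deployed to a named Environment. The ID and Environment name are
+        illustrative.
+
+        from humanloop import Humanloop
+
+        client = Humanloop(
+            api_key="YOUR_API_KEY",
+        )
+        response = client.agents._raw_client.get(
+            id="ag_123",
+            environment="production",
+        )
+        print(response.data)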
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}",
+ method="GET",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> HttpResponse[None]:
+ """
+ Delete the Agent with the given ID.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[None]
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return HttpResponse(response=_response, data=None)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def move(
+ self,
+ id: str,
+ *,
+ path: typing.Optional[str] = OMIT,
+ name: typing.Optional[str] = OMIT,
+ directory_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[AgentResponse]:
+ """
+ Move the Agent to a different path or change the name.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+        path : typing.Optional[str]
+            Path of the Agent including the Agent name, which is used as a unique identifier.
+
+        name : typing.Optional[str]
+            Name of the Agent.
+
+        directory_id : typing.Optional[str]
+            Unique identifier for the Directory to move the Agent to. Starts with `dir_`.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[AgentResponse]
+ Successful Response
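+
+        Examples
+        --------
+        A hand-written sketch (not auto-generated): rename an Agent by moving
+        it to a new path. The ID and path are illustrative.
+
+        from humanloop import Humanloop
+
+        client = Humanloop(
+            api_key="YOUR_API_KEY",
+        )
+        response = client.agents._raw_client.move(
+            id="ag_123",
+            path="new-folder/new-name",
+        )
+        print(response.data)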
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}",
+ method="PATCH",
+ json={
+ "path": path,
+ "name": name,
+ "directory_id": directory_id,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def list_versions(
+ self,
+ id: str,
+ *,
+ evaluator_aggregates: typing.Optional[bool] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[ListAgents]:
+ """
+        Get a list of all the versions of an Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ evaluator_aggregates : typing.Optional[bool]
+            Whether to include Evaluator aggregate results for the versions in the response.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[ListAgents]
+ Successful Response
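+
+        Examples
+        --------
+        A hand-written sketch (not auto-generated): list the versions of an
+        Agent. The ID is illustrative.
+
+        from humanloop import Humanloop
+
+        client = Humanloop(
+            api_key="YOUR_API_KEY",
+        )
+        response = client.agents._raw_client.list_versions(id="ag_123")
+        print(response.data)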
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/versions",
+ method="GET",
+ params={
+ "evaluator_aggregates": evaluator_aggregates,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ ListAgents,
+ construct_type(
+ type_=ListAgents, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def set_deployment(
+ self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> HttpResponse[AgentResponse]:
+ """
+ Deploy Agent to an Environment.
+
+        Set the deployed version for the specified Environment. This version
+        will be used for calls made to the Agent in this Environment.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ environment_id : str
+ Unique identifier for the Environment to deploy the Version to.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[AgentResponse]
+ Successful Response
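+
+        Examples
+        --------
+        A hand-written sketch (not auto-generated): pin a specific Agent
+        version to an Environment. All three identifiers are illustrative.
+
+        from humanloop import Humanloop
+
+        client = Humanloop(
+            api_key="YOUR_API_KEY",
+        )
+        response = client.agents._raw_client.set_deployment(
+            id="ag_123",
+            environment_id="env_123",
+            version_id="agv_456",
+        )
+        print(response.data)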
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}",
+ method="POST",
+ params={
+ "version_id": version_id,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def remove_deployment(
+ self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> HttpResponse[None]:
+ """
+ Remove deployed Agent from the Environment.
+
+        Remove the deployed version for the specified Environment. This version
+        will no longer be used for calls made to the Agent in this Environment.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ environment_id : str
+ Unique identifier for the Environment to remove the deployment from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[None]
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return HttpResponse(response=_response, data=None)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def list_environments(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> HttpResponse[typing.List[FileEnvironmentResponse]]:
+ """
+ List all Environments and their deployed versions for the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[typing.List[FileEnvironmentResponse]]
+ Successful Response
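+
+        Examples
+        --------
+        # Illustrative sketch only (placeholder ID; raw-client access via
+        # `with_raw_response` is an assumption).
+        from humanloop import Humanloop
+
+        client = Humanloop(api_key="YOUR_API_KEY")
+        response = client.agents.with_raw_response.list_environments(id="ag_1234")
+        for deployment in response.data:
+            print(deployment)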
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/environments",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ typing.List[FileEnvironmentResponse],
+ construct_type(
+ type_=typing.List[FileEnvironmentResponse], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def update_monitoring(
+ self,
+ id: str,
+ *,
+ activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT,
+ deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[AgentResponse]:
+ """
+ Activate and deactivate Evaluators for monitoring the Agent.
+
+ An activated Evaluator will automatically be run on all new Logs
+ within the Agent for monitoring purposes.
+
+ Parameters
+ ----------
+        id : str
+            Unique identifier for Agent.
+
+ activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]]
+ Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+
+ deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]]
+ Evaluators to deactivate. These will not be run on new Logs.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[AgentResponse]
+ Successful Response
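+
+        Examples
+        --------
+        # Illustrative sketch only: IDs are placeholders, and the exact shape of
+        # the activate/deactivate items is defined by the request TypedDicts; the
+        # `evaluator_version_id` key shown here is an assumption.
+        from humanloop import Humanloop
+
+        client = Humanloop(api_key="YOUR_API_KEY")
+        client.agents.with_raw_response.update_monitoring(
+            id="ag_1234",
+            activate=[{"evaluator_version_id": "evv_1234"}],
+        )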
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/evaluators",
+ method="POST",
+ json={
+ "activate": convert_and_respect_annotation_metadata(
+ object_=activate,
+ annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams],
+ direction="write",
+ ),
+ "deactivate": convert_and_respect_annotation_metadata(
+ object_=deactivate,
+ annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams],
+ direction="write",
+ ),
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def serialize(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[str]:
+ """
+ Serialize an Agent to the .agent file format.
+
+ Useful for storing the Agent with your code in a version control system,
+ or for editing with an AI tool.
+
+ By default, the deployed version of the Agent is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[str]
+ Successful Response
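+
+        Examples
+        --------
+        # Sketch of saving the serialized Agent alongside your code (placeholder
+        # ID and filename; raw-client access via `with_raw_response` is an
+        # assumption).
+        from humanloop import Humanloop
+
+        client = Humanloop(api_key="YOUR_API_KEY")
+        response = client.agents.with_raw_response.serialize(id="ag_1234")
+        with open("my_agent.agent", "w") as f:
+            f.write(response.data)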
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/serialize",
+ method="GET",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+                return HttpResponse(response=_response, data=_response.text)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def deserialize(
+ self, *, agent: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> HttpResponse[AgentKernelRequest]:
+ """
+ Deserialize an Agent from the .agent file format.
+
+        This returns a subset of the attributes required by an Agent:
+        the subset that defines the Agent version (e.g. `model`, `temperature`, etc.).
+
+ Parameters
+ ----------
+        agent : str
+            The serialized Agent in the .agent file format.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[AgentKernelRequest]
+ Successful Response
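+
+        Examples
+        --------
+        # Sketch of loading a serialized Agent from disk (placeholder filename;
+        # raw-client access via `with_raw_response` is an assumption).
+        from humanloop import Humanloop
+
+        client = Humanloop(api_key="YOUR_API_KEY")
+        with open("my_agent.agent") as f:
+            kernel = client.agents.with_raw_response.deserialize(agent=f.read()).data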
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "agents/deserialize",
+ method="POST",
+ json={
+ "agent": agent,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentKernelRequest,
+ construct_type(
+ type_=AgentKernelRequest, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncRawAgentsClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def log(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ run_id: typing.Optional[str] = OMIT,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ output_message: typing.Optional[ChatMessageParams] = OMIT,
+ prompt_tokens: typing.Optional[int] = OMIT,
+ reasoning_tokens: typing.Optional[int] = OMIT,
+ output_tokens: typing.Optional[int] = OMIT,
+ prompt_cost: typing.Optional[float] = OMIT,
+ output_cost: typing.Optional[float] = OMIT,
+ finish_reason: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentLogRequestAgentParams] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ output: typing.Optional[str] = OMIT,
+ created_at: typing.Optional[dt.datetime] = OMIT,
+ error: typing.Optional[str] = OMIT,
+ provider_latency: typing.Optional[float] = OMIT,
+ stdout: typing.Optional[str] = OMIT,
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agent_log_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[CreateAgentLogResponse]:
+ """
+ Create an Agent Log.
+
+        You can use the query parameters `version_id` or `environment` to target
+        an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+ If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete`
+ in order to trigger Evaluators.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ run_id : typing.Optional[str]
+ Unique identifier for the Run to associate the Log to.
+
+ path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ output_message : typing.Optional[ChatMessageParams]
+ The message returned by the provider.
+
+ prompt_tokens : typing.Optional[int]
+ Number of tokens in the prompt used to generate the output.
+
+ reasoning_tokens : typing.Optional[int]
+ Number of reasoning tokens used to generate the output.
+
+ output_tokens : typing.Optional[int]
+ Number of tokens in the output generated by the model.
+
+ prompt_cost : typing.Optional[float]
+            Cost in dollars associated with the tokens in the prompt.
+
+        output_cost : typing.Optional[float]
+            Cost in dollars associated with the tokens in the output.
+
+ finish_reason : typing.Optional[str]
+ Reason the generation finished.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentLogRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentLogRequestAgentParams]
+ The agent configuration to use. Two formats are supported:
+ - An `AgentKernelRequest` object containing the agent configuration
+ - A string containing a serialized .agent file
+ A new Agent version will be created if the provided details are new.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ output : typing.Optional[str]
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+ created_at : typing.Optional[dt.datetime]
+ User defined timestamp for when the log was created.
+
+ error : typing.Optional[str]
+ Error message if the log is an error.
+
+ provider_latency : typing.Optional[float]
+ Duration of the logged event in seconds.
+
+ stdout : typing.Optional[str]
+ Captured log and debug statements.
+
+ provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw request sent to provider.
+
+ provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw response received from the provider.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agent_log_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[CreateAgentLogResponse]
+ Successful Response
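+
+        Examples
+        --------
+        # Minimal async sketch (placeholder path and messages; raw-client access
+        # via `with_raw_response` is an assumption).
+        import asyncio
+
+        from humanloop import AsyncHumanloop
+
+        client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+        async def main() -> None:
+            response = await client.agents.with_raw_response.log(
+                path="My Agent",
+                messages=[{"role": "user", "content": "Hello"}],
+                output="Hi there!",
+            )
+            print(response.data)
+
+        asyncio.run(main())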
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "agents/log",
+ method="POST",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ json={
+ "run_id": run_id,
+ "path": path,
+ "id": id,
+ "output_message": convert_and_respect_annotation_metadata(
+ object_=output_message, annotation=ChatMessageParams, direction="write"
+ ),
+ "prompt_tokens": prompt_tokens,
+ "reasoning_tokens": reasoning_tokens,
+ "output_tokens": output_tokens,
+ "prompt_cost": prompt_cost,
+ "output_cost": output_cost,
+ "finish_reason": finish_reason,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "tool_choice": convert_and_respect_annotation_metadata(
+ object_=tool_choice, annotation=AgentLogRequestToolChoiceParams, direction="write"
+ ),
+ "agent": convert_and_respect_annotation_metadata(
+ object_=agent, annotation=AgentLogRequestAgentParams, direction="write"
+ ),
+ "start_time": start_time,
+ "end_time": end_time,
+ "output": output,
+ "created_at": created_at,
+ "error": error,
+ "provider_latency": provider_latency,
+ "stdout": stdout,
+ "provider_request": provider_request,
+ "provider_response": provider_response,
+ "inputs": inputs,
+ "source": source,
+ "metadata": metadata,
+ "log_status": log_status,
+ "source_datapoint_id": source_datapoint_id,
+ "trace_parent_id": trace_parent_id,
+ "user": user,
+ "environment": agent_log_request_environment,
+ "save": save,
+ "log_id": log_id,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ CreateAgentLogResponse,
+ construct_type(
+ type_=CreateAgentLogResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def update_log(
+ self,
+ id: str,
+ log_id: str,
+ *,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ output_message: typing.Optional[ChatMessageParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ output: typing.Optional[str] = OMIT,
+ error: typing.Optional[str] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[LogResponse]:
+ """
+ Update a Log.
+
+ Update the details of a Log with the given ID.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ log_id : str
+ Unique identifier for the Log.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            List of chat messages that were used as an input to the Agent.
+
+        output_message : typing.Optional[ChatMessageParams]
+            The output message returned by this Agent.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the Agent Log.
+
+        output : typing.Optional[str]
+            The output of the Agent Log. Provide None to unset the existing `output` value. Provide either this, `output_message`, or `error`.
+
+        error : typing.Optional[str]
+            The error message of the Agent Log. Provide None to unset the existing `error` value. Provide either this, `output_message`, or `output`.
+
+        log_status : typing.Optional[LogStatus]
+            Status of the Agent Log. When an Agent Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[LogResponse]
+ Successful Response
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/log/{jsonable_encoder(log_id)}",
+ method="PATCH",
+ json={
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "output_message": convert_and_respect_annotation_metadata(
+ object_=output_message, annotation=ChatMessageParams, direction="write"
+ ),
+ "inputs": inputs,
+ "output": output,
+ "error": error,
+ "log_status": log_status,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ LogResponse,
+ construct_type(
+ type_=LogResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ @contextlib.asynccontextmanager
+ async def call_stream(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentsCallStreamRequestAgentParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agents_call_stream_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ return_inputs: typing.Optional[bool] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AgentCallStreamResponse]]]:
+ """
+ Call an Agent.
+
+ Calling an Agent calls the model provider before logging
+ the request, responses and metadata to Humanloop.
+
+        You can use the query parameters `version_id` or `environment` to target
+        an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+        Instead of targeting an existing version explicitly, you can pass in
+        Agent details in the request body. In this case, we will check if the details correspond
+        to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+        when you are storing or deriving your Agent details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentsCallStreamRequestAgentParams]
+ The agent configuration to use. Two formats are supported:
+ - An `AgentKernelRequest` object containing the agent configuration
+ - A string containing a serialized .agent file
+ A new Agent version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agents_call_stream_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Yields
+ ------
+ typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AgentCallStreamResponse]]]
+
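+        Examples
+        --------
+        # Async streaming sketch (placeholder path; raw-client access via
+        # `with_raw_response` is an assumption).
+        import asyncio
+
+        from humanloop import AsyncHumanloop
+
+        client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+        async def main() -> None:
+            async with client.agents.with_raw_response.call_stream(
+                path="My Agent",
+                messages=[{"role": "user", "content": "Hello"}],
+            ) as response:
+                async for chunk in response.data:
+                    print(chunk)
+
+        asyncio.run(main())
+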
+ """
+ async with self._client_wrapper.httpx_client.stream(
+ "agents/call",
+ method="POST",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ json={
+ "path": path,
+ "id": id,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "tool_choice": convert_and_respect_annotation_metadata(
+ object_=tool_choice, annotation=AgentsCallStreamRequestToolChoiceParams, direction="write"
+ ),
+ "agent": convert_and_respect_annotation_metadata(
+ object_=agent, annotation=AgentsCallStreamRequestAgentParams, direction="write"
+ ),
+ "inputs": inputs,
+ "source": source,
+ "metadata": metadata,
+ "start_time": start_time,
+ "end_time": end_time,
+ "log_status": log_status,
+ "source_datapoint_id": source_datapoint_id,
+ "trace_parent_id": trace_parent_id,
+ "user": user,
+ "environment": agents_call_stream_request_environment,
+ "save": save,
+ "log_id": log_id,
+ "provider_api_keys": convert_and_respect_annotation_metadata(
+ object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+ ),
+ "return_inputs": return_inputs,
+ "include_trace_children": include_trace_children,
+ "stream": True,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ ) as _response:
+
+ async def stream() -> AsyncHttpResponse[typing.AsyncIterator[AgentCallStreamResponse]]:
+ try:
+ if 200 <= _response.status_code < 300:
+
+                    async def _iter():
+                        _event_source = httpx_sse.EventSource(_response)
+                        async for _sse in _event_source.aiter_sse():
+                            if not _sse.data:
+                                return
+                            try:
+                                # Parse each SSE payload into the typed stream
+                                # response (relies on the module-level `json` import).
+                                yield typing.cast(
+                                    AgentCallStreamResponse,
+                                    construct_type(
+                                        type_=AgentCallStreamResponse,  # type: ignore
+                                        object_=json.loads(_sse.data),
+                                    ),
+                                )
+                            except Exception:
+                                pass
+                        return
+
+ return AsyncHttpResponse(response=_response, data=_iter())
+ await _response.aread()
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ yield await stream()
+
+ async def call(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
+ tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT,
+ agent: typing.Optional[AgentsCallRequestAgentParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ agents_call_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ return_inputs: typing.Optional[bool] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[AgentCallResponse]:
+ """
+ Call an Agent.
+
+ Calling an Agent calls the model provider before logging
+ the request, responses and metadata to Humanloop.
+
+        You can use the query parameters `version_id` or `environment` to target
+        an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+
+        Instead of targeting an existing version explicitly, you can pass in
+        Agent details in the request body. In this case, we will check if the details correspond
+        to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+        when you are storing or deriving your Agent details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+            Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            The messages passed to the provider chat endpoint.
+
+ tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+
+ agent : typing.Optional[AgentsCallRequestAgentParams]
+ The agent configuration to use. Two formats are supported:
+ - An `AgentKernelRequest` object containing the agent configuration
+ - A string containing a serialized .agent file
+ A new Agent version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ agents_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[AgentCallResponse]
+
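+        Examples
+        --------
+        # Minimal async sketch (placeholder path; raw-client access via
+        # `with_raw_response` is an assumption).
+        import asyncio
+
+        from humanloop import AsyncHumanloop
+
+        client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+        async def main() -> None:
+            response = await client.agents.with_raw_response.call(
+                path="My Agent",
+                messages=[{"role": "user", "content": "Hello"}],
+            )
+            print(response.data)
+
+        asyncio.run(main())
+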
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "agents/call",
+ method="POST",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ json={
+ "path": path,
+ "id": id,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "tool_choice": convert_and_respect_annotation_metadata(
+ object_=tool_choice, annotation=AgentsCallRequestToolChoiceParams, direction="write"
+ ),
+ "agent": convert_and_respect_annotation_metadata(
+ object_=agent, annotation=AgentsCallRequestAgentParams, direction="write"
+ ),
+ "inputs": inputs,
+ "source": source,
+ "metadata": metadata,
+ "start_time": start_time,
+ "end_time": end_time,
+ "log_status": log_status,
+ "source_datapoint_id": source_datapoint_id,
+ "trace_parent_id": trace_parent_id,
+ "user": user,
+ "environment": agents_call_request_environment,
+ "save": save,
+ "log_id": log_id,
+ "provider_api_keys": convert_and_respect_annotation_metadata(
+ object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+ ),
+ "return_inputs": return_inputs,
+ "include_trace_children": include_trace_children,
+ "stream": False,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentCallResponse,
+ construct_type(
+ type_=AgentCallResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ @contextlib.asynccontextmanager
+ async def continue_stream(
+ self,
+ *,
+ log_id: str,
+ messages: typing.Sequence[ChatMessageParams],
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AgentContinueStreamResponse]]]:
+ """
+ Continue an incomplete Agent call.
+
+ This endpoint allows continuing an existing incomplete Agent call, using the context
+ from the previous interaction. The Agent will resume processing from where it left off.
+
+ The original log must be in an incomplete state to be continued.
+
+ The messages in the request will be appended
+ to the original messages in the log.
+
+ Parameters
+ ----------
+ log_id : str
+ This identifies the Agent Log to continue.
+
+ messages : typing.Sequence[ChatMessageParams]
+ The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Yields
+ ------
+ typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AgentContinueStreamResponse]]]
+
+ """
+ async with self._client_wrapper.httpx_client.stream(
+ "agents/continue",
+ method="POST",
+ json={
+ "log_id": log_id,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "provider_api_keys": convert_and_respect_annotation_metadata(
+ object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+ ),
+ "include_trace_children": include_trace_children,
+ "stream": True,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ ) as _response:
+
+ async def stream() -> AsyncHttpResponse[typing.AsyncIterator[AgentContinueStreamResponse]]:
+ try:
+ if 200 <= _response.status_code < 300:
+
+                    async def _iter():
+                        _event_source = httpx_sse.EventSource(_response)
+                        async for _sse in _event_source.aiter_sse():
+                            if not _sse.data:
+                                return
+                            try:
+                                # Parse each SSE payload into the typed stream
+                                # response (relies on the module-level `json` import).
+                                yield typing.cast(
+                                    AgentContinueStreamResponse,
+                                    construct_type(
+                                        type_=AgentContinueStreamResponse,  # type: ignore
+                                        object_=json.loads(_sse.data),
+                                    ),
+                                )
+                            except Exception:
+                                pass
+                        return
+
+ return AsyncHttpResponse(response=_response, data=_iter())
+ await _response.aread()
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ yield await stream()
+
+ async def continue_(
+ self,
+ *,
+ log_id: str,
+ messages: typing.Sequence[ChatMessageParams],
+ provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
+ include_trace_children: typing.Optional[bool] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[AgentContinueResponse]:
+ """
+ Continue an incomplete Agent call.
+
+ This endpoint allows continuing an existing incomplete Agent call, using the context
+ from the previous interaction. The Agent will resume processing from where it left off.
+
+ The original log must be in an incomplete state to be continued.
+
+ The messages in the request will be appended
+ to the original messages in the log.
+
+ Parameters
+ ----------
+ log_id : str
+ This identifies the Agent Log to continue.
+
+ messages : typing.Sequence[ChatMessageParams]
+ The additional messages with which to continue the Agent Log. Often, these should start with the Tool messages with results for the previous Assistant message's tool calls.
+
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Defaults to false.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[AgentContinueResponse]
+
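+        Examples
+        --------
+        # Sketch of continuing an incomplete Agent Log with tool results
+        # (placeholder IDs; the tool-message shape and raw-client access via
+        # `with_raw_response` are assumptions).
+        import asyncio
+
+        from humanloop import AsyncHumanloop
+
+        client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+        async def main() -> None:
+            response = await client.agents.with_raw_response.continue_(
+                log_id="log_1234",
+                messages=[{"role": "tool", "content": "42", "tool_call_id": "call_1234"}],
+            )
+            print(response.data)
+
+        asyncio.run(main())
+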
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "agents/continue",
+ method="POST",
+ json={
+ "log_id": log_id,
+ "messages": convert_and_respect_annotation_metadata(
+ object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write"
+ ),
+ "provider_api_keys": convert_and_respect_annotation_metadata(
+ object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write"
+ ),
+ "include_trace_children": include_trace_children,
+ "stream": False,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentContinueResponse,
+ construct_type(
+ type_=AgentContinueResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def list(
+ self,
+ *,
+ page: typing.Optional[int] = None,
+ size: typing.Optional[int] = None,
+ name: typing.Optional[str] = None,
+ user_filter: typing.Optional[str] = None,
+ sort_by: typing.Optional[ProjectSortBy] = None,
+ order: typing.Optional[SortOrder] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[PaginatedDataAgentResponse]:
+ """
+ Get a list of all Agents.
+
+ Parameters
+ ----------
+ page : typing.Optional[int]
+ Page number for pagination.
+
+ size : typing.Optional[int]
+ Page size for pagination. Number of Agents to fetch.
+
+ name : typing.Optional[str]
+ Case-insensitive filter for Agent name.
+
+ user_filter : typing.Optional[str]
+ Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users.
+
+ sort_by : typing.Optional[ProjectSortBy]
+            Field to sort Agents by.
+
+ order : typing.Optional[SortOrder]
+ Direction to sort by.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[PaginatedDataAgentResponse]
+ Successful Response
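+
+        Examples
+        --------
+        # Async pagination sketch (assumes the paginated payload exposes a
+        # `records` list and raw-client access via `with_raw_response`).
+        import asyncio
+
+        from humanloop import AsyncHumanloop
+
+        client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+        async def main() -> None:
+            response = await client.agents.with_raw_response.list(page=1, size=10)
+            for agent in response.data.records:
+                print(agent.path)
+
+        asyncio.run(main())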
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "agents",
+ method="GET",
+ params={
+ "page": page,
+ "size": size,
+ "name": name,
+ "user_filter": user_filter,
+ "sort_by": sort_by,
+ "order": order,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ PaginatedDataAgentResponse,
+ construct_type(
+ type_=PaginatedDataAgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def upsert(
+ self,
+ *,
+ model: str,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ endpoint: typing.Optional[ModelEndpoints] = OMIT,
+ template: typing.Optional[AgentRequestTemplateParams] = OMIT,
+ template_language: typing.Optional[TemplateLanguage] = OMIT,
+ provider: typing.Optional[ModelProviders] = OMIT,
+ max_tokens: typing.Optional[int] = OMIT,
+ temperature: typing.Optional[float] = OMIT,
+ top_p: typing.Optional[float] = OMIT,
+ stop: typing.Optional[AgentRequestStopParams] = OMIT,
+ presence_penalty: typing.Optional[float] = OMIT,
+ frequency_penalty: typing.Optional[float] = OMIT,
+ other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ seed: typing.Optional[int] = OMIT,
+ response_format: typing.Optional[ResponseFormatParams] = OMIT,
+ reasoning_effort: typing.Optional[AgentRequestReasoningEffortParams] = OMIT,
+ tools: typing.Optional[typing.Sequence[AgentRequestToolsItemParams]] = OMIT,
+ attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ max_iterations: typing.Optional[int] = OMIT,
+ version_name: typing.Optional[str] = OMIT,
+ version_description: typing.Optional[str] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ tags: typing.Optional[typing.Sequence[str]] = OMIT,
+ readme: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[AgentResponse]:
+ """
+ Create an Agent or update it with a new version if it already exists.
+
+        Agents are identified by their `id` or `path`. The parameters (i.e. the template, temperature, model, etc.) and
+        tools determine the versions of the Agent.
+
+ You can provide `version_name` and `version_description` to identify and describe your versions.
+ Version names must be unique within an Agent - attempting to create a version with a name
+ that already exists will result in a 409 Conflict error.
+
+ Parameters
+ ----------
+ model : str
+ The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Agent.
+
+ endpoint : typing.Optional[ModelEndpoints]
+ The provider model endpoint used.
+
+ template : typing.Optional[AgentRequestTemplateParams]
+ The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+ For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+ For completion models, provide a prompt template as a string.
+
+ Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+
+ template_language : typing.Optional[TemplateLanguage]
+ The template language to use for rendering the template.
+
+ provider : typing.Optional[ModelProviders]
+ The company providing the underlying model service.
+
+ max_tokens : typing.Optional[int]
+            The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt.
+
+        temperature : typing.Optional[float]
+            What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+
+ top_p : typing.Optional[float]
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+
+ stop : typing.Optional[AgentRequestStopParams]
+ The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+
+ presence_penalty : typing.Optional[float]
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+
+ frequency_penalty : typing.Optional[float]
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+
+ other : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Other parameter values to be passed to the provider call.
+
+ seed : typing.Optional[int]
+ If specified, model will make a best effort to sample deterministically, but it is not guaranteed.
+
+ response_format : typing.Optional[ResponseFormatParams]
+ The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+
+ reasoning_effort : typing.Optional[AgentRequestReasoningEffortParams]
+            Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
+
+ tools : typing.Optional[typing.Sequence[AgentRequestToolsItemParams]]
+
+ attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used.
+
+        max_iterations : typing.Optional[int]
+            The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
+
+        version_name : typing.Optional[str]
+            Unique name for the Agent version. Each Agent can only have one version with a given name.
+
+        version_description : typing.Optional[str]
+            Description of the Version.
+
+        description : typing.Optional[str]
+            Description of the Agent.
+
+        tags : typing.Optional[typing.Sequence[str]]
+            List of tags associated with this Agent.
+
+        readme : typing.Optional[str]
+            Long description of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[AgentResponse]
+ Successful Response
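+
+        Examples
+        --------
+        # Minimal async upsert sketch (path, model, and template are placeholders;
+        # raw-client access via `with_raw_response` is an assumption).
+        import asyncio
+
+        from humanloop import AsyncHumanloop
+
+        client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+        async def main() -> None:
+            response = await client.agents.with_raw_response.upsert(
+                path="My Agent",
+                model="gpt-4o",
+                template=[{"role": "system", "content": "You are a helpful assistant."}],
+            )
+            print(response.data)
+
+        asyncio.run(main())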
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "agents",
+ method="POST",
+ json={
+ "path": path,
+ "id": id,
+ "model": model,
+ "endpoint": endpoint,
+ "template": convert_and_respect_annotation_metadata(
+ object_=template, annotation=AgentRequestTemplateParams, direction="write"
+ ),
+ "template_language": template_language,
+ "provider": provider,
+ "max_tokens": max_tokens,
+ "temperature": temperature,
+ "top_p": top_p,
+ "stop": convert_and_respect_annotation_metadata(
+ object_=stop, annotation=AgentRequestStopParams, direction="write"
+ ),
+ "presence_penalty": presence_penalty,
+ "frequency_penalty": frequency_penalty,
+ "other": other,
+ "seed": seed,
+ "response_format": convert_and_respect_annotation_metadata(
+ object_=response_format, annotation=ResponseFormatParams, direction="write"
+ ),
+ "reasoning_effort": convert_and_respect_annotation_metadata(
+ object_=reasoning_effort, annotation=AgentRequestReasoningEffortParams, direction="write"
+ ),
+ "tools": convert_and_respect_annotation_metadata(
+ object_=tools, annotation=typing.Sequence[AgentRequestToolsItemParams], direction="write"
+ ),
+ "attributes": attributes,
+ "max_iterations": max_iterations,
+ "version_name": version_name,
+ "version_description": version_description,
+ "description": description,
+ "tags": tags,
+ "readme": readme,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def delete_agent_version(
+ self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncHttpResponse[None]:
+ """
+ Delete a version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[None]
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return AsyncHttpResponse(response=_response, data=None)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def patch_agent_version(
+ self,
+ id: str,
+ version_id: str,
+ *,
+ name: typing.Optional[str] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[AgentResponse]:
+ """
+ Update the name or description of the Agent version.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ name : typing.Optional[str]
+ Name of the version.
+
+ description : typing.Optional[str]
+ Description of the version.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[AgentResponse]
+ Successful Response
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}",
+ method="PATCH",
+ json={
+ "name": name,
+ "description": description,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def get(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[AgentResponse]:
+ """
+ Retrieve the Agent with the given ID.
+
+ By default, the deployed version of the Agent is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[AgentResponse]
+ Successful Response
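+
+ Examples
+ --------
+ A minimal usage sketch, assuming the public `client.agents` client mirrors
+ this raw method (the ID and Environment name are illustrative):
+
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+
+ async def main() -> None:
+     # Omitting version_id/environment returns the deployed version.
+     agent = await client.agents.get(
+         id="ag_1234",  # illustrative Agent ID
+         environment="production",  # illustrative Environment name
+     )
+
+
+ asyncio.run(main())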
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}",
+ method="GET",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def delete(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncHttpResponse[None]:
+ """
+ Delete the Agent with the given ID.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[None]
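+
+ Examples
+ --------
+ A minimal usage sketch, assuming the public `client.agents` client mirrors
+ this raw method (the ID is illustrative):
+
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+
+ async def main() -> None:
+     await client.agents.delete(
+         id="ag_1234",  # illustrative Agent ID
+     )
+
+
+ asyncio.run(main())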
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return AsyncHttpResponse(response=_response, data=None)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def move(
+ self,
+ id: str,
+ *,
+ path: typing.Optional[str] = OMIT,
+ name: typing.Optional[str] = OMIT,
+ directory_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[AgentResponse]:
+ """
+ Move the Agent to a different path or change the name.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ path : typing.Optional[str]
+ Path of the Agent including the Agent name, which is used as a unique identifier.
+
+ name : typing.Optional[str]
+ Name of the Agent.
+
+ directory_id : typing.Optional[str]
+ Unique identifier for the Directory to move the Agent to. Starts with `dir_`.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[AgentResponse]
+ Successful Response
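+
+ Examples
+ --------
+ A minimal usage sketch, assuming the public `client.agents` client mirrors
+ this raw method (the ID and destination path are illustrative):
+
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+
+ async def main() -> None:
+     agent = await client.agents.move(
+         id="ag_1234",  # illustrative Agent ID
+         path="New Directory/New Agent Name",  # illustrative destination path
+     )
+
+
+ asyncio.run(main())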
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}",
+ method="PATCH",
+ json={
+ "path": path,
+ "name": name,
+ "directory_id": directory_id,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def list_versions(
+ self,
+ id: str,
+ *,
+ evaluator_aggregates: typing.Optional[bool] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[ListAgents]:
+ """
+ Get a list of all the versions of an Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ evaluator_aggregates : typing.Optional[bool]
+ Whether to include Evaluator aggregate results for the versions in the response.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[ListAgents]
+ Successful Response
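+
+ Examples
+ --------
+ A minimal usage sketch, assuming the public `client.agents` client mirrors
+ this raw method (the ID is illustrative):
+
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+
+ async def main() -> None:
+     versions = await client.agents.list_versions(
+         id="ag_1234",  # illustrative Agent ID
+         evaluator_aggregates=True,
+     )
+
+
+ asyncio.run(main())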
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/versions",
+ method="GET",
+ params={
+ "evaluator_aggregates": evaluator_aggregates,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ ListAgents,
+ construct_type(
+ type_=ListAgents, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def set_deployment(
+ self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncHttpResponse[AgentResponse]:
+ """
+ Deploy Agent to an Environment.
+
+ Set the deployed version for the specified Environment. This version of the
+ Agent will be used for calls made to the Agent in this Environment.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ environment_id : str
+ Unique identifier for the Environment to deploy the Version to.
+
+ version_id : str
+ Unique identifier for the specific version of the Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[AgentResponse]
+ Successful Response
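+
+ Examples
+ --------
+ A minimal usage sketch, assuming the public `client.agents` client mirrors
+ this raw method (the IDs are illustrative):
+
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+
+ async def main() -> None:
+     agent = await client.agents.set_deployment(
+         id="ag_1234",  # illustrative Agent ID
+         environment_id="env_abcd",  # illustrative Environment ID
+         version_id="agv_5678",  # illustrative version ID
+     )
+
+
+ asyncio.run(main())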
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}",
+ method="POST",
+ params={
+ "version_id": version_id,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def remove_deployment(
+ self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncHttpResponse[None]:
+ """
+ Remove deployed Agent from the Environment.
+
+ Remove the deployed version for the specified Environment. The deployed
+ version will no longer be used for calls made to the Agent in this Environment.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ environment_id : str
+ Unique identifier for the Environment to remove the deployment from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[None]
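+
+ Examples
+ --------
+ A minimal usage sketch, assuming the public `client.agents` client mirrors
+ this raw method (the IDs are illustrative):
+
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+
+ async def main() -> None:
+     await client.agents.remove_deployment(
+         id="ag_1234",  # illustrative Agent ID
+         environment_id="env_abcd",  # illustrative Environment ID
+     )
+
+
+ asyncio.run(main())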
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return AsyncHttpResponse(response=_response, data=None)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def list_environments(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncHttpResponse[typing.List[FileEnvironmentResponse]]:
+ """
+ List all Environments and their deployed versions for the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[typing.List[FileEnvironmentResponse]]
+ Successful Response
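+
+ Examples
+ --------
+ A minimal usage sketch, assuming the public `client.agents` client mirrors
+ this raw method (the ID is illustrative):
+
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+
+ async def main() -> None:
+     environments = await client.agents.list_environments(
+         id="ag_1234",  # illustrative Agent ID
+     )
+
+
+ asyncio.run(main())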
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/environments",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ typing.List[FileEnvironmentResponse],
+ construct_type(
+ type_=typing.List[FileEnvironmentResponse], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def update_monitoring(
+ self,
+ id: str,
+ *,
+ activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT,
+ deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[AgentResponse]:
+ """
+ Activate and deactivate Evaluators for monitoring the Agent.
+
+ An activated Evaluator will automatically be run on all new Logs
+ within the Agent for monitoring purposes.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]]
+ Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+
+ deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]]
+ Evaluators to deactivate. These will not be run on new Logs.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[AgentResponse]
+ Successful Response
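+
+ Examples
+ --------
+ A minimal usage sketch, assuming the public `client.agents` client mirrors
+ this raw method and that activate items take the `evaluator_version_id`
+ shape used by the other Evaluator activation requests in this SDK (the IDs
+ are illustrative):
+
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+
+ async def main() -> None:
+     agent = await client.agents.update_monitoring(
+         id="ag_1234",  # illustrative Agent ID
+         activate=[
+             {"evaluator_version_id": "evv_5678"},  # illustrative Evaluator version ID
+         ],
+     )
+
+
+ asyncio.run(main())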
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/evaluators",
+ method="POST",
+ json={
+ "activate": convert_and_respect_annotation_metadata(
+ object_=activate,
+ annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams],
+ direction="write",
+ ),
+ "deactivate": convert_and_respect_annotation_metadata(
+ object_=deactivate,
+ annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams],
+ direction="write",
+ ),
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentResponse,
+ construct_type(
+ type_=AgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def serialize(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[str]:
+ """
+ Serialize an Agent to the .agent file format.
+
+ Useful for storing the Agent with your code in a version control system,
+ or for editing with an AI tool.
+
+ By default, the deployed version of the Agent is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Agent.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Agent.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[str]
+ Successful Response
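+
+ Examples
+ --------
+ A minimal usage sketch, assuming the public `client.agents` client mirrors
+ this raw method (the ID is illustrative):
+
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+
+ async def main() -> None:
+     agent_file = await client.agents.serialize(
+         id="ag_1234",  # illustrative Agent ID
+     )
+     print(agent_file)  # raw .agent file contents
+
+
+ asyncio.run(main())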
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"agents/{jsonable_encoder(id)}/serialize",
+ method="GET",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return _response.text # type: ignore
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def deserialize(
+ self, *, agent: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncHttpResponse[AgentKernelRequest]:
+ """
+ Deserialize an Agent from the .agent file format.
+
+ This returns the subset of an Agent's attributes that defines the Agent version
+ (e.g. `model`, `temperature`, etc.).
+
+ Parameters
+ ----------
+ agent : str
+ The serialized Agent in the .agent file format.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[AgentKernelRequest]
+ Successful Response
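+
+ Examples
+ --------
+ A minimal round-trip sketch, assuming the public `client.agents` client
+ mirrors this raw method (the ID is illustrative):
+
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(api_key="YOUR_API_KEY")
+
+
+ async def main() -> None:
+     # Serialize an existing Agent, then deserialize the .agent text back
+     # into the version-defining attributes.
+     agent_file = await client.agents.serialize(
+         id="ag_1234",  # illustrative Agent ID
+     )
+     kernel = await client.agents.deserialize(agent=agent_file)
+
+
+ asyncio.run(main())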
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "agents/deserialize",
+ method="POST",
+ json={
+ "agent": agent,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ AgentKernelRequest,
+ construct_type(
+ type_=AgentKernelRequest, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/humanloop/agents/requests/__init__.py b/src/humanloop/agents/requests/__init__.py
new file mode 100644
index 00000000..06ce37ed
--- /dev/null
+++ b/src/humanloop/agents/requests/__init__.py
@@ -0,0 +1,25 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from .agent_log_request_agent import AgentLogRequestAgentParams
+from .agent_log_request_tool_choice import AgentLogRequestToolChoiceParams
+from .agent_request_reasoning_effort import AgentRequestReasoningEffortParams
+from .agent_request_stop import AgentRequestStopParams
+from .agent_request_template import AgentRequestTemplateParams
+from .agent_request_tools_item import AgentRequestToolsItemParams
+from .agents_call_request_agent import AgentsCallRequestAgentParams
+from .agents_call_request_tool_choice import AgentsCallRequestToolChoiceParams
+from .agents_call_stream_request_agent import AgentsCallStreamRequestAgentParams
+from .agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoiceParams
+
+__all__ = [
+ "AgentLogRequestAgentParams",
+ "AgentLogRequestToolChoiceParams",
+ "AgentRequestReasoningEffortParams",
+ "AgentRequestStopParams",
+ "AgentRequestTemplateParams",
+ "AgentRequestToolsItemParams",
+ "AgentsCallRequestAgentParams",
+ "AgentsCallRequestToolChoiceParams",
+ "AgentsCallStreamRequestAgentParams",
+ "AgentsCallStreamRequestToolChoiceParams",
+]
diff --git a/src/humanloop/agents/requests/agent_log_request_agent.py b/src/humanloop/agents/requests/agent_log_request_agent.py
new file mode 100644
index 00000000..1c6a7987
--- /dev/null
+++ b/src/humanloop/agents/requests/agent_log_request_agent.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.agent_kernel_request import AgentKernelRequestParams
+
+AgentLogRequestAgentParams = typing.Union[AgentKernelRequestParams, str]
diff --git a/src/humanloop/agents/requests/agent_log_request_tool_choice.py b/src/humanloop/agents/requests/agent_log_request_tool_choice.py
new file mode 100644
index 00000000..584112aa
--- /dev/null
+++ b/src/humanloop/agents/requests/agent_log_request_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.tool_choice import ToolChoiceParams
+
+AgentLogRequestToolChoiceParams = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams
+]
diff --git a/src/humanloop/agents/requests/agent_request_reasoning_effort.py b/src/humanloop/agents/requests/agent_request_reasoning_effort.py
new file mode 100644
index 00000000..98a991cd
--- /dev/null
+++ b/src/humanloop/agents/requests/agent_request_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.open_ai_reasoning_effort import OpenAiReasoningEffort
+
+AgentRequestReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/agents/requests/agent_request_stop.py b/src/humanloop/agents/requests/agent_request_stop.py
new file mode 100644
index 00000000..3970451c
--- /dev/null
+++ b/src/humanloop/agents/requests/agent_request_stop.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentRequestStopParams = typing.Union[str, typing.Sequence[str]]
diff --git a/src/humanloop/agents/requests/agent_request_template.py b/src/humanloop/agents/requests/agent_request_template.py
new file mode 100644
index 00000000..c251ce8e
--- /dev/null
+++ b/src/humanloop/agents/requests/agent_request_template.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.chat_message import ChatMessageParams
+
+AgentRequestTemplateParams = typing.Union[str, typing.Sequence[ChatMessageParams]]
diff --git a/src/humanloop/agents/requests/agent_request_tools_item.py b/src/humanloop/agents/requests/agent_request_tools_item.py
new file mode 100644
index 00000000..20cde136
--- /dev/null
+++ b/src/humanloop/agents/requests/agent_request_tools_item.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.agent_linked_file_request import AgentLinkedFileRequestParams
+from ...requests.agent_inline_tool import AgentInlineToolParams
+
+AgentRequestToolsItemParams = typing.Union[AgentLinkedFileRequestParams, AgentInlineToolParams]
diff --git a/src/humanloop/agents/requests/agents_call_request_agent.py b/src/humanloop/agents/requests/agents_call_request_agent.py
new file mode 100644
index 00000000..5c92d02b
--- /dev/null
+++ b/src/humanloop/agents/requests/agents_call_request_agent.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.agent_kernel_request import AgentKernelRequestParams
+
+AgentsCallRequestAgentParams = typing.Union[AgentKernelRequestParams, str]
diff --git a/src/humanloop/agents/requests/agents_call_request_tool_choice.py b/src/humanloop/agents/requests/agents_call_request_tool_choice.py
new file mode 100644
index 00000000..1e468fa0
--- /dev/null
+++ b/src/humanloop/agents/requests/agents_call_request_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.tool_choice import ToolChoiceParams
+
+AgentsCallRequestToolChoiceParams = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams
+]
diff --git a/src/humanloop/agents/requests/agents_call_stream_request_agent.py b/src/humanloop/agents/requests/agents_call_stream_request_agent.py
new file mode 100644
index 00000000..e9018a18
--- /dev/null
+++ b/src/humanloop/agents/requests/agents_call_stream_request_agent.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.agent_kernel_request import AgentKernelRequestParams
+
+AgentsCallStreamRequestAgentParams = typing.Union[AgentKernelRequestParams, str]
diff --git a/src/humanloop/agents/requests/agents_call_stream_request_tool_choice.py b/src/humanloop/agents/requests/agents_call_stream_request_tool_choice.py
new file mode 100644
index 00000000..bd068b6f
--- /dev/null
+++ b/src/humanloop/agents/requests/agents_call_stream_request_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.tool_choice import ToolChoiceParams
+
+AgentsCallStreamRequestToolChoiceParams = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams
+]
diff --git a/src/humanloop/agents/types/__init__.py b/src/humanloop/agents/types/__init__.py
new file mode 100644
index 00000000..9c8a955c
--- /dev/null
+++ b/src/humanloop/agents/types/__init__.py
@@ -0,0 +1,25 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from .agent_log_request_agent import AgentLogRequestAgent
+from .agent_log_request_tool_choice import AgentLogRequestToolChoice
+from .agent_request_reasoning_effort import AgentRequestReasoningEffort
+from .agent_request_stop import AgentRequestStop
+from .agent_request_template import AgentRequestTemplate
+from .agent_request_tools_item import AgentRequestToolsItem
+from .agents_call_request_agent import AgentsCallRequestAgent
+from .agents_call_request_tool_choice import AgentsCallRequestToolChoice
+from .agents_call_stream_request_agent import AgentsCallStreamRequestAgent
+from .agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoice
+
+__all__ = [
+ "AgentLogRequestAgent",
+ "AgentLogRequestToolChoice",
+ "AgentRequestReasoningEffort",
+ "AgentRequestStop",
+ "AgentRequestTemplate",
+ "AgentRequestToolsItem",
+ "AgentsCallRequestAgent",
+ "AgentsCallRequestToolChoice",
+ "AgentsCallStreamRequestAgent",
+ "AgentsCallStreamRequestToolChoice",
+]
diff --git a/src/humanloop/agents/types/agent_log_request_agent.py b/src/humanloop/agents/types/agent_log_request_agent.py
new file mode 100644
index 00000000..011a2b9d
--- /dev/null
+++ b/src/humanloop/agents/types/agent_log_request_agent.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.agent_kernel_request import AgentKernelRequest
+
+AgentLogRequestAgent = typing.Union[AgentKernelRequest, str]
diff --git a/src/humanloop/agents/types/agent_log_request_tool_choice.py b/src/humanloop/agents/types/agent_log_request_tool_choice.py
new file mode 100644
index 00000000..bfb576c2
--- /dev/null
+++ b/src/humanloop/agents/types/agent_log_request_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.tool_choice import ToolChoice
+
+AgentLogRequestToolChoice = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice
+]
diff --git a/src/humanloop/agents/types/agent_request_reasoning_effort.py b/src/humanloop/agents/types/agent_request_reasoning_effort.py
new file mode 100644
index 00000000..b4267202
--- /dev/null
+++ b/src/humanloop/agents/types/agent_request_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.open_ai_reasoning_effort import OpenAiReasoningEffort
+
+AgentRequestReasoningEffort = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/agents/types/agent_request_stop.py b/src/humanloop/agents/types/agent_request_stop.py
new file mode 100644
index 00000000..325a6b2e
--- /dev/null
+++ b/src/humanloop/agents/types/agent_request_stop.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentRequestStop = typing.Union[str, typing.List[str]]
diff --git a/src/humanloop/agents/types/agent_request_template.py b/src/humanloop/agents/types/agent_request_template.py
new file mode 100644
index 00000000..f6474824
--- /dev/null
+++ b/src/humanloop/agents/types/agent_request_template.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.chat_message import ChatMessage
+
+AgentRequestTemplate = typing.Union[str, typing.List[ChatMessage]]
diff --git a/src/humanloop/agents/types/agent_request_tools_item.py b/src/humanloop/agents/types/agent_request_tools_item.py
new file mode 100644
index 00000000..e6c54b88
--- /dev/null
+++ b/src/humanloop/agents/types/agent_request_tools_item.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.agent_linked_file_request import AgentLinkedFileRequest
+from ...types.agent_inline_tool import AgentInlineTool
+
+AgentRequestToolsItem = typing.Union[AgentLinkedFileRequest, AgentInlineTool]
diff --git a/src/humanloop/agents/types/agents_call_request_agent.py b/src/humanloop/agents/types/agents_call_request_agent.py
new file mode 100644
index 00000000..5f663ad3
--- /dev/null
+++ b/src/humanloop/agents/types/agents_call_request_agent.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.agent_kernel_request import AgentKernelRequest
+
+AgentsCallRequestAgent = typing.Union[AgentKernelRequest, str]
diff --git a/src/humanloop/agents/types/agents_call_request_tool_choice.py b/src/humanloop/agents/types/agents_call_request_tool_choice.py
new file mode 100644
index 00000000..6dee5a04
--- /dev/null
+++ b/src/humanloop/agents/types/agents_call_request_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.tool_choice import ToolChoice
+
+AgentsCallRequestToolChoice = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice
+]
diff --git a/src/humanloop/agents/types/agents_call_stream_request_agent.py b/src/humanloop/agents/types/agents_call_stream_request_agent.py
new file mode 100644
index 00000000..4b2654e9
--- /dev/null
+++ b/src/humanloop/agents/types/agents_call_stream_request_agent.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.agent_kernel_request import AgentKernelRequest
+
+AgentsCallStreamRequestAgent = typing.Union[AgentKernelRequest, str]
diff --git a/src/humanloop/agents/types/agents_call_stream_request_tool_choice.py b/src/humanloop/agents/types/agents_call_stream_request_tool_choice.py
new file mode 100644
index 00000000..83d264f0
--- /dev/null
+++ b/src/humanloop/agents/types/agents_call_stream_request_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.tool_choice import ToolChoice
+
+AgentsCallStreamRequestToolChoice = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice
+]
diff --git a/src/humanloop/base_client.py b/src/humanloop/base_client.py
index bf72be6a..a11298b8 100644
--- a/src/humanloop/base_client.py
+++ b/src/humanloop/base_client.py
@@ -11,6 +11,7 @@
from .datasets.client import DatasetsClient
from .evaluators.client import EvaluatorsClient
from .flows.client import FlowsClient
+from .agents.client import AgentsClient
from .directories.client import DirectoriesClient
from .files.client import FilesClient
from .evaluations.client import EvaluationsClient
@@ -21,6 +22,7 @@
from .datasets.client import AsyncDatasetsClient
from .evaluators.client import AsyncEvaluatorsClient
from .flows.client import AsyncFlowsClient
+from .agents.client import AsyncAgentsClient
from .directories.client import AsyncDirectoriesClient
from .files.client import AsyncFilesClient
from .evaluations.client import AsyncEvaluationsClient
@@ -96,6 +98,7 @@ def __init__(
self.datasets = DatasetsClient(client_wrapper=self._client_wrapper)
self.evaluators = EvaluatorsClient(client_wrapper=self._client_wrapper)
self.flows = FlowsClient(client_wrapper=self._client_wrapper)
+ self.agents = AgentsClient(client_wrapper=self._client_wrapper)
self.directories = DirectoriesClient(client_wrapper=self._client_wrapper)
self.files = FilesClient(client_wrapper=self._client_wrapper)
self.evaluations = EvaluationsClient(client_wrapper=self._client_wrapper)
@@ -171,6 +174,7 @@ def __init__(
self.datasets = AsyncDatasetsClient(client_wrapper=self._client_wrapper)
self.evaluators = AsyncEvaluatorsClient(client_wrapper=self._client_wrapper)
self.flows = AsyncFlowsClient(client_wrapper=self._client_wrapper)
+ self.agents = AsyncAgentsClient(client_wrapper=self._client_wrapper)
self.directories = AsyncDirectoriesClient(client_wrapper=self._client_wrapper)
self.files = AsyncFilesClient(client_wrapper=self._client_wrapper)
self.evaluations = AsyncEvaluationsClient(client_wrapper=self._client_wrapper)
diff --git a/src/humanloop/core/client_wrapper.py b/src/humanloop/core/client_wrapper.py
index f25dc2ca..94cf9db0 100644
--- a/src/humanloop/core/client_wrapper.py
+++ b/src/humanloop/core/client_wrapper.py
@@ -14,10 +14,10 @@ def __init__(self, *, api_key: str, base_url: str, timeout: typing.Optional[floa
def get_headers(self) -> typing.Dict[str, str]:
headers: typing.Dict[str, str] = {
- "User-Agent": "humanloop/0.8.35",
+ "User-Agent": "humanloop/0.8.36",
"X-Fern-Language": "Python",
"X-Fern-SDK-Name": "humanloop",
- "X-Fern-SDK-Version": "0.8.35",
+ "X-Fern-SDK-Version": "0.8.36",
}
headers["X-API-KEY"] = self.api_key
return headers
diff --git a/src/humanloop/files/client.py b/src/humanloop/files/client.py
index c07358d0..2d4e1855 100644
--- a/src/humanloop/files/client.py
+++ b/src/humanloop/files/client.py
@@ -7,8 +7,8 @@
from ..types.project_sort_by import ProjectSortBy
from ..types.sort_order import SortOrder
from ..core.request_options import RequestOptions
-from ..types.paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response import (
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse,
+from ..types.paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response import (
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse,
)
from .types.retrieve_by_path_files_retrieve_by_path_post_response import RetrieveByPathFilesRetrieveByPathPostResponse
from ..core.client_wrapper import AsyncClientWrapper
@@ -39,13 +39,15 @@ def list_files(
page: typing.Optional[int] = None,
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
+ directory: typing.Optional[str] = None,
template: typing.Optional[bool] = None,
type: typing.Optional[typing.Union[FileType, typing.Sequence[FileType]]] = None,
environment: typing.Optional[str] = None,
sort_by: typing.Optional[ProjectSortBy] = None,
order: typing.Optional[SortOrder] = None,
+ include_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse:
+ ) -> PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse:
"""
Get a paginated list of files.
@@ -60,6 +62,9 @@ def list_files(
name : typing.Optional[str]
Case-insensitive filter for file name.
+ directory : typing.Optional[str]
+ Case-insensitive filter for directory name.
+
template : typing.Optional[bool]
Filter to include only template files.
@@ -75,12 +80,15 @@ def list_files(
order : typing.Optional[SortOrder]
Direction to sort by.
+ include_content : typing.Optional[bool]
+ Whether to include the serialized file content in the response. Currently only supported for agents and prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse
Successful Response
Examples
@@ -96,11 +104,13 @@ def list_files(
page=page,
size=size,
name=name,
+ directory=directory,
template=template,
type=type,
environment=environment,
sort_by=sort_by,
order=order,
+ include_content=include_content,
request_options=request_options,
)
return response.data
@@ -110,6 +120,7 @@ def retrieve_by_path(
*,
path: str,
environment: typing.Optional[str] = None,
+ include_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> RetrieveByPathFilesRetrieveByPathPostResponse:
"""
@@ -123,6 +134,9 @@ def retrieve_by_path(
environment : typing.Optional[str]
Name of the Environment to retrieve a deployed Version from.
+ include_content : typing.Optional[bool]
+ Whether to include the serialized file content in the response. Currently only supported for agents and prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -143,7 +157,7 @@ def retrieve_by_path(
)
"""
response = self._raw_client.retrieve_by_path(
- path=path, environment=environment, request_options=request_options
+ path=path, environment=environment, include_content=include_content, request_options=request_options
)
return response.data
@@ -169,13 +183,15 @@ async def list_files(
page: typing.Optional[int] = None,
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
+ directory: typing.Optional[str] = None,
template: typing.Optional[bool] = None,
type: typing.Optional[typing.Union[FileType, typing.Sequence[FileType]]] = None,
environment: typing.Optional[str] = None,
sort_by: typing.Optional[ProjectSortBy] = None,
order: typing.Optional[SortOrder] = None,
+ include_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse:
+ ) -> PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse:
"""
Get a paginated list of files.
@@ -190,6 +206,9 @@ async def list_files(
name : typing.Optional[str]
Case-insensitive filter for file name.
+ directory : typing.Optional[str]
+ Case-insensitive filter for directory name.
+
template : typing.Optional[bool]
Filter to include only template files.
@@ -205,12 +224,15 @@ async def list_files(
order : typing.Optional[SortOrder]
Direction to sort by.
+ include_content : typing.Optional[bool]
+ Whether to include the serialized file content in the response. Currently only supported for agents and prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse
Successful Response
Examples
@@ -234,11 +256,13 @@ async def main() -> None:
page=page,
size=size,
name=name,
+ directory=directory,
template=template,
type=type,
environment=environment,
sort_by=sort_by,
order=order,
+ include_content=include_content,
request_options=request_options,
)
return response.data
@@ -248,6 +272,7 @@ async def retrieve_by_path(
*,
path: str,
environment: typing.Optional[str] = None,
+ include_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> RetrieveByPathFilesRetrieveByPathPostResponse:
"""
@@ -261,6 +286,9 @@ async def retrieve_by_path(
environment : typing.Optional[str]
Name of the Environment to retrieve a deployed Version from.
+ include_content : typing.Optional[bool]
+ Whether to include the serialized file content in the response. Currently only supported for agents and prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -289,6 +317,6 @@ async def main() -> None:
asyncio.run(main())
"""
response = await self._raw_client.retrieve_by_path(
- path=path, environment=environment, request_options=request_options
+ path=path, environment=environment, include_content=include_content, request_options=request_options
)
return response.data
diff --git a/src/humanloop/files/raw_client.py b/src/humanloop/files/raw_client.py
index 19f52cf2..1a30a892 100644
--- a/src/humanloop/files/raw_client.py
+++ b/src/humanloop/files/raw_client.py
@@ -7,8 +7,8 @@
from ..types.sort_order import SortOrder
from ..core.request_options import RequestOptions
from ..core.http_response import HttpResponse
-from ..types.paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response import (
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse,
+from ..types.paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response import (
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse,
)
from ..core.unchecked_base_model import construct_type
from ..errors.unprocessable_entity_error import UnprocessableEntityError
@@ -33,13 +33,17 @@ def list_files(
page: typing.Optional[int] = None,
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
+ directory: typing.Optional[str] = None,
template: typing.Optional[bool] = None,
type: typing.Optional[typing.Union[FileType, typing.Sequence[FileType]]] = None,
environment: typing.Optional[str] = None,
sort_by: typing.Optional[ProjectSortBy] = None,
order: typing.Optional[SortOrder] = None,
+ include_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> HttpResponse[PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse]:
+ ) -> HttpResponse[
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse
+ ]:
"""
Get a paginated list of files.
@@ -54,6 +58,9 @@ def list_files(
name : typing.Optional[str]
Case-insensitive filter for file name.
+ directory : typing.Optional[str]
+ Case-insensitive filter for directory name.
+
template : typing.Optional[bool]
Filter to include only template files.
@@ -69,12 +76,15 @@ def list_files(
order : typing.Optional[SortOrder]
Direction to sort by.
+ include_content : typing.Optional[bool]
+ Whether to include the serialized file content in the response. Currently only supported for agents and prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- HttpResponse[PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse]
+ HttpResponse[PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse]
Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
@@ -84,20 +94,22 @@ def list_files(
"page": page,
"size": size,
"name": name,
+ "directory": directory,
"template": template,
"type": type,
"environment": environment,
"sort_by": sort_by,
"order": order,
+ "include_content": include_content,
},
request_options=request_options,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse,
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse,
construct_type(
- type_=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse, # type: ignore
+ type_=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse, # type: ignore
object_=_response.json(),
),
)
@@ -122,6 +134,7 @@ def retrieve_by_path(
*,
path: str,
environment: typing.Optional[str] = None,
+ include_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[RetrieveByPathFilesRetrieveByPathPostResponse]:
"""
@@ -135,6 +148,9 @@ def retrieve_by_path(
environment : typing.Optional[str]
Name of the Environment to retrieve a deployed Version from.
+ include_content : typing.Optional[bool]
+ Whether to include the serialized file content in the response. Currently only supported for agents and prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -148,6 +164,7 @@ def retrieve_by_path(
method="POST",
params={
"environment": environment,
+ "include_content": include_content,
},
json={
"path": path,
@@ -194,13 +211,17 @@ async def list_files(
page: typing.Optional[int] = None,
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
+ directory: typing.Optional[str] = None,
template: typing.Optional[bool] = None,
type: typing.Optional[typing.Union[FileType, typing.Sequence[FileType]]] = None,
environment: typing.Optional[str] = None,
sort_by: typing.Optional[ProjectSortBy] = None,
order: typing.Optional[SortOrder] = None,
+ include_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> AsyncHttpResponse[PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse]:
+ ) -> AsyncHttpResponse[
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse
+ ]:
"""
Get a paginated list of files.
@@ -215,6 +236,9 @@ async def list_files(
name : typing.Optional[str]
Case-insensitive filter for file name.
+ directory : typing.Optional[str]
+ Case-insensitive filter for directory name.
+
template : typing.Optional[bool]
Filter to include only template files.
@@ -230,12 +254,15 @@ async def list_files(
order : typing.Optional[SortOrder]
Direction to sort by.
+ include_content : typing.Optional[bool]
+ Whether to include the serialized file content in the response. Currently only supported for agents and prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- AsyncHttpResponse[PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse]
+ AsyncHttpResponse[PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse]
Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
@@ -245,20 +272,22 @@ async def list_files(
"page": page,
"size": size,
"name": name,
+ "directory": directory,
"template": template,
"type": type,
"environment": environment,
"sort_by": sort_by,
"order": order,
+ "include_content": include_content,
},
request_options=request_options,
)
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse,
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse,
construct_type(
- type_=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse, # type: ignore
+ type_=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse, # type: ignore
object_=_response.json(),
),
)
@@ -283,6 +312,7 @@ async def retrieve_by_path(
*,
path: str,
environment: typing.Optional[str] = None,
+ include_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[RetrieveByPathFilesRetrieveByPathPostResponse]:
"""
@@ -296,6 +326,9 @@ async def retrieve_by_path(
environment : typing.Optional[str]
Name of the Environment to retrieve a deployed Version from.
+ include_content : typing.Optional[bool]
+ Whether to include the serialized file content in the response. Currently only supported for agents and prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -309,6 +342,7 @@ async def retrieve_by_path(
method="POST",
params={
"environment": environment,
+ "include_content": include_content,
},
json={
"path": path,
diff --git a/src/humanloop/files/requests/retrieve_by_path_files_retrieve_by_path_post_response.py b/src/humanloop/files/requests/retrieve_by_path_files_retrieve_by_path_post_response.py
index c1618edb..8c070ab3 100644
--- a/src/humanloop/files/requests/retrieve_by_path_files_retrieve_by_path_post_response.py
+++ b/src/humanloop/files/requests/retrieve_by_path_files_retrieve_by_path_post_response.py
@@ -6,7 +6,13 @@
from ...requests.dataset_response import DatasetResponseParams
from ...requests.evaluator_response import EvaluatorResponseParams
from ...requests.flow_response import FlowResponseParams
+from ...requests.agent_response import AgentResponseParams
RetrieveByPathFilesRetrieveByPathPostResponseParams = typing.Union[
- PromptResponseParams, ToolResponseParams, DatasetResponseParams, EvaluatorResponseParams, FlowResponseParams
+ PromptResponseParams,
+ ToolResponseParams,
+ DatasetResponseParams,
+ EvaluatorResponseParams,
+ FlowResponseParams,
+ AgentResponseParams,
]
diff --git a/src/humanloop/files/types/retrieve_by_path_files_retrieve_by_path_post_response.py b/src/humanloop/files/types/retrieve_by_path_files_retrieve_by_path_post_response.py
index 48415fc9..46ea271a 100644
--- a/src/humanloop/files/types/retrieve_by_path_files_retrieve_by_path_post_response.py
+++ b/src/humanloop/files/types/retrieve_by_path_files_retrieve_by_path_post_response.py
@@ -6,7 +6,8 @@
from ...types.dataset_response import DatasetResponse
from ...types.evaluator_response import EvaluatorResponse
from ...types.flow_response import FlowResponse
+from ...types.agent_response import AgentResponse
RetrieveByPathFilesRetrieveByPathPostResponse = typing.Union[
- PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse
+ PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse, AgentResponse
]
diff --git a/src/humanloop/flows/client.py b/src/humanloop/flows/client.py
index a11776fc..bcb9491c 100644
--- a/src/humanloop/flows/client.py
+++ b/src/humanloop/flows/client.py
@@ -214,10 +214,10 @@ def log(
output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.",
log_status="incomplete",
start_time=datetime.datetime.fromisoformat(
- "2024-07-08 19:40:35+00:00",
+ "2024-07-08 21:40:35+00:00",
),
end_time=datetime.datetime.fromisoformat(
- "2024-07-08 19:40:39+00:00",
+ "2024-07-08 21:40:39+00:00",
),
)
"""
@@ -1128,10 +1128,10 @@ async def main() -> None:
output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.",
log_status="incomplete",
start_time=datetime.datetime.fromisoformat(
- "2024-07-08 19:40:35+00:00",
+ "2024-07-08 21:40:35+00:00",
),
end_time=datetime.datetime.fromisoformat(
- "2024-07-08 19:40:39+00:00",
+ "2024-07-08 21:40:39+00:00",
),
)
diff --git a/src/humanloop/logs/client.py b/src/humanloop/logs/client.py
index 17007c1b..b16d1f6b 100644
--- a/src/humanloop/logs/client.py
+++ b/src/humanloop/logs/client.py
@@ -99,7 +99,7 @@ def list(
If provided, limit the response to a random subset of logs from the filtered results. (This will be an approximate sample, not a strict limit.)
include_trace_children : typing.Optional[bool]
- If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow Logs.
+ If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow or Agent Logs.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -338,7 +338,7 @@ async def list(
If provided, limit the response to a random subset of logs from the filtered results. (This will be an approximate sample, not a strict limit.)
include_trace_children : typing.Optional[bool]
- If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow Logs.
+ If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow or Agent Logs.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
diff --git a/src/humanloop/prompts/__init__.py b/src/humanloop/prompts/__init__.py
index c1147ff2..557dcc5c 100644
--- a/src/humanloop/prompts/__init__.py
+++ b/src/humanloop/prompts/__init__.py
@@ -1,33 +1,49 @@
# This file was auto-generated by Fern from our API Definition.
from .types import (
+ PromptLogRequestPrompt,
PromptLogRequestToolChoice,
PromptLogUpdateRequestToolChoice,
+ PromptRequestReasoningEffort,
PromptRequestStop,
PromptRequestTemplate,
+ PromptsCallRequestPrompt,
PromptsCallRequestToolChoice,
+ PromptsCallStreamRequestPrompt,
PromptsCallStreamRequestToolChoice,
)
from .requests import (
+ PromptLogRequestPromptParams,
PromptLogRequestToolChoiceParams,
PromptLogUpdateRequestToolChoiceParams,
+ PromptRequestReasoningEffortParams,
PromptRequestStopParams,
PromptRequestTemplateParams,
+ PromptsCallRequestPromptParams,
PromptsCallRequestToolChoiceParams,
+ PromptsCallStreamRequestPromptParams,
PromptsCallStreamRequestToolChoiceParams,
)
__all__ = [
+ "PromptLogRequestPrompt",
+ "PromptLogRequestPromptParams",
"PromptLogRequestToolChoice",
"PromptLogRequestToolChoiceParams",
"PromptLogUpdateRequestToolChoice",
"PromptLogUpdateRequestToolChoiceParams",
+ "PromptRequestReasoningEffort",
+ "PromptRequestReasoningEffortParams",
"PromptRequestStop",
"PromptRequestStopParams",
"PromptRequestTemplate",
"PromptRequestTemplateParams",
+ "PromptsCallRequestPrompt",
+ "PromptsCallRequestPromptParams",
"PromptsCallRequestToolChoice",
"PromptsCallRequestToolChoiceParams",
+ "PromptsCallStreamRequestPrompt",
+ "PromptsCallStreamRequestPromptParams",
"PromptsCallStreamRequestToolChoice",
"PromptsCallStreamRequestToolChoiceParams",
]
diff --git a/src/humanloop/prompts/client.py b/src/humanloop/prompts/client.py
index e2fff4c3..865c033f 100644
--- a/src/humanloop/prompts/client.py
+++ b/src/humanloop/prompts/client.py
@@ -5,7 +5,7 @@
from .raw_client import RawPromptsClient
from ..requests.chat_message import ChatMessageParams
from .requests.prompt_log_request_tool_choice import PromptLogRequestToolChoiceParams
-from ..requests.prompt_kernel_request import PromptKernelRequestParams
+from .requests.prompt_log_request_prompt import PromptLogRequestPromptParams
import datetime as dt
from ..types.log_status import LogStatus
from ..core.request_options import RequestOptions
@@ -13,9 +13,11 @@
from .requests.prompt_log_update_request_tool_choice import PromptLogUpdateRequestToolChoiceParams
from ..types.log_response import LogResponse
from .requests.prompts_call_stream_request_tool_choice import PromptsCallStreamRequestToolChoiceParams
+from .requests.prompts_call_stream_request_prompt import PromptsCallStreamRequestPromptParams
from ..requests.provider_api_keys import ProviderApiKeysParams
from ..types.prompt_call_stream_response import PromptCallStreamResponse
from .requests.prompts_call_request_tool_choice import PromptsCallRequestToolChoiceParams
+from .requests.prompts_call_request_prompt import PromptsCallRequestPromptParams
from ..types.prompt_call_response import PromptCallResponse
from ..types.project_sort_by import ProjectSortBy
from ..types.sort_order import SortOrder
@@ -33,7 +35,7 @@
from ..types.model_providers import ModelProviders
from .requests.prompt_request_stop import PromptRequestStopParams
from ..requests.response_format import ResponseFormatParams
-from ..types.reasoning_effort import ReasoningEffort
+from .requests.prompt_request_reasoning_effort import PromptRequestReasoningEffortParams
from ..requests.tool_function import ToolFunctionParams
from ..types.populate_template_response import PopulateTemplateResponse
from ..types.list_prompts import ListPrompts
@@ -44,6 +46,7 @@
from ..requests.evaluator_activation_deactivation_request_deactivate_item import (
EvaluatorActivationDeactivationRequestDeactivateItemParams,
)
+from ..types.prompt_kernel_request import PromptKernelRequest
from ..core.client_wrapper import AsyncClientWrapper
from .raw_client import AsyncRawPromptsClient
from ..core.pagination import AsyncPager
@@ -84,7 +87,7 @@ def log(
finish_reason: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptLogRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptLogRequestPromptParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -165,8 +168,11 @@ def log(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptLogRequestPromptParams]
+ The prompt configuration to use. Two formats are supported:
+ - A `'PromptKernelRequest'` object containing the prompt configuration
+ - A string containing a serialized .prompt file
+ A new Prompt version will be created if the provided details are new.
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -256,7 +262,7 @@ def log(
messages=[{"role": "user", "content": "What really happened at Roswell?"}],
inputs={"person": "Trump"},
created_at=datetime.datetime.fromisoformat(
- "2024-07-18 21:29:35.178000+00:00",
+ "2024-07-18 23:29:35.178000+00:00",
),
provider_latency=6.5931549072265625,
output_message={
@@ -479,7 +485,7 @@ def call_stream(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallStreamRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallStreamRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -537,8 +543,11 @@ def call_stream(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
+ The prompt configuration to use. Two formats are supported:
+ - A `'PromptKernelRequest'` object containing the prompt configuration
+ - A string containing a serialized .prompt file
+ A new Prompt version will be created if the provided details are new.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -648,7 +657,7 @@ def call(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -706,8 +715,11 @@ def call(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallRequestPromptParams]
+ The prompt configuration to use. Two formats are supported:
+ - A `'PromptKernelRequest'` object containing the prompt configuration
+ - A string containing a serialized .prompt file
+ A new Prompt version will be created if the provided details are new.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -962,7 +974,7 @@ def upsert(
other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
seed: typing.Optional[int] = OMIT,
response_format: typing.Optional[ResponseFormatParams] = OMIT,
- reasoning_effort: typing.Optional[ReasoningEffort] = OMIT,
+ reasoning_effort: typing.Optional[PromptRequestReasoningEffortParams] = OMIT,
tools: typing.Optional[typing.Sequence[ToolFunctionParams]] = OMIT,
linked_tools: typing.Optional[typing.Sequence[str]] = OMIT,
attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -1037,8 +1049,8 @@ def upsert(
response_format : typing.Optional[ResponseFormatParams]
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
- reasoning_effort : typing.Optional[ReasoningEffort]
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+ reasoning_effort : typing.Optional[PromptRequestReasoningEffortParams]
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an `OpenAiReasoningEffort` enum value. Anthropic reasoning models expect an integer, which sets the maximum reasoning token budget.
tools : typing.Optional[typing.Sequence[ToolFunctionParams]]
The tool specification that the model can choose to call if Tool calling is supported.
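A short sketch of the widened `reasoning_effort` union (a configured client is assumed; model names, paths, and values are illustrative):

    # OpenAI reasoning models take an OpenAiReasoningEffort enum value.
    client.prompts.upsert(
        path="reasoner-openai",
        model="o3-mini",
        reasoning_effort="high",
    )

    # Anthropic reasoning models take an integer maximum reasoning token budget.
    client.prompts.upsert(
        path="reasoner-anthropic",
        model="claude-3-7-sonnet-latest",
        reasoning_effort=4096,
    )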
@@ -1599,6 +1611,93 @@ def update_monitoring(
)
return response.data
+ def serialize(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> str:
+ """
+ Serialize a Prompt to the .prompt file format.
+
+ Useful for storing the Prompt with your code in a version control system,
+ or for editing with an AI tool.
+
+ By default, the deployed version of the Prompt is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Prompt.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Prompt.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Prompt to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ str
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.prompts.serialize(
+ id="id",
+ )
+ """
+ response = self._raw_client.serialize(
+ id, version_id=version_id, environment=environment, request_options=request_options
+ )
+ return response.data
+
+ def deserialize(
+ self, *, prompt: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> PromptKernelRequest:
+ """
+ Deserialize a Prompt from the .prompt file format.
+
+ This returns a subset of the attributes required by a Prompt.
+ This subset is the part that defines the Prompt version (e.g. `model`, `temperature`, etc.).
+
+ Parameters
+ ----------
+ prompt : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ PromptKernelRequest
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.prompts.deserialize(
+ prompt="prompt",
+ )
+ """
+ response = self._raw_client.deserialize(prompt=prompt, request_options=request_options)
+ return response.data
+
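Taken together, the two new methods support a simple round trip; a sketch (the Prompt ID and filename are illustrative):

    serialized = client.prompts.serialize(id="pr_1234")
    with open("my_prompt.prompt", "w") as f:
        f.write(serialized)

    # ...edit the file under version control or with an AI tool, then recover
    # the version-defining attributes:
    with open("my_prompt.prompt") as f:
        kernel = client.prompts.deserialize(prompt=f.read())
    print(kernel.model)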
class AsyncPromptsClient:
def __init__(self, *, client_wrapper: AsyncClientWrapper):
@@ -1632,7 +1731,7 @@ async def log(
finish_reason: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptLogRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptLogRequestPromptParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -1713,8 +1812,11 @@ async def log(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptLogRequestPromptParams]
+ The prompt configuration to use. Two formats are supported:
+ - A `'PromptKernelRequest'` object containing the prompt configuration
+ - A string containing a serialized .prompt file
+ A new Prompt version will be created if the provided details are new.
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -1810,7 +1912,7 @@ async def main() -> None:
],
inputs={"person": "Trump"},
created_at=datetime.datetime.fromisoformat(
- "2024-07-18 21:29:35.178000+00:00",
+ "2024-07-18 23:29:35.178000+00:00",
),
provider_latency=6.5931549072265625,
output_message={
@@ -2044,7 +2146,7 @@ async def call_stream(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallStreamRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallStreamRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2102,8 +2204,11 @@ async def call_stream(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
+ The prompt configuration to use. Two formats are supported:
+ - A `'PromptKernelRequest'` object containing the prompt configuration
+ - A string containing a serialized .prompt file
+ A new Prompt version will be created if the provided details are new.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -2222,7 +2327,7 @@ async def call(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2280,8 +2385,11 @@ async def call(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallRequestPromptParams]
+ The prompt configuration to use. Two formats are supported:
+ - A `'PromptKernelRequest'` object containing the prompt configuration
+ - A string containing a serialized .prompt file
+ A new Prompt version will be created if the provided details are new.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -2552,7 +2660,7 @@ async def upsert(
other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
seed: typing.Optional[int] = OMIT,
response_format: typing.Optional[ResponseFormatParams] = OMIT,
- reasoning_effort: typing.Optional[ReasoningEffort] = OMIT,
+ reasoning_effort: typing.Optional[PromptRequestReasoningEffortParams] = OMIT,
tools: typing.Optional[typing.Sequence[ToolFunctionParams]] = OMIT,
linked_tools: typing.Optional[typing.Sequence[str]] = OMIT,
attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2627,8 +2735,8 @@ async def upsert(
response_format : typing.Optional[ResponseFormatParams]
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
- reasoning_effort : typing.Optional[ReasoningEffort]
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+ reasoning_effort : typing.Optional[PromptRequestReasoningEffortParams]
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an `OpenAiReasoningEffort` enum value. Anthropic reasoning models expect an integer, which sets the maximum reasoning token budget.
tools : typing.Optional[typing.Sequence[ToolFunctionParams]]
The tool specification that the model can choose to call if Tool calling is supported.
@@ -3284,3 +3392,106 @@ async def main() -> None:
id, activate=activate, deactivate=deactivate, request_options=request_options
)
return response.data
+
+ async def serialize(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> str:
+ """
+ Serialize a Prompt to the .prompt file format.
+
+ Useful for storing the Prompt with your code in a version control system,
+ or for editing with an AI tool.
+
+ By default, the deployed version of the Prompt is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Prompt.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Prompt.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Prompt to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ str
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.prompts.serialize(
+ id="id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.serialize(
+ id, version_id=version_id, environment=environment, request_options=request_options
+ )
+ return response.data
+
+ async def deserialize(
+ self, *, prompt: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> PromptKernelRequest:
+ """
+ Deserialize a Prompt from the .prompt file format.
+
+ This returns a subset of the attributes required by a Prompt.
+ This subset is the part that defines the Prompt version (e.g. `model`, `temperature`, etc.).
+
+ Parameters
+ ----------
+ prompt : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ PromptKernelRequest
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.prompts.deserialize(
+ prompt="prompt",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.deserialize(prompt=prompt, request_options=request_options)
+ return response.data
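The async client mirrors the same round trip; a sketch in the style of the examples above (the Prompt ID is illustrative):

    import asyncio

    from humanloop import AsyncHumanloop

    client = AsyncHumanloop(api_key="YOUR_API_KEY")


    async def main() -> None:
        serialized = await client.prompts.serialize(id="pr_1234")
        kernel = await client.prompts.deserialize(prompt=serialized)
        print(kernel.model)


    asyncio.run(main())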
diff --git a/src/humanloop/prompts/raw_client.py b/src/humanloop/prompts/raw_client.py
index b5334c82..f809f1b1 100644
--- a/src/humanloop/prompts/raw_client.py
+++ b/src/humanloop/prompts/raw_client.py
@@ -4,7 +4,7 @@
from ..core.client_wrapper import SyncClientWrapper
from ..requests.chat_message import ChatMessageParams
from .requests.prompt_log_request_tool_choice import PromptLogRequestToolChoiceParams
-from ..requests.prompt_kernel_request import PromptKernelRequestParams
+from .requests.prompt_log_request_prompt import PromptLogRequestPromptParams
import datetime as dt
from ..types.log_status import LogStatus
from ..core.request_options import RequestOptions
@@ -20,11 +20,13 @@
from ..types.log_response import LogResponse
from ..core.jsonable_encoder import jsonable_encoder
from .requests.prompts_call_stream_request_tool_choice import PromptsCallStreamRequestToolChoiceParams
+from .requests.prompts_call_stream_request_prompt import PromptsCallStreamRequestPromptParams
from ..requests.provider_api_keys import ProviderApiKeysParams
from ..types.prompt_call_stream_response import PromptCallStreamResponse
import httpx_sse
import contextlib
from .requests.prompts_call_request_tool_choice import PromptsCallRequestToolChoiceParams
+from .requests.prompts_call_request_prompt import PromptsCallRequestPromptParams
from ..types.prompt_call_response import PromptCallResponse
from ..types.model_endpoints import ModelEndpoints
from .requests.prompt_request_template import PromptRequestTemplateParams
@@ -32,7 +34,7 @@
from ..types.model_providers import ModelProviders
from .requests.prompt_request_stop import PromptRequestStopParams
from ..requests.response_format import ResponseFormatParams
-from ..types.reasoning_effort import ReasoningEffort
+from .requests.prompt_request_reasoning_effort import PromptRequestReasoningEffortParams
from ..requests.tool_function import ToolFunctionParams
from ..types.prompt_response import PromptResponse
from ..types.populate_template_response import PopulateTemplateResponse
@@ -44,6 +46,7 @@
from ..requests.evaluator_activation_deactivation_request_deactivate_item import (
EvaluatorActivationDeactivationRequestDeactivateItemParams,
)
+from ..types.prompt_kernel_request import PromptKernelRequest
from ..core.client_wrapper import AsyncClientWrapper
from ..core.http_response import AsyncHttpResponse
@@ -72,7 +75,7 @@ def log(
finish_reason: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptLogRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptLogRequestPromptParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -153,8 +156,11 @@ def log(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptLogRequestPromptParams]
+ The prompt configuration to use. Two formats are supported:
+ - A `'PromptKernelRequest'` object containing the prompt configuration
+ - A string containing a serialized .prompt file
+ A new Prompt version will be created if the provided details are new.
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -248,7 +254,7 @@ def log(
object_=tool_choice, annotation=PromptLogRequestToolChoiceParams, direction="write"
),
"prompt": convert_and_respect_annotation_metadata(
- object_=prompt, annotation=PromptKernelRequestParams, direction="write"
+ object_=prompt, annotation=PromptLogRequestPromptParams, direction="write"
),
"start_time": start_time,
"end_time": end_time,
@@ -495,7 +501,7 @@ def call_stream(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallStreamRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallStreamRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -553,8 +559,11 @@ def call_stream(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
+ The prompt configuration to use. Two formats are supported:
+ - A `'PromptKernelRequest'` object containing the prompt configuration
+ - A string containing a serialized .prompt file
+ A new Prompt version will be created if the provided details are new.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -632,7 +641,7 @@ def call_stream(
object_=tool_choice, annotation=PromptsCallStreamRequestToolChoiceParams, direction="write"
),
"prompt": convert_and_respect_annotation_metadata(
- object_=prompt, annotation=PromptKernelRequestParams, direction="write"
+ object_=prompt, annotation=PromptsCallStreamRequestPromptParams, direction="write"
),
"inputs": inputs,
"source": source,
@@ -705,7 +714,7 @@ def call(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -763,8 +772,11 @@ def call(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallRequestPromptParams]
+ The prompt configuration to use. Two formats are supported:
+ - A `'PromptKernelRequest'` object containing the prompt configuration
+ - A string containing a serialized .prompt file
+ A new Prompt version will be created if the provided details are new.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -842,7 +854,7 @@ def call(
object_=tool_choice, annotation=PromptsCallRequestToolChoiceParams, direction="write"
),
"prompt": convert_and_respect_annotation_metadata(
- object_=prompt, annotation=PromptKernelRequestParams, direction="write"
+ object_=prompt, annotation=PromptsCallRequestPromptParams, direction="write"
),
"inputs": inputs,
"source": source,
@@ -915,7 +927,7 @@ def upsert(
other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
seed: typing.Optional[int] = OMIT,
response_format: typing.Optional[ResponseFormatParams] = OMIT,
- reasoning_effort: typing.Optional[ReasoningEffort] = OMIT,
+ reasoning_effort: typing.Optional[PromptRequestReasoningEffortParams] = OMIT,
tools: typing.Optional[typing.Sequence[ToolFunctionParams]] = OMIT,
linked_tools: typing.Optional[typing.Sequence[str]] = OMIT,
attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -990,8 +1002,8 @@ def upsert(
response_format : typing.Optional[ResponseFormatParams]
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
- reasoning_effort : typing.Optional[ReasoningEffort]
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+ reasoning_effort : typing.Optional[PromptRequestReasoningEffortParams]
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an `OpenAiReasoningEffort` enum value. Anthropic reasoning models expect an integer, which sets the maximum reasoning token budget.
tools : typing.Optional[typing.Sequence[ToolFunctionParams]]
The tool specification that the model can choose to call if Tool calling is supported.
@@ -1051,7 +1063,9 @@ def upsert(
"response_format": convert_and_respect_annotation_metadata(
object_=response_format, annotation=ResponseFormatParams, direction="write"
),
- "reasoning_effort": reasoning_effort,
+ "reasoning_effort": convert_and_respect_annotation_metadata(
+ object_=reasoning_effort, annotation=PromptRequestReasoningEffortParams, direction="write"
+ ),
"tools": convert_and_respect_annotation_metadata(
object_=tools, annotation=typing.Sequence[ToolFunctionParams], direction="write"
),
@@ -1744,6 +1758,127 @@ def update_monitoring(
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
+ def serialize(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[str]:
+ """
+ Serialize a Prompt to the .prompt file format.
+
+ Useful for storing the Prompt with your code in a version control system,
+ or for editing with an AI tool.
+
+ By default, the deployed version of the Prompt is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Prompt.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Prompt.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Prompt to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[str]
+ Successful Response
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"prompts/{jsonable_encoder(id)}/serialize",
+ method="GET",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return HttpResponse(response=_response, data=_response.text)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def deserialize(
+ self, *, prompt: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> HttpResponse[PromptKernelRequest]:
+ """
+ Deserialize a Prompt from the .prompt file format.
+
+ This returns a subset of the attributes required by a Prompt.
+ This subset is the part that defines the Prompt version (e.g. `model`, `temperature`, etc.).
+
+ Parameters
+ ----------
+ prompt : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[PromptKernelRequest]
+ Successful Response
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "prompts/deserialize",
+ method="POST",
+ json={
+ "prompt": prompt,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ PromptKernelRequest,
+ construct_type(
+ type_=PromptKernelRequest, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
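Both raw endpoints funnel failures through the same pattern: a 422 is re-raised as UnprocessableEntityError and anything else as ApiError carrying the status code and body. A handling sketch (the import path assumes the standard Fern layout used by this package):

    from humanloop.core.api_error import ApiError

    try:
        kernel = client.prompts.deserialize(prompt="not a valid .prompt file")
    except ApiError as exc:  # UnprocessableEntityError subclasses ApiError
        print(exc.status_code, exc.body)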
class AsyncRawPromptsClient:
def __init__(self, *, client_wrapper: AsyncClientWrapper):
@@ -1766,7 +1901,7 @@ async def log(
finish_reason: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptLogRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptLogRequestPromptParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -1847,8 +1982,11 @@ async def log(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptLogRequestPromptParams]
+ The prompt configuration to use. Two formats are supported:
+ - A `'PromptKernelRequest'` object containing the prompt configuration
+ - A string containing a serialized .prompt file
+ A new Prompt version will be created if the provided details are new.
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -1942,7 +2080,7 @@ async def log(
object_=tool_choice, annotation=PromptLogRequestToolChoiceParams, direction="write"
),
"prompt": convert_and_respect_annotation_metadata(
- object_=prompt, annotation=PromptKernelRequestParams, direction="write"
+ object_=prompt, annotation=PromptLogRequestPromptParams, direction="write"
),
"start_time": start_time,
"end_time": end_time,
@@ -2189,7 +2327,7 @@ async def call_stream(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallStreamRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallStreamRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2247,8 +2385,11 @@ async def call_stream(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
+ The prompt configuration to use. Two formats are supported:
+ - A `'PromptKernelRequest'` object containing the prompt configuration
+ - A string containing a serialized .prompt file
+ A new Prompt version will be created if the provided details are new.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -2326,7 +2467,7 @@ async def call_stream(
object_=tool_choice, annotation=PromptsCallStreamRequestToolChoiceParams, direction="write"
),
"prompt": convert_and_respect_annotation_metadata(
- object_=prompt, annotation=PromptKernelRequestParams, direction="write"
+ object_=prompt, annotation=PromptsCallStreamRequestPromptParams, direction="write"
),
"inputs": inputs,
"source": source,
@@ -2399,7 +2540,7 @@ async def call(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2457,8 +2598,11 @@ async def call(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallRequestPromptParams]
+ The prompt configuration to use. Two formats are supported:
+ - A `'PromptKernelRequest'` object containing the prompt configuration
+ - A string containing a serialized .prompt file
+ A new Prompt version will be created if the provided details are new.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -2536,7 +2680,7 @@ async def call(
object_=tool_choice, annotation=PromptsCallRequestToolChoiceParams, direction="write"
),
"prompt": convert_and_respect_annotation_metadata(
- object_=prompt, annotation=PromptKernelRequestParams, direction="write"
+ object_=prompt, annotation=PromptsCallRequestPromptParams, direction="write"
),
"inputs": inputs,
"source": source,
@@ -2609,7 +2753,7 @@ async def upsert(
other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
seed: typing.Optional[int] = OMIT,
response_format: typing.Optional[ResponseFormatParams] = OMIT,
- reasoning_effort: typing.Optional[ReasoningEffort] = OMIT,
+ reasoning_effort: typing.Optional[PromptRequestReasoningEffortParams] = OMIT,
tools: typing.Optional[typing.Sequence[ToolFunctionParams]] = OMIT,
linked_tools: typing.Optional[typing.Sequence[str]] = OMIT,
attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2684,8 +2828,8 @@ async def upsert(
response_format : typing.Optional[ResponseFormatParams]
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
- reasoning_effort : typing.Optional[ReasoningEffort]
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+ reasoning_effort : typing.Optional[PromptRequestReasoningEffortParams]
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an `OpenAiReasoningEffort` enum value. Anthropic reasoning models expect an integer, which sets the maximum reasoning token budget.
tools : typing.Optional[typing.Sequence[ToolFunctionParams]]
The tool specification that the model can choose to call if Tool calling is supported.
@@ -2745,7 +2889,9 @@ async def upsert(
"response_format": convert_and_respect_annotation_metadata(
object_=response_format, annotation=ResponseFormatParams, direction="write"
),
- "reasoning_effort": reasoning_effort,
+ "reasoning_effort": convert_and_respect_annotation_metadata(
+ object_=reasoning_effort, annotation=PromptRequestReasoningEffortParams, direction="write"
+ ),
"tools": convert_and_respect_annotation_metadata(
object_=tools, annotation=typing.Sequence[ToolFunctionParams], direction="write"
),
@@ -3439,3 +3585,124 @@ async def update_monitoring(
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def serialize(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[str]:
+ """
+ Serialize a Prompt to the .prompt file format.
+
+ Useful for storing the Prompt with your code in a version control system,
+ or for editing with an AI tool.
+
+ By default, the deployed version of the Prompt is returned. Use the query parameters
+ `version_id` or `environment` to target a specific version of the Prompt.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Prompt.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Prompt to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[str]
+ Successful Response
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"prompts/{jsonable_encoder(id)}/serialize",
+ method="GET",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return AsyncHttpResponse(response=_response, data=_response.text)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def deserialize(
+ self, *, prompt: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncHttpResponse[PromptKernelRequest]:
+ """
+ Deserialize a Prompt from the .prompt file format.
+
+ This returns a subset of the attributes required by a Prompt.
+ This subset is the part that defines the Prompt version (e.g. `model`, `temperature`, etc.).
+
+ Parameters
+ ----------
+ prompt : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[PromptKernelRequest]
+ Successful Response
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "prompts/deserialize",
+ method="POST",
+ json={
+ "prompt": prompt,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ PromptKernelRequest,
+ construct_type(
+ type_=PromptKernelRequest, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/src/humanloop/prompts/requests/__init__.py b/src/humanloop/prompts/requests/__init__.py
index c5119552..ae1cfb6a 100644
--- a/src/humanloop/prompts/requests/__init__.py
+++ b/src/humanloop/prompts/requests/__init__.py
@@ -1,17 +1,25 @@
# This file was auto-generated by Fern from our API Definition.
+from .prompt_log_request_prompt import PromptLogRequestPromptParams
from .prompt_log_request_tool_choice import PromptLogRequestToolChoiceParams
from .prompt_log_update_request_tool_choice import PromptLogUpdateRequestToolChoiceParams
+from .prompt_request_reasoning_effort import PromptRequestReasoningEffortParams
from .prompt_request_stop import PromptRequestStopParams
from .prompt_request_template import PromptRequestTemplateParams
+from .prompts_call_request_prompt import PromptsCallRequestPromptParams
from .prompts_call_request_tool_choice import PromptsCallRequestToolChoiceParams
+from .prompts_call_stream_request_prompt import PromptsCallStreamRequestPromptParams
from .prompts_call_stream_request_tool_choice import PromptsCallStreamRequestToolChoiceParams
__all__ = [
+ "PromptLogRequestPromptParams",
"PromptLogRequestToolChoiceParams",
"PromptLogUpdateRequestToolChoiceParams",
+ "PromptRequestReasoningEffortParams",
"PromptRequestStopParams",
"PromptRequestTemplateParams",
+ "PromptsCallRequestPromptParams",
"PromptsCallRequestToolChoiceParams",
+ "PromptsCallStreamRequestPromptParams",
"PromptsCallStreamRequestToolChoiceParams",
]
diff --git a/src/humanloop/prompts/requests/prompt_log_request_prompt.py b/src/humanloop/prompts/requests/prompt_log_request_prompt.py
new file mode 100644
index 00000000..8473bb42
--- /dev/null
+++ b/src/humanloop/prompts/requests/prompt_log_request_prompt.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.prompt_kernel_request import PromptKernelRequestParams
+
+PromptLogRequestPromptParams = typing.Union[PromptKernelRequestParams, str]
diff --git a/src/humanloop/prompts/requests/prompt_request_reasoning_effort.py b/src/humanloop/prompts/requests/prompt_request_reasoning_effort.py
new file mode 100644
index 00000000..080a107e
--- /dev/null
+++ b/src/humanloop/prompts/requests/prompt_request_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.open_ai_reasoning_effort import OpenAiReasoningEffort
+
+PromptRequestReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/prompts/requests/prompts_call_request_prompt.py b/src/humanloop/prompts/requests/prompts_call_request_prompt.py
new file mode 100644
index 00000000..7a236235
--- /dev/null
+++ b/src/humanloop/prompts/requests/prompts_call_request_prompt.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.prompt_kernel_request import PromptKernelRequestParams
+
+PromptsCallRequestPromptParams = typing.Union[PromptKernelRequestParams, str]
diff --git a/src/humanloop/prompts/requests/prompts_call_stream_request_prompt.py b/src/humanloop/prompts/requests/prompts_call_stream_request_prompt.py
new file mode 100644
index 00000000..9524425b
--- /dev/null
+++ b/src/humanloop/prompts/requests/prompts_call_stream_request_prompt.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.prompt_kernel_request import PromptKernelRequestParams
+
+PromptsCallStreamRequestPromptParams = typing.Union[PromptKernelRequestParams, str]
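The same `Union[PromptKernelRequestParams, str]` shape backs `prompts.call` and `prompts.call_stream`, so a serialized .prompt file can drive a call directly; a sketch (path and inputs are illustrative):

    with open("persona.prompt") as f:
        response = client.prompts.call(
            path="persona",
            prompt=f.read(),
            inputs={"person": "Ada"},
        )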
diff --git a/src/humanloop/prompts/types/__init__.py b/src/humanloop/prompts/types/__init__.py
index 644cf6b5..40326bce 100644
--- a/src/humanloop/prompts/types/__init__.py
+++ b/src/humanloop/prompts/types/__init__.py
@@ -1,17 +1,25 @@
# This file was auto-generated by Fern from our API Definition.
+from .prompt_log_request_prompt import PromptLogRequestPrompt
from .prompt_log_request_tool_choice import PromptLogRequestToolChoice
from .prompt_log_update_request_tool_choice import PromptLogUpdateRequestToolChoice
+from .prompt_request_reasoning_effort import PromptRequestReasoningEffort
from .prompt_request_stop import PromptRequestStop
from .prompt_request_template import PromptRequestTemplate
+from .prompts_call_request_prompt import PromptsCallRequestPrompt
from .prompts_call_request_tool_choice import PromptsCallRequestToolChoice
+from .prompts_call_stream_request_prompt import PromptsCallStreamRequestPrompt
from .prompts_call_stream_request_tool_choice import PromptsCallStreamRequestToolChoice
__all__ = [
+ "PromptLogRequestPrompt",
"PromptLogRequestToolChoice",
"PromptLogUpdateRequestToolChoice",
+ "PromptRequestReasoningEffort",
"PromptRequestStop",
"PromptRequestTemplate",
+ "PromptsCallRequestPrompt",
"PromptsCallRequestToolChoice",
+ "PromptsCallStreamRequestPrompt",
"PromptsCallStreamRequestToolChoice",
]
diff --git a/src/humanloop/prompts/types/prompt_log_request_prompt.py b/src/humanloop/prompts/types/prompt_log_request_prompt.py
new file mode 100644
index 00000000..4a0791dc
--- /dev/null
+++ b/src/humanloop/prompts/types/prompt_log_request_prompt.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.prompt_kernel_request import PromptKernelRequest
+
+PromptLogRequestPrompt = typing.Union[PromptKernelRequest, str]
diff --git a/src/humanloop/prompts/types/prompt_request_reasoning_effort.py b/src/humanloop/prompts/types/prompt_request_reasoning_effort.py
new file mode 100644
index 00000000..33f35288
--- /dev/null
+++ b/src/humanloop/prompts/types/prompt_request_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.open_ai_reasoning_effort import OpenAiReasoningEffort
+
+PromptRequestReasoningEffort = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/prompts/types/prompts_call_request_prompt.py b/src/humanloop/prompts/types/prompts_call_request_prompt.py
new file mode 100644
index 00000000..78a9f5a1
--- /dev/null
+++ b/src/humanloop/prompts/types/prompts_call_request_prompt.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.prompt_kernel_request import PromptKernelRequest
+
+PromptsCallRequestPrompt = typing.Union[PromptKernelRequest, str]
diff --git a/src/humanloop/prompts/types/prompts_call_stream_request_prompt.py b/src/humanloop/prompts/types/prompts_call_stream_request_prompt.py
new file mode 100644
index 00000000..71376823
--- /dev/null
+++ b/src/humanloop/prompts/types/prompts_call_stream_request_prompt.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.prompt_kernel_request import PromptKernelRequest
+
+PromptsCallStreamRequestPrompt = typing.Union[PromptKernelRequest, str]
diff --git a/src/humanloop/requests/__init__.py b/src/humanloop/requests/__init__.py
index bd9458ba..ba9f74af 100644
--- a/src/humanloop/requests/__init__.py
+++ b/src/humanloop/requests/__init__.py
@@ -1,11 +1,40 @@
# This file was auto-generated by Fern from our API Definition.
+from .agent_call_response import AgentCallResponseParams
+from .agent_call_response_tool_choice import AgentCallResponseToolChoiceParams
+from .agent_call_stream_response import AgentCallStreamResponseParams
+from .agent_call_stream_response_payload import AgentCallStreamResponsePayloadParams
from .agent_config_response import AgentConfigResponseParams
+from .agent_continue_response import AgentContinueResponseParams
+from .agent_continue_response_tool_choice import AgentContinueResponseToolChoiceParams
+from .agent_continue_stream_response import AgentContinueStreamResponseParams
+from .agent_continue_stream_response_payload import AgentContinueStreamResponsePayloadParams
+from .agent_inline_tool import AgentInlineToolParams
+from .agent_kernel_request import AgentKernelRequestParams
+from .agent_kernel_request_reasoning_effort import AgentKernelRequestReasoningEffortParams
+from .agent_kernel_request_stop import AgentKernelRequestStopParams
+from .agent_kernel_request_template import AgentKernelRequestTemplateParams
+from .agent_kernel_request_tools_item import AgentKernelRequestToolsItemParams
+from .agent_linked_file_request import AgentLinkedFileRequestParams
+from .agent_linked_file_response import AgentLinkedFileResponseParams
+from .agent_linked_file_response_file import AgentLinkedFileResponseFileParams
+from .agent_log_response import AgentLogResponseParams
+from .agent_log_response_tool_choice import AgentLogResponseToolChoiceParams
+from .agent_log_stream_response import AgentLogStreamResponseParams
+from .agent_response import AgentResponseParams
+from .agent_response_reasoning_effort import AgentResponseReasoningEffortParams
+from .agent_response_stop import AgentResponseStopParams
+from .agent_response_template import AgentResponseTemplateParams
+from .agent_response_tools_item import AgentResponseToolsItemParams
+from .anthropic_redacted_thinking_content import AnthropicRedactedThinkingContentParams
+from .anthropic_thinking_content import AnthropicThinkingContentParams
from .boolean_evaluator_stats_response import BooleanEvaluatorStatsResponseParams
from .chat_message import ChatMessageParams
from .chat_message_content import ChatMessageContentParams
from .chat_message_content_item import ChatMessageContentItemParams
+from .chat_message_thinking_item import ChatMessageThinkingItemParams
from .code_evaluator_request import CodeEvaluatorRequestParams
+from .create_agent_log_response import CreateAgentLogResponseParams
from .create_datapoint_request import CreateDatapointRequestParams
from .create_datapoint_request_target_value import CreateDatapointRequestTargetValueParams
from .create_evaluator_log_response import CreateEvaluatorLogResponseParams
@@ -51,6 +80,7 @@
from .external_evaluator_request import ExternalEvaluatorRequestParams
from .file_environment_response import FileEnvironmentResponseParams
from .file_environment_response_file import FileEnvironmentResponseFileParams
+from .file_environment_variable_request import FileEnvironmentVariableRequestParams
from .file_id import FileIdParams
from .file_path import FilePathParams
from .file_request import FileRequestParams
@@ -64,7 +94,9 @@
from .image_chat_content import ImageChatContentParams
from .image_url import ImageUrlParams
from .input_response import InputResponseParams
+from .linked_file_request import LinkedFileRequestParams
from .linked_tool_response import LinkedToolResponseParams
+from .list_agents import ListAgentsParams
from .list_datasets import ListDatasetsParams
from .list_evaluators import ListEvaluatorsParams
from .list_flows import ListFlowsParams
@@ -72,28 +104,31 @@
from .list_tools import ListToolsParams
from .llm_evaluator_request import LlmEvaluatorRequestParams
from .log_response import LogResponseParams
+from .log_stream_response import LogStreamResponseParams
from .monitoring_evaluator_environment_request import MonitoringEvaluatorEnvironmentRequestParams
from .monitoring_evaluator_response import MonitoringEvaluatorResponseParams
from .monitoring_evaluator_version_request import MonitoringEvaluatorVersionRequestParams
from .numeric_evaluator_stats_response import NumericEvaluatorStatsResponseParams
from .overall_stats import OverallStatsParams
+from .paginated_data_agent_response import PaginatedDataAgentResponseParams
from .paginated_data_evaluation_log_response import PaginatedDataEvaluationLogResponseParams
from .paginated_data_evaluator_response import PaginatedDataEvaluatorResponseParams
from .paginated_data_flow_response import PaginatedDataFlowResponseParams
from .paginated_data_log_response import PaginatedDataLogResponseParams
from .paginated_data_prompt_response import PaginatedDataPromptResponseParams
from .paginated_data_tool_response import PaginatedDataToolResponseParams
-from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response import (
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseParams,
+from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response import (
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseParams,
)
-from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item import (
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams,
+from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item import (
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams,
)
from .paginated_datapoint_response import PaginatedDatapointResponseParams
from .paginated_dataset_response import PaginatedDatasetResponseParams
from .paginated_evaluation_response import PaginatedEvaluationResponseParams
from .populate_template_response import PopulateTemplateResponseParams
from .populate_template_response_populated_template import PopulateTemplateResponsePopulatedTemplateParams
+from .populate_template_response_reasoning_effort import PopulateTemplateResponseReasoningEffortParams
from .populate_template_response_stop import PopulateTemplateResponseStopParams
from .populate_template_response_template import PopulateTemplateResponseTemplateParams
from .prompt_call_log_response import PromptCallLogResponseParams
@@ -101,11 +136,13 @@
from .prompt_call_response_tool_choice import PromptCallResponseToolChoiceParams
from .prompt_call_stream_response import PromptCallStreamResponseParams
from .prompt_kernel_request import PromptKernelRequestParams
+from .prompt_kernel_request_reasoning_effort import PromptKernelRequestReasoningEffortParams
from .prompt_kernel_request_stop import PromptKernelRequestStopParams
from .prompt_kernel_request_template import PromptKernelRequestTemplateParams
from .prompt_log_response import PromptLogResponseParams
from .prompt_log_response_tool_choice import PromptLogResponseToolChoiceParams
from .prompt_response import PromptResponseParams
+from .prompt_response_reasoning_effort import PromptResponseReasoningEffortParams
from .prompt_response_stop import PromptResponseStopParams
from .prompt_response_template import PromptResponseTemplateParams
from .provider_api_keys import ProviderApiKeysParams
@@ -117,6 +154,7 @@
from .text_chat_content import TextChatContentParams
from .text_evaluator_stats_response import TextEvaluatorStatsResponseParams
from .tool_call import ToolCallParams
+from .tool_call_response import ToolCallResponseParams
from .tool_choice import ToolChoiceParams
from .tool_function import ToolFunctionParams
from .tool_kernel_request import ToolKernelRequestParams
@@ -135,12 +173,41 @@
from .version_stats_response_evaluator_version_stats_item import VersionStatsResponseEvaluatorVersionStatsItemParams
__all__ = [
+ "AgentCallResponseParams",
+ "AgentCallResponseToolChoiceParams",
+ "AgentCallStreamResponseParams",
+ "AgentCallStreamResponsePayloadParams",
"AgentConfigResponseParams",
+ "AgentContinueResponseParams",
+ "AgentContinueResponseToolChoiceParams",
+ "AgentContinueStreamResponseParams",
+ "AgentContinueStreamResponsePayloadParams",
+ "AgentInlineToolParams",
+ "AgentKernelRequestParams",
+ "AgentKernelRequestReasoningEffortParams",
+ "AgentKernelRequestStopParams",
+ "AgentKernelRequestTemplateParams",
+ "AgentKernelRequestToolsItemParams",
+ "AgentLinkedFileRequestParams",
+ "AgentLinkedFileResponseFileParams",
+ "AgentLinkedFileResponseParams",
+ "AgentLogResponseParams",
+ "AgentLogResponseToolChoiceParams",
+ "AgentLogStreamResponseParams",
+ "AgentResponseParams",
+ "AgentResponseReasoningEffortParams",
+ "AgentResponseStopParams",
+ "AgentResponseTemplateParams",
+ "AgentResponseToolsItemParams",
+ "AnthropicRedactedThinkingContentParams",
+ "AnthropicThinkingContentParams",
"BooleanEvaluatorStatsResponseParams",
"ChatMessageContentItemParams",
"ChatMessageContentParams",
"ChatMessageParams",
+ "ChatMessageThinkingItemParams",
"CodeEvaluatorRequestParams",
+ "CreateAgentLogResponseParams",
"CreateDatapointRequestParams",
"CreateDatapointRequestTargetValueParams",
"CreateEvaluatorLogResponseParams",
@@ -180,6 +247,7 @@
"ExternalEvaluatorRequestParams",
"FileEnvironmentResponseFileParams",
"FileEnvironmentResponseParams",
+ "FileEnvironmentVariableRequestParams",
"FileIdParams",
"FilePathParams",
"FileRequestParams",
@@ -193,7 +261,9 @@
"ImageChatContentParams",
"ImageUrlParams",
"InputResponseParams",
+ "LinkedFileRequestParams",
"LinkedToolResponseParams",
+ "ListAgentsParams",
"ListDatasetsParams",
"ListEvaluatorsParams",
"ListFlowsParams",
@@ -201,24 +271,27 @@
"ListToolsParams",
"LlmEvaluatorRequestParams",
"LogResponseParams",
+ "LogStreamResponseParams",
"MonitoringEvaluatorEnvironmentRequestParams",
"MonitoringEvaluatorResponseParams",
"MonitoringEvaluatorVersionRequestParams",
"NumericEvaluatorStatsResponseParams",
"OverallStatsParams",
+ "PaginatedDataAgentResponseParams",
"PaginatedDataEvaluationLogResponseParams",
"PaginatedDataEvaluatorResponseParams",
"PaginatedDataFlowResponseParams",
"PaginatedDataLogResponseParams",
"PaginatedDataPromptResponseParams",
"PaginatedDataToolResponseParams",
- "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseParams",
- "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams",
+ "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseParams",
+ "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams",
"PaginatedDatapointResponseParams",
"PaginatedDatasetResponseParams",
"PaginatedEvaluationResponseParams",
"PopulateTemplateResponseParams",
"PopulateTemplateResponsePopulatedTemplateParams",
+ "PopulateTemplateResponseReasoningEffortParams",
"PopulateTemplateResponseStopParams",
"PopulateTemplateResponseTemplateParams",
"PromptCallLogResponseParams",
@@ -226,11 +299,13 @@
"PromptCallResponseToolChoiceParams",
"PromptCallStreamResponseParams",
"PromptKernelRequestParams",
+ "PromptKernelRequestReasoningEffortParams",
"PromptKernelRequestStopParams",
"PromptKernelRequestTemplateParams",
"PromptLogResponseParams",
"PromptLogResponseToolChoiceParams",
"PromptResponseParams",
+ "PromptResponseReasoningEffortParams",
"PromptResponseStopParams",
"PromptResponseTemplateParams",
"ProviderApiKeysParams",
@@ -242,6 +317,7 @@
"TextChatContentParams",
"TextEvaluatorStatsResponseParams",
"ToolCallParams",
+ "ToolCallResponseParams",
"ToolChoiceParams",
"ToolFunctionParams",
"ToolKernelRequestParams",
diff --git a/src/humanloop/requests/agent_call_response.py b/src/humanloop/requests/agent_call_response.py
new file mode 100644
index 00000000..ffc925ec
--- /dev/null
+++ b/src/humanloop/requests/agent_call_response.py
@@ -0,0 +1,202 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from .chat_message import ChatMessageParams
+import typing
+from .agent_call_response_tool_choice import AgentCallResponseToolChoiceParams
+from .agent_response import AgentResponseParams
+import datetime as dt
+from ..types.log_status import LogStatus
+from .evaluator_log_response import EvaluatorLogResponseParams
+from .log_response import LogResponseParams
+
+
+class AgentCallResponseParams(typing_extensions.TypedDict):
+ """
+ Response model for an Agent call.
+ """
+
+ output_message: typing_extensions.NotRequired[ChatMessageParams]
+ """
+ The message returned by the provider.
+ """
+
+ prompt_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of tokens in the prompt used to generate the output.
+ """
+
+ reasoning_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of reasoning tokens used to generate the output.
+ """
+
+ output_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of tokens in the output generated by the model.
+ """
+
+ prompt_cost: typing_extensions.NotRequired[float]
+ """
+ Cost in dollars associated with the tokens in the prompt.
+ """
+
+ output_cost: typing_extensions.NotRequired[float]
+ """
+ Cost in dollars associated with the tokens in the output.
+ """
+
+ finish_reason: typing_extensions.NotRequired[str]
+ """
+ Reason the generation finished.
+ """
+
+ messages: typing_extensions.NotRequired[typing.Sequence[ChatMessageParams]]
+ """
+ The messages passed to the provider chat endpoint.
+ """
+
+ tool_choice: typing_extensions.NotRequired[AgentCallResponseToolChoiceParams]
+ """
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+ """
+
+ agent: AgentResponseParams
+ """
+ Agent that generated the Log.
+ """
+
+ start_time: typing_extensions.NotRequired[dt.datetime]
+ """
+ When the logged event started.
+ """
+
+ end_time: typing_extensions.NotRequired[dt.datetime]
+ """
+ When the logged event ended.
+ """
+
+ output: typing_extensions.NotRequired[str]
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing_extensions.NotRequired[dt.datetime]
+ """
+ User-defined timestamp for when the log was created.
+ """
+
+ error: typing_extensions.NotRequired[str]
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing_extensions.NotRequired[float]
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing_extensions.NotRequired[str]
+ """
+ Captured log and debug statements.
+ """
+
+ provider_request: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Raw request sent to provider.
+ """
+
+ provider_response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Raw response received from the provider.
+ """
+
+ inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The inputs passed to the prompt template.
+ """
+
+ source: typing_extensions.NotRequired[str]
+ """
+ Identifies where the model was called from.
+ """
+
+ metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Any additional metadata to record.
+ """
+
+ log_status: typing_extensions.NotRequired[LogStatus]
+ """
+ Status of the Agent Log. If `incomplete`, the Agent turn was suspended due to a tool call and can be continued by calling /agents/continue with responses to the Agent's last message (which should contain tool calls). See the `previous_agent_message` field for easy access to the Agent's last message.
+ """
+
+ source_datapoint_id: typing_extensions.NotRequired[str]
+ """
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ """
+
+ trace_parent_id: typing_extensions.NotRequired[str]
+ """
+ The ID of the parent Log to nest this Log under in a Trace.
+ """
+
+ batches: typing_extensions.NotRequired[typing.Sequence[str]]
+ """
+ Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations.
+ """
+
+ user: typing_extensions.NotRequired[str]
+ """
+ End-user ID related to the Log.
+ """
+
+ environment: typing_extensions.NotRequired[str]
+ """
+ The name of the Environment the Log is associated to.
+ """
+
+ save: typing_extensions.NotRequired[bool]
+ """
+ Whether the request/response payloads will be stored on Humanloop.
+ """
+
+ log_id: typing_extensions.NotRequired[str]
+ """
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ """
+
+ id: str
+ """
+ Unique identifier for the Log.
+ """
+
+ evaluator_logs: typing.Sequence[EvaluatorLogResponseParams]
+ """
+ List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
+ """
+
+ trace_flow_id: typing_extensions.NotRequired[str]
+ """
+ Identifier for the Flow that the Trace belongs to.
+ """
+
+ trace_id: typing_extensions.NotRequired[str]
+ """
+ Identifier for the Trace that the Log belongs to.
+ """
+
+ trace_children: typing_extensions.NotRequired[typing.Sequence[LogResponseParams]]
+ """
+ Logs nested under this Log in the Trace.
+ """
+
+ previous_agent_message: typing_extensions.NotRequired[ChatMessageParams]
+ """
+ The Agent's last message, which should contain tool calls. Only populated if the Log is incomplete due to a suspended Agent turn with tool calls. This is useful for continuing the Agent call by calling /agents/continue.
+ """
diff --git a/src/humanloop/requests/agent_call_response_tool_choice.py b/src/humanloop/requests/agent_call_response_tool_choice.py
new file mode 100644
index 00000000..6cc9f9c4
--- /dev/null
+++ b/src/humanloop/requests/agent_call_response_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .tool_choice import ToolChoiceParams
+
+AgentCallResponseToolChoiceParams = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams
+]
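As a usage note on the union above: a caller holds either one of the string literals or a ToolChoiceParams dict that forces a specific function. A minimal sketch, assuming ToolChoiceParams follows the OpenAI-style `{'type': 'function', 'function': {'name': ...}}` shape; the tool name is a hypothetical example:

    from humanloop.requests.agent_call_response_tool_choice import AgentCallResponseToolChoiceParams

    # Any of the three literals is valid as-is.
    auto_choice: AgentCallResponseToolChoiceParams = "auto"

    # A ToolChoiceParams dict forces the named function (name is hypothetical).
    forced_choice: AgentCallResponseToolChoiceParams = {
        "type": "function",
        "function": {"name": "get_weather"},
    }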
diff --git a/src/humanloop/requests/agent_call_stream_response.py b/src/humanloop/requests/agent_call_stream_response.py
new file mode 100644
index 00000000..9555925d
--- /dev/null
+++ b/src/humanloop/requests/agent_call_stream_response.py
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from .agent_call_stream_response_payload import AgentCallStreamResponsePayloadParams
+from ..types.event_type import EventType
+import datetime as dt
+
+
+class AgentCallStreamResponseParams(typing_extensions.TypedDict):
+ """
+ Response model for calling an Agent in streaming mode.
+ """
+
+ log_id: str
+ message: str
+ payload: typing_extensions.NotRequired[AgentCallStreamResponsePayloadParams]
+ type: EventType
+ created_at: dt.datetime
diff --git a/src/humanloop/requests/agent_call_stream_response_payload.py b/src/humanloop/requests/agent_call_stream_response_payload.py
new file mode 100644
index 00000000..0e08a6f3
--- /dev/null
+++ b/src/humanloop/requests/agent_call_stream_response_payload.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .log_stream_response import LogStreamResponseParams
+from .log_response import LogResponseParams
+from .tool_call import ToolCallParams
+
+AgentCallStreamResponsePayloadParams = typing.Union[LogStreamResponseParams, LogResponseParams, ToolCallParams]
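The payload union above is untagged, so a consumer has to discriminate streamed events at runtime. A hedged sketch, assuming events arrive as dicts shaped like AgentCallStreamResponseParams and that a ToolCallParams payload can be recognised by its "function" key (both assumptions, not documented contracts):

    from humanloop.requests.agent_call_stream_response import AgentCallStreamResponseParams

    def handle_event(event: AgentCallStreamResponseParams) -> None:
        payload = event.get("payload")
        if payload is None:
            return
        if "function" in payload:
            # Assumed discriminator for ToolCallParams.
            print("tool call requested:", payload["function"])
        else:
            # LogStreamResponseParams or LogResponseParams chunk.
            print("log update for", event["log_id"])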
diff --git a/src/humanloop/requests/agent_continue_response.py b/src/humanloop/requests/agent_continue_response.py
new file mode 100644
index 00000000..8300667b
--- /dev/null
+++ b/src/humanloop/requests/agent_continue_response.py
@@ -0,0 +1,202 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from .chat_message import ChatMessageParams
+import typing
+from .agent_continue_response_tool_choice import AgentContinueResponseToolChoiceParams
+from .agent_response import AgentResponseParams
+import datetime as dt
+from ..types.log_status import LogStatus
+from .evaluator_log_response import EvaluatorLogResponseParams
+from .log_response import LogResponseParams
+
+
+class AgentContinueResponseParams(typing_extensions.TypedDict):
+ """
+ Response model for continuing an Agent call.
+ """
+
+ output_message: typing_extensions.NotRequired[ChatMessageParams]
+ """
+ The message returned by the provider.
+ """
+
+ prompt_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of tokens in the prompt used to generate the output.
+ """
+
+ reasoning_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of reasoning tokens used to generate the output.
+ """
+
+ output_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of tokens in the output generated by the model.
+ """
+
+ prompt_cost: typing_extensions.NotRequired[float]
+ """
+ Cost in dollars associated with the tokens in the prompt.
+ """
+
+ output_cost: typing_extensions.NotRequired[float]
+ """
+ Cost in dollars associated with the tokens in the output.
+ """
+
+ finish_reason: typing_extensions.NotRequired[str]
+ """
+ Reason the generation finished.
+ """
+
+ messages: typing_extensions.NotRequired[typing.Sequence[ChatMessageParams]]
+ """
+ The messages passed to the provider chat endpoint.
+ """
+
+ tool_choice: typing_extensions.NotRequired[AgentContinueResponseToolChoiceParams]
+ """
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+ """
+
+ agent: AgentResponseParams
+ """
+ Agent that generated the Log.
+ """
+
+ start_time: typing_extensions.NotRequired[dt.datetime]
+ """
+ When the logged event started.
+ """
+
+ end_time: typing_extensions.NotRequired[dt.datetime]
+ """
+ When the logged event ended.
+ """
+
+ output: typing_extensions.NotRequired[str]
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing_extensions.NotRequired[dt.datetime]
+ """
+ User-defined timestamp for when the log was created.
+ """
+
+ error: typing_extensions.NotRequired[str]
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing_extensions.NotRequired[float]
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing_extensions.NotRequired[str]
+ """
+ Captured log and debug statements.
+ """
+
+ provider_request: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Raw request sent to provider.
+ """
+
+ provider_response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Raw response received from the provider.
+ """
+
+ inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The inputs passed to the prompt template.
+ """
+
+ source: typing_extensions.NotRequired[str]
+ """
+ Identifies where the model was called from.
+ """
+
+ metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Any additional metadata to record.
+ """
+
+ log_status: typing_extensions.NotRequired[LogStatus]
+ """
+ Status of the Agent Log. If `incomplete`, the Agent turn was suspended due to a tool call and can be continued by calling /agents/continue with responses to the Agent's last message (which should contain tool calls). See the `previous_agent_message` field for easy access to the Agent's last message.
+ """
+
+ source_datapoint_id: typing_extensions.NotRequired[str]
+ """
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ """
+
+ trace_parent_id: typing_extensions.NotRequired[str]
+ """
+ The ID of the parent Log to nest this Log under in a Trace.
+ """
+
+ batches: typing_extensions.NotRequired[typing.Sequence[str]]
+ """
+ Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations.
+ """
+
+ user: typing_extensions.NotRequired[str]
+ """
+ End-user ID related to the Log.
+ """
+
+ environment: typing_extensions.NotRequired[str]
+ """
+ The name of the Environment the Log is associated to.
+ """
+
+ save: typing_extensions.NotRequired[bool]
+ """
+ Whether the request/response payloads will be stored on Humanloop.
+ """
+
+ log_id: typing_extensions.NotRequired[str]
+ """
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ """
+
+ id: str
+ """
+ Unique identifier for the Log.
+ """
+
+ evaluator_logs: typing.Sequence[EvaluatorLogResponseParams]
+ """
+ List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
+ """
+
+ trace_flow_id: typing_extensions.NotRequired[str]
+ """
+ Identifier for the Flow that the Trace belongs to.
+ """
+
+ trace_id: typing_extensions.NotRequired[str]
+ """
+ Identifier for the Trace that the Log belongs to.
+ """
+
+ trace_children: typing_extensions.NotRequired[typing.Sequence[LogResponseParams]]
+ """
+ Logs nested under this Log in the Trace.
+ """
+
+ previous_agent_message: typing_extensions.NotRequired[ChatMessageParams]
+ """
+ The Agent's last message, which should contain tool calls. Only populated if the Log is incomplete due to a suspended Agent turn with tool calls. This is useful for continuing the Agent call by calling /agents/continue.
+ """
diff --git a/src/humanloop/requests/agent_continue_response_tool_choice.py b/src/humanloop/requests/agent_continue_response_tool_choice.py
new file mode 100644
index 00000000..24b044cc
--- /dev/null
+++ b/src/humanloop/requests/agent_continue_response_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .tool_choice import ToolChoiceParams
+
+AgentContinueResponseToolChoiceParams = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams
+]
diff --git a/src/humanloop/requests/agent_continue_stream_response.py b/src/humanloop/requests/agent_continue_stream_response.py
new file mode 100644
index 00000000..1038e000
--- /dev/null
+++ b/src/humanloop/requests/agent_continue_stream_response.py
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from .agent_continue_stream_response_payload import AgentContinueStreamResponsePayloadParams
+from ..types.event_type import EventType
+import datetime as dt
+
+
+class AgentContinueStreamResponseParams(typing_extensions.TypedDict):
+ """
+ Response model for continuing an Agent call in streaming mode.
+ """
+
+ log_id: str
+ message: str
+ payload: typing_extensions.NotRequired[AgentContinueStreamResponsePayloadParams]
+ type: EventType
+ created_at: dt.datetime
diff --git a/src/humanloop/requests/agent_continue_stream_response_payload.py b/src/humanloop/requests/agent_continue_stream_response_payload.py
new file mode 100644
index 00000000..ddd74c10
--- /dev/null
+++ b/src/humanloop/requests/agent_continue_stream_response_payload.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .log_stream_response import LogStreamResponseParams
+from .log_response import LogResponseParams
+from .tool_call import ToolCallParams
+
+AgentContinueStreamResponsePayloadParams = typing.Union[LogStreamResponseParams, LogResponseParams, ToolCallParams]
diff --git a/src/humanloop/requests/agent_inline_tool.py b/src/humanloop/requests/agent_inline_tool.py
new file mode 100644
index 00000000..31f9401a
--- /dev/null
+++ b/src/humanloop/requests/agent_inline_tool.py
@@ -0,0 +1,13 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing
+from .tool_function import ToolFunctionParams
+from ..types.on_agent_call_enum import OnAgentCallEnum
+
+
+class AgentInlineToolParams(typing_extensions.TypedDict):
+ type: typing.Literal["inline"]
+ json_schema: ToolFunctionParams
+ on_agent_call: typing_extensions.NotRequired[OnAgentCallEnum]
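For reference, an inline tool is defined directly on the Agent with a JSON schema. A sketch, assuming ToolFunctionParams carries OpenAI-style `name`/`description`/`parameters` keys (an assumption about that type; the tool itself is hypothetical):

    from humanloop.requests.agent_inline_tool import AgentInlineToolParams

    search_tool: AgentInlineToolParams = {
        "type": "inline",
        "json_schema": {
            "name": "search_docs",  # hypothetical tool name
            "description": "Search the documentation for a query.",
            "parameters": {
                "type": "object",
                "properties": {"query": {"type": "string"}},
                "required": ["query"],
            },
        },
    }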
diff --git a/src/humanloop/requests/agent_kernel_request.py b/src/humanloop/requests/agent_kernel_request.py
new file mode 100644
index 00000000..0ca76571
--- /dev/null
+++ b/src/humanloop/requests/agent_kernel_request.py
@@ -0,0 +1,112 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.model_endpoints import ModelEndpoints
+from .agent_kernel_request_template import AgentKernelRequestTemplateParams
+from ..types.template_language import TemplateLanguage
+from ..types.model_providers import ModelProviders
+from .agent_kernel_request_stop import AgentKernelRequestStopParams
+import typing
+from .response_format import ResponseFormatParams
+from .agent_kernel_request_reasoning_effort import AgentKernelRequestReasoningEffortParams
+from .agent_kernel_request_tools_item import AgentKernelRequestToolsItemParams
+
+
+class AgentKernelRequestParams(typing_extensions.TypedDict):
+ """
+ Base class used by both PromptKernelRequest and AgentKernelRequest.
+
+ Contains the consistent Prompt-related fields.
+ """
+
+ model: str
+ """
+ The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+ """
+
+ endpoint: typing_extensions.NotRequired[ModelEndpoints]
+ """
+ The provider model endpoint used.
+ """
+
+ template: typing_extensions.NotRequired[AgentKernelRequestTemplateParams]
+ """
+ The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+ For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+ For completion models, provide a prompt template as a string.
+
+ Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+ """
+
+ template_language: typing_extensions.NotRequired[TemplateLanguage]
+ """
+ The template language to use for rendering the template.
+ """
+
+ provider: typing_extensions.NotRequired[ModelProviders]
+ """
+ The company providing the underlying model service.
+ """
+
+ max_tokens: typing_extensions.NotRequired[int]
+ """
+ The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt.
+ """
+
+ temperature: typing_extensions.NotRequired[float]
+ """
+ What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+ """
+
+ top_p: typing_extensions.NotRequired[float]
+ """
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+ """
+
+ stop: typing_extensions.NotRequired[AgentKernelRequestStopParams]
+ """
+ The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+ """
+
+ presence_penalty: typing_extensions.NotRequired[float]
+ """
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+ """
+
+ frequency_penalty: typing_extensions.NotRequired[float]
+ """
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+ """
+
+ other: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Other parameter values to be passed to the provider call.
+ """
+
+ seed: typing_extensions.NotRequired[int]
+ """
+ If specified, the model will make a best effort to sample deterministically, but it is not guaranteed.
+ """
+
+ response_format: typing_extensions.NotRequired[ResponseFormatParams]
+ """
+ The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+ """
+
+ reasoning_effort: typing_extensions.NotRequired[AgentKernelRequestReasoningEffortParams]
+ """
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
+ """
+
+ tools: typing_extensions.NotRequired[typing.Sequence[AgentKernelRequestToolsItemParams]]
+ attributes: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used.
+ """
+
+ max_iterations: typing_extensions.NotRequired[int]
+ """
+ The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
+ """
diff --git a/src/humanloop/requests/agent_kernel_request_reasoning_effort.py b/src/humanloop/requests/agent_kernel_request_reasoning_effort.py
new file mode 100644
index 00000000..ea32bc11
--- /dev/null
+++ b/src/humanloop/requests/agent_kernel_request_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ..types.open_ai_reasoning_effort import OpenAiReasoningEffort
+
+AgentKernelRequestReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int]
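The reasoning-effort union admits two provider conventions. A sketch; the "medium" literal assumes OpenAiReasoningEffort is a string enum of effort levels:

    from humanloop.requests.agent_kernel_request_reasoning_effort import (
        AgentKernelRequestReasoningEffortParams,
    )

    openai_effort: AgentKernelRequestReasoningEffortParams = "medium"  # OpenAI-style enum value
    anthropic_budget: AgentKernelRequestReasoningEffortParams = 2048   # Anthropic-style token budget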
diff --git a/src/humanloop/requests/agent_kernel_request_stop.py b/src/humanloop/requests/agent_kernel_request_stop.py
new file mode 100644
index 00000000..eae95d35
--- /dev/null
+++ b/src/humanloop/requests/agent_kernel_request_stop.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentKernelRequestStopParams = typing.Union[str, typing.Sequence[str]]
diff --git a/src/humanloop/requests/agent_kernel_request_template.py b/src/humanloop/requests/agent_kernel_request_template.py
new file mode 100644
index 00000000..7261667d
--- /dev/null
+++ b/src/humanloop/requests/agent_kernel_request_template.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .chat_message import ChatMessageParams
+
+AgentKernelRequestTemplateParams = typing.Union[str, typing.Sequence[ChatMessageParams]]
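Both template forms in one sketch: a completion-style string and a chat template, with double curly brackets marking input variables (assuming ChatMessageParams accepts plain role/content keys):

    from humanloop.requests.agent_kernel_request_template import AgentKernelRequestTemplateParams

    completion_template: AgentKernelRequestTemplateParams = "Summarise: {{document}}"

    chat_template: AgentKernelRequestTemplateParams = [
        {"role": "system", "content": "Answer in {{language}}."},
        {"role": "user", "content": "{{question}}"},
    ]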
diff --git a/src/humanloop/requests/agent_kernel_request_tools_item.py b/src/humanloop/requests/agent_kernel_request_tools_item.py
new file mode 100644
index 00000000..27b63984
--- /dev/null
+++ b/src/humanloop/requests/agent_kernel_request_tools_item.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .agent_linked_file_request import AgentLinkedFileRequestParams
+from .agent_inline_tool import AgentInlineToolParams
+
+AgentKernelRequestToolsItemParams = typing.Union[AgentLinkedFileRequestParams, AgentInlineToolParams]
diff --git a/src/humanloop/requests/agent_linked_file_request.py b/src/humanloop/requests/agent_linked_file_request.py
new file mode 100644
index 00000000..18fc2274
--- /dev/null
+++ b/src/humanloop/requests/agent_linked_file_request.py
@@ -0,0 +1,13 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing
+from .linked_file_request import LinkedFileRequestParams
+from ..types.on_agent_call_enum import OnAgentCallEnum
+
+
+class AgentLinkedFileRequestParams(typing_extensions.TypedDict):
+ type: typing.Literal["file"]
+ link: LinkedFileRequestParams
+ on_agent_call: typing_extensions.NotRequired[OnAgentCallEnum]
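A linked-file tool, by contrast, points at an existing File by ID. The LinkedFileRequestParams keys come straight from the patch; the ID values are hypothetical placeholders:

    from humanloop.requests.agent_linked_file_request import AgentLinkedFileRequestParams

    linked_tool: AgentLinkedFileRequestParams = {
        "type": "file",
        "link": {
            "file_id": "tl_placeholder",          # hypothetical File ID
            "environment_id": "env_placeholder",  # optional pin to an Environment
        },
    }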
diff --git a/src/humanloop/requests/agent_linked_file_response.py b/src/humanloop/requests/agent_linked_file_response.py
new file mode 100644
index 00000000..8a690a77
--- /dev/null
+++ b/src/humanloop/requests/agent_linked_file_response.py
@@ -0,0 +1,19 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+import typing_extensions
+import typing
+from .linked_file_request import LinkedFileRequestParams
+from ..types.on_agent_call_enum import OnAgentCallEnum
+
+if typing.TYPE_CHECKING:
+ from .agent_linked_file_response_file import AgentLinkedFileResponseFileParams
+
+
+class AgentLinkedFileResponseParams(typing_extensions.TypedDict):
+ type: typing.Literal["file"]
+ link: LinkedFileRequestParams
+ on_agent_call: typing_extensions.NotRequired[OnAgentCallEnum]
+ file: typing_extensions.NotRequired["AgentLinkedFileResponseFileParams"]
diff --git a/src/humanloop/requests/agent_linked_file_response_file.py b/src/humanloop/requests/agent_linked_file_response_file.py
new file mode 100644
index 00000000..bb328de2
--- /dev/null
+++ b/src/humanloop/requests/agent_linked_file_response_file.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+import typing
+from .dataset_response import DatasetResponseParams
+
+if typing.TYPE_CHECKING:
+ from .prompt_response import PromptResponseParams
+ from .tool_response import ToolResponseParams
+ from .evaluator_response import EvaluatorResponseParams
+ from .flow_response import FlowResponseParams
+ from .agent_response import AgentResponseParams
+AgentLinkedFileResponseFileParams = typing.Union[
+ "PromptResponseParams",
+ "ToolResponseParams",
+ DatasetResponseParams,
+ "EvaluatorResponseParams",
+ "FlowResponseParams",
+ "AgentResponseParams",
+]
diff --git a/src/humanloop/requests/agent_log_response.py b/src/humanloop/requests/agent_log_response.py
new file mode 100644
index 00000000..0cb24b8a
--- /dev/null
+++ b/src/humanloop/requests/agent_log_response.py
@@ -0,0 +1,201 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+import typing_extensions
+from .chat_message import ChatMessageParams
+import typing
+from .agent_log_response_tool_choice import AgentLogResponseToolChoiceParams
+from .agent_response import AgentResponseParams
+import datetime as dt
+from ..types.log_status import LogStatus
+
+if typing.TYPE_CHECKING:
+ from .evaluator_log_response import EvaluatorLogResponseParams
+ from .log_response import LogResponseParams
+
+
+class AgentLogResponseParams(typing_extensions.TypedDict):
+ """
+ Response model for an Agent Log.
+ """
+
+ output_message: typing_extensions.NotRequired[ChatMessageParams]
+ """
+ The message returned by the provider.
+ """
+
+ prompt_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of tokens in the prompt used to generate the output.
+ """
+
+ reasoning_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of reasoning tokens used to generate the output.
+ """
+
+ output_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of tokens in the output generated by the model.
+ """
+
+ prompt_cost: typing_extensions.NotRequired[float]
+ """
+ Cost in dollars associated with the tokens in the prompt.
+ """
+
+ output_cost: typing_extensions.NotRequired[float]
+ """
+ Cost in dollars associated with the tokens in the output.
+ """
+
+ finish_reason: typing_extensions.NotRequired[str]
+ """
+ Reason the generation finished.
+ """
+
+ messages: typing_extensions.NotRequired[typing.Sequence[ChatMessageParams]]
+ """
+ The messages passed to the provider chat endpoint.
+ """
+
+ tool_choice: typing_extensions.NotRequired[AgentLogResponseToolChoiceParams]
+ """
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+ """
+
+ agent: AgentResponseParams
+ """
+ Agent that generated the Log.
+ """
+
+ start_time: typing_extensions.NotRequired[dt.datetime]
+ """
+ When the logged event started.
+ """
+
+ end_time: typing_extensions.NotRequired[dt.datetime]
+ """
+ When the logged event ended.
+ """
+
+ output: typing_extensions.NotRequired[str]
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing_extensions.NotRequired[dt.datetime]
+ """
+ User-defined timestamp for when the log was created.
+ """
+
+ error: typing_extensions.NotRequired[str]
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing_extensions.NotRequired[float]
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing_extensions.NotRequired[str]
+ """
+ Captured log and debug statements.
+ """
+
+ provider_request: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Raw request sent to provider.
+ """
+
+ provider_response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Raw response received from the provider.
+ """
+
+ inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The inputs passed to the prompt template.
+ """
+
+ source: typing_extensions.NotRequired[str]
+ """
+ Identifies where the model was called from.
+ """
+
+ metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Any additional metadata to record.
+ """
+
+ log_status: typing_extensions.NotRequired[LogStatus]
+ """
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+ """
+
+ source_datapoint_id: typing_extensions.NotRequired[str]
+ """
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ """
+
+ trace_parent_id: typing_extensions.NotRequired[str]
+ """
+ The ID of the parent Log to nest this Log under in a Trace.
+ """
+
+ batches: typing_extensions.NotRequired[typing.Sequence[str]]
+ """
+ Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations.
+ """
+
+ user: typing_extensions.NotRequired[str]
+ """
+ End-user ID related to the Log.
+ """
+
+ environment: typing_extensions.NotRequired[str]
+ """
+ The name of the Environment the Log is associated to.
+ """
+
+ save: typing_extensions.NotRequired[bool]
+ """
+ Whether the request/response payloads will be stored on Humanloop.
+ """
+
+ log_id: typing_extensions.NotRequired[str]
+ """
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ """
+
+ id: str
+ """
+ Unique identifier for the Log.
+ """
+
+ evaluator_logs: typing.Sequence["EvaluatorLogResponseParams"]
+ """
+ List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
+ """
+
+ trace_flow_id: typing_extensions.NotRequired[str]
+ """
+ Identifier for the Flow that the Trace belongs to.
+ """
+
+ trace_id: typing_extensions.NotRequired[str]
+ """
+ Identifier for the Trace that the Log belongs to.
+ """
+
+ trace_children: typing_extensions.NotRequired[typing.Sequence["LogResponseParams"]]
+ """
+ Logs nested under this Log in the Trace.
+ """
diff --git a/src/humanloop/requests/agent_log_response_tool_choice.py b/src/humanloop/requests/agent_log_response_tool_choice.py
new file mode 100644
index 00000000..e239a69c
--- /dev/null
+++ b/src/humanloop/requests/agent_log_response_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .tool_choice import ToolChoiceParams
+
+AgentLogResponseToolChoiceParams = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams
+]
diff --git a/src/humanloop/requests/agent_log_stream_response.py b/src/humanloop/requests/agent_log_stream_response.py
new file mode 100644
index 00000000..710d55cf
--- /dev/null
+++ b/src/humanloop/requests/agent_log_stream_response.py
@@ -0,0 +1,87 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import datetime as dt
+from .chat_message import ChatMessageParams
+
+
+class AgentLogStreamResponseParams(typing_extensions.TypedDict):
+ """
+ Prompt-specific log output shared between PromptLogRequest and PromptCallLogResponse.
+ """
+
+ output: typing_extensions.NotRequired[str]
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing_extensions.NotRequired[dt.datetime]
+ """
+ User-defined timestamp for when the log was created.
+ """
+
+ error: typing_extensions.NotRequired[str]
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing_extensions.NotRequired[float]
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing_extensions.NotRequired[str]
+ """
+ Captured log and debug statements.
+ """
+
+ output_message: typing_extensions.NotRequired[ChatMessageParams]
+ """
+ The message returned by the provider.
+ """
+
+ prompt_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of tokens in the prompt used to generate the output.
+ """
+
+ reasoning_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of reasoning tokens used to generate the output.
+ """
+
+ output_tokens: typing_extensions.NotRequired[int]
+ """
+ Number of tokens in the output generated by the model.
+ """
+
+ prompt_cost: typing_extensions.NotRequired[float]
+ """
+ Cost in dollars associated with the tokens in the prompt.
+ """
+
+ output_cost: typing_extensions.NotRequired[float]
+ """
+ Cost in dollars associated with the tokens in the output.
+ """
+
+ finish_reason: typing_extensions.NotRequired[str]
+ """
+ Reason the generation finished.
+ """
+
+ id: str
+ """
+ ID of the log.
+ """
+
+ agent_id: str
+ """
+ ID of the Agent the log belongs to.
+ """
+
+ version_id: str
+ """
+ ID of the specific version of the Agent.
+ """
diff --git a/src/humanloop/requests/agent_response.py b/src/humanloop/requests/agent_response.py
new file mode 100644
index 00000000..f482728d
--- /dev/null
+++ b/src/humanloop/requests/agent_response.py
@@ -0,0 +1,242 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+import typing_extensions
+from ..types.model_endpoints import ModelEndpoints
+from .agent_response_template import AgentResponseTemplateParams
+from ..types.template_language import TemplateLanguage
+from ..types.model_providers import ModelProviders
+from .agent_response_stop import AgentResponseStopParams
+import typing
+from .response_format import ResponseFormatParams
+from .agent_response_reasoning_effort import AgentResponseReasoningEffortParams
+from .environment_response import EnvironmentResponseParams
+import datetime as dt
+from ..types.user_response import UserResponse
+from ..types.version_status import VersionStatus
+from .input_response import InputResponseParams
+from .evaluator_aggregate import EvaluatorAggregateParams
+
+if typing.TYPE_CHECKING:
+ from .agent_response_tools_item import AgentResponseToolsItemParams
+ from .monitoring_evaluator_response import MonitoringEvaluatorResponseParams
+
+
+class AgentResponseParams(typing_extensions.TypedDict):
+ """
+ Base type that all File Responses should inherit from.
+
+ Attributes defined here are common to all File Responses and should be overridden
+ in the inheriting classes with documentation and appropriate Field definitions.
+ """
+
+ path: str
+ """
+ Path of the Agent, including the name, which is used as a unique identifier.
+ """
+
+ id: str
+ """
+ Unique identifier for the Agent.
+ """
+
+ directory_id: typing_extensions.NotRequired[str]
+ """
+ ID of the directory that the file is in on Humanloop.
+ """
+
+ model: str
+ """
+ The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+ """
+
+ endpoint: typing_extensions.NotRequired[ModelEndpoints]
+ """
+ The provider model endpoint used.
+ """
+
+ template: typing_extensions.NotRequired[AgentResponseTemplateParams]
+ """
+ The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+ For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+ For completion models, provide a prompt template as a string.
+
+ Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+ """
+
+ template_language: typing_extensions.NotRequired[TemplateLanguage]
+ """
+ The template language to use for rendering the template.
+ """
+
+ provider: typing_extensions.NotRequired[ModelProviders]
+ """
+ The company providing the underlying model service.
+ """
+
+ max_tokens: typing_extensions.NotRequired[int]
+ """
+ The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt.
+ """
+
+ temperature: typing_extensions.NotRequired[float]
+ """
+ What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+ """
+
+ top_p: typing_extensions.NotRequired[float]
+ """
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+ """
+
+ stop: typing_extensions.NotRequired[AgentResponseStopParams]
+ """
+ The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+ """
+
+ presence_penalty: typing_extensions.NotRequired[float]
+ """
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+ """
+
+ frequency_penalty: typing_extensions.NotRequired[float]
+ """
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+ """
+
+ other: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Other parameter values to be passed to the provider call.
+ """
+
+ seed: typing_extensions.NotRequired[int]
+ """
+ If specified, the model will make a best effort to sample deterministically, but it is not guaranteed.
+ """
+
+ response_format: typing_extensions.NotRequired[ResponseFormatParams]
+ """
+ The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+ """
+
+ reasoning_effort: typing_extensions.NotRequired[AgentResponseReasoningEffortParams]
+ """
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
+ """
+
+ tools: typing.Sequence["AgentResponseToolsItemParams"]
+ """
+ List of tools that the Agent can call. These can be linked files or inline tools.
+ """
+
+ attributes: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used.
+ """
+
+ max_iterations: typing_extensions.NotRequired[int]
+ """
+ The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
+ """
+
+ version_name: typing_extensions.NotRequired[str]
+ """
+ Unique name for the Agent version. Version names must be unique for a given Agent.
+ """
+
+ version_description: typing_extensions.NotRequired[str]
+ """
+ Description of the version, e.g., the changes made in this version.
+ """
+
+ description: typing_extensions.NotRequired[str]
+ """
+ Description of the Agent.
+ """
+
+ tags: typing_extensions.NotRequired[typing.Sequence[str]]
+ """
+ List of tags associated with the file.
+ """
+
+ readme: typing_extensions.NotRequired[str]
+ """
+ Long description of the file.
+ """
+
+ name: str
+ """
+ Name of the Agent.
+ """
+
+ schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The JSON schema for the File.
+ """
+
+ version_id: str
+ """
+ Unique identifier for the specific Agent Version. If no query params are provided, the default deployed Agent Version is returned.
+ """
+
+ type: typing_extensions.NotRequired[typing.Literal["agent"]]
+ environments: typing_extensions.NotRequired[typing.Sequence[EnvironmentResponseParams]]
+ """
+ The list of environments the Agent Version is deployed to.
+ """
+
+ created_at: dt.datetime
+ updated_at: dt.datetime
+ created_by: typing_extensions.NotRequired[UserResponse]
+ """
+ The user who created the Agent.
+ """
+
+ committed_by: typing_extensions.NotRequired[UserResponse]
+ """
+ The user who committed the Agent Version.
+ """
+
+ committed_at: typing_extensions.NotRequired[dt.datetime]
+ """
+ The date and time the Agent Version was committed.
+ """
+
+ status: VersionStatus
+ """
+ The status of the Agent Version.
+ """
+
+ last_used_at: dt.datetime
+ version_logs_count: int
+ """
+ The number of logs that have been generated for this Agent Version.
+ """
+
+ total_logs_count: int
+ """
+ The number of logs that have been generated across all Agent Versions.
+ """
+
+ inputs: typing.Sequence[InputResponseParams]
+ """
+ Inputs associated with the Agent. Inputs correspond to any of the variables used within the Agent template.
+ """
+
+ evaluators: typing_extensions.NotRequired[typing.Sequence["MonitoringEvaluatorResponseParams"]]
+ """
+ Evaluators that have been attached to this Agent that are used for monitoring logs.
+ """
+
+ evaluator_aggregates: typing_extensions.NotRequired[typing.Sequence[EvaluatorAggregateParams]]
+ """
+ Aggregation of Evaluator results for the Agent Version.
+ """
+
+ content: typing_extensions.NotRequired[str]
+ """
+ The serialized kernel for the Agent. Corresponds to the .agent file.
+ """
diff --git a/src/humanloop/requests/agent_response_reasoning_effort.py b/src/humanloop/requests/agent_response_reasoning_effort.py
new file mode 100644
index 00000000..de1b969f
--- /dev/null
+++ b/src/humanloop/requests/agent_response_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ..types.open_ai_reasoning_effort import OpenAiReasoningEffort
+
+AgentResponseReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/requests/agent_response_stop.py b/src/humanloop/requests/agent_response_stop.py
new file mode 100644
index 00000000..a395ee73
--- /dev/null
+++ b/src/humanloop/requests/agent_response_stop.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentResponseStopParams = typing.Union[str, typing.Sequence[str]]
diff --git a/src/humanloop/requests/agent_response_template.py b/src/humanloop/requests/agent_response_template.py
new file mode 100644
index 00000000..94be65f1
--- /dev/null
+++ b/src/humanloop/requests/agent_response_template.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .chat_message import ChatMessageParams
+
+AgentResponseTemplateParams = typing.Union[str, typing.Sequence[ChatMessageParams]]
diff --git a/src/humanloop/requests/agent_response_tools_item.py b/src/humanloop/requests/agent_response_tools_item.py
new file mode 100644
index 00000000..5181579b
--- /dev/null
+++ b/src/humanloop/requests/agent_response_tools_item.py
@@ -0,0 +1,10 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+import typing
+from .agent_inline_tool import AgentInlineToolParams
+
+if typing.TYPE_CHECKING:
+ from .agent_linked_file_response import AgentLinkedFileResponseParams
+AgentResponseToolsItemParams = typing.Union["AgentLinkedFileResponseParams", AgentInlineToolParams]
diff --git a/src/humanloop/requests/anthropic_redacted_thinking_content.py b/src/humanloop/requests/anthropic_redacted_thinking_content.py
new file mode 100644
index 00000000..3b328f7f
--- /dev/null
+++ b/src/humanloop/requests/anthropic_redacted_thinking_content.py
@@ -0,0 +1,12 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing
+
+
+class AnthropicRedactedThinkingContentParams(typing_extensions.TypedDict):
+ type: typing.Literal["redacted_thinking"]
+ data: str
+ """
+ Thinking block that Anthropic redacted for safety reasons. The user is expected to pass the block back to Anthropic.
+ """
diff --git a/src/humanloop/requests/anthropic_thinking_content.py b/src/humanloop/requests/anthropic_thinking_content.py
new file mode 100644
index 00000000..34f6f99f
--- /dev/null
+++ b/src/humanloop/requests/anthropic_thinking_content.py
@@ -0,0 +1,17 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing
+
+
+class AnthropicThinkingContentParams(typing_extensions.TypedDict):
+ type: typing.Literal["thinking"]
+ thinking: str
+ """
+ Model's chain-of-thought for providing the response.
+ """
+
+ signature: str
+ """
+ Cryptographic signature that verifies the thinking block was generated by Anthropic.
+ """
diff --git a/src/humanloop/requests/chat_message.py b/src/humanloop/requests/chat_message.py
index cab8466d..6011653a 100644
--- a/src/humanloop/requests/chat_message.py
+++ b/src/humanloop/requests/chat_message.py
@@ -6,6 +6,7 @@
from ..types.chat_role import ChatRole
import typing
from .tool_call import ToolCallParams
+from .chat_message_thinking_item import ChatMessageThinkingItemParams
class ChatMessageParams(typing_extensions.TypedDict):
@@ -33,3 +34,8 @@ class ChatMessageParams(typing_extensions.TypedDict):
"""
A list of tool calls requested by the assistant.
"""
+
+ thinking: typing_extensions.NotRequired[typing.Sequence[ChatMessageThinkingItemParams]]
+ """
+ Model's chain-of-thought for providing the response. Present on assistant messages if model supports it.
+ """
diff --git a/src/humanloop/requests/chat_message_thinking_item.py b/src/humanloop/requests/chat_message_thinking_item.py
new file mode 100644
index 00000000..0691f4d8
--- /dev/null
+++ b/src/humanloop/requests/chat_message_thinking_item.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .anthropic_thinking_content import AnthropicThinkingContentParams
+from .anthropic_redacted_thinking_content import AnthropicRedactedThinkingContentParams
+
+ChatMessageThinkingItemParams = typing.Union[AnthropicThinkingContentParams, AnthropicRedactedThinkingContentParams]
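Putting the new `thinking` field together with the two Anthropic content types: a hedged sketch of an assistant message as a provider might return it (the thinking, signature, and data values are placeholders a caller would never construct by hand):

    from humanloop.requests.chat_message import ChatMessageParams

    assistant_message: ChatMessageParams = {
        "role": "assistant",
        "content": "The answer is 42.",
        "thinking": [
            {"type": "thinking", "thinking": "placeholder", "signature": "placeholder"},
            {"type": "redacted_thinking", "data": "placeholder"},
        ],
    }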
diff --git a/src/humanloop/requests/create_agent_log_response.py b/src/humanloop/requests/create_agent_log_response.py
new file mode 100644
index 00000000..b1715517
--- /dev/null
+++ b/src/humanloop/requests/create_agent_log_response.py
@@ -0,0 +1,31 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+from ..types.log_status import LogStatus
+
+
+class CreateAgentLogResponseParams(typing_extensions.TypedDict):
+ """
+ Response for an Agent Log.
+ """
+
+ id: str
+ """
+ Unique identifier for the Log.
+ """
+
+ agent_id: str
+ """
+ Unique identifier for the Agent.
+ """
+
+ version_id: str
+ """
+ Unique identifier for the Agent Version.
+ """
+
+ log_status: typing_extensions.NotRequired[LogStatus]
+ """
+ Status of the Agent Log. When an Agent Log is marked as `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs.
+ """
diff --git a/src/humanloop/requests/dataset_response.py b/src/humanloop/requests/dataset_response.py
index 1d59ed4b..1cffd2b2 100644
--- a/src/humanloop/requests/dataset_response.py
+++ b/src/humanloop/requests/dataset_response.py
@@ -42,6 +42,11 @@ class DatasetResponseParams(typing_extensions.TypedDict):
Description of the Dataset.
"""
+ schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The JSON schema for the File.
+ """
+
readme: typing_extensions.NotRequired[str]
"""
Long description of the file.
diff --git a/src/humanloop/requests/directory_with_parents_and_children_response_files_item.py b/src/humanloop/requests/directory_with_parents_and_children_response_files_item.py
index f101bf15..db9370b9 100644
--- a/src/humanloop/requests/directory_with_parents_and_children_response_files_item.py
+++ b/src/humanloop/requests/directory_with_parents_and_children_response_files_item.py
@@ -6,7 +6,13 @@
from .evaluator_response import EvaluatorResponseParams
from .dataset_response import DatasetResponseParams
from .flow_response import FlowResponseParams
+from .agent_response import AgentResponseParams
DirectoryWithParentsAndChildrenResponseFilesItemParams = typing.Union[
- PromptResponseParams, ToolResponseParams, EvaluatorResponseParams, DatasetResponseParams, FlowResponseParams
+ PromptResponseParams,
+ ToolResponseParams,
+ EvaluatorResponseParams,
+ DatasetResponseParams,
+ FlowResponseParams,
+ AgentResponseParams,
]
diff --git a/src/humanloop/requests/evaluator_response.py b/src/humanloop/requests/evaluator_response.py
index 908eeb2d..1ff836fb 100644
--- a/src/humanloop/requests/evaluator_response.py
+++ b/src/humanloop/requests/evaluator_response.py
@@ -57,6 +57,11 @@ class EvaluatorResponseParams(typing_extensions.TypedDict):
Description of the Evaluator.
"""
+ schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The JSON schema for the File.
+ """
+
readme: typing_extensions.NotRequired[str]
"""
Long description of the file.
diff --git a/src/humanloop/requests/file_environment_response_file.py b/src/humanloop/requests/file_environment_response_file.py
index 4ac6b0c3..04c0b51d 100644
--- a/src/humanloop/requests/file_environment_response_file.py
+++ b/src/humanloop/requests/file_environment_response_file.py
@@ -6,7 +6,13 @@
from .dataset_response import DatasetResponseParams
from .evaluator_response import EvaluatorResponseParams
from .flow_response import FlowResponseParams
+from .agent_response import AgentResponseParams
FileEnvironmentResponseFileParams = typing.Union[
- PromptResponseParams, ToolResponseParams, DatasetResponseParams, EvaluatorResponseParams, FlowResponseParams
+ PromptResponseParams,
+ ToolResponseParams,
+ DatasetResponseParams,
+ EvaluatorResponseParams,
+ FlowResponseParams,
+ AgentResponseParams,
]
diff --git a/src/humanloop/requests/file_environment_variable_request.py b/src/humanloop/requests/file_environment_variable_request.py
new file mode 100644
index 00000000..bb70bda4
--- /dev/null
+++ b/src/humanloop/requests/file_environment_variable_request.py
@@ -0,0 +1,15 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+
+
+class FileEnvironmentVariableRequestParams(typing_extensions.TypedDict):
+ name: str
+ """
+ Name of the environment variable.
+ """
+
+ value: str
+ """
+ Value of the environment variable.
+ """
diff --git a/src/humanloop/requests/flow_response.py b/src/humanloop/requests/flow_response.py
index 18a26d10..eebc9fd7 100644
--- a/src/humanloop/requests/flow_response.py
+++ b/src/humanloop/requests/flow_response.py
@@ -59,6 +59,11 @@ class FlowResponseParams(typing_extensions.TypedDict):
Description of the Flow.
"""
+ schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The JSON schema for the File.
+ """
+
readme: typing_extensions.NotRequired[str]
"""
Long description of the file.
diff --git a/src/humanloop/requests/linked_file_request.py b/src/humanloop/requests/linked_file_request.py
new file mode 100644
index 00000000..2bbba19c
--- /dev/null
+++ b/src/humanloop/requests/linked_file_request.py
@@ -0,0 +1,10 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+
+
+class LinkedFileRequestParams(typing_extensions.TypedDict):
+ file_id: str
+ environment_id: typing_extensions.NotRequired[str]
+ version_id: typing_extensions.NotRequired[str]
diff --git a/src/humanloop/requests/list_agents.py b/src/humanloop/requests/list_agents.py
new file mode 100644
index 00000000..4a72f1db
--- /dev/null
+++ b/src/humanloop/requests/list_agents.py
@@ -0,0 +1,12 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing
+from .agent_response import AgentResponseParams
+
+
+class ListAgentsParams(typing_extensions.TypedDict):
+ records: typing.Sequence[AgentResponseParams]
+ """
+ The list of Agents.
+ """
diff --git a/src/humanloop/requests/log_response.py b/src/humanloop/requests/log_response.py
index 15a4cff6..cb3ce212 100644
--- a/src/humanloop/requests/log_response.py
+++ b/src/humanloop/requests/log_response.py
@@ -9,6 +9,11 @@
from .tool_log_response import ToolLogResponseParams
from .evaluator_log_response import EvaluatorLogResponseParams
from .flow_log_response import FlowLogResponseParams
+ from .agent_log_response import AgentLogResponseParams
LogResponseParams = typing.Union[
- "PromptLogResponseParams", "ToolLogResponseParams", "EvaluatorLogResponseParams", "FlowLogResponseParams"
+ "PromptLogResponseParams",
+ "ToolLogResponseParams",
+ "EvaluatorLogResponseParams",
+ "FlowLogResponseParams",
+ "AgentLogResponseParams",
]
diff --git a/src/humanloop/requests/log_stream_response.py b/src/humanloop/requests/log_stream_response.py
new file mode 100644
index 00000000..e142e7fb
--- /dev/null
+++ b/src/humanloop/requests/log_stream_response.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .prompt_call_stream_response import PromptCallStreamResponseParams
+from .agent_log_stream_response import AgentLogStreamResponseParams
+
+LogStreamResponseParams = typing.Union[PromptCallStreamResponseParams, AgentLogStreamResponseParams]
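This union is also untagged. A sketch of one way to tell the variants apart at runtime; keying on "agent_id" (required on AgentLogStreamResponseParams) is an assumption about how the dicts differ, not a documented contract:

    from humanloop.requests.log_stream_response import LogStreamResponseParams

    def stream_log_origin(log: LogStreamResponseParams) -> str:
        # AgentLogStreamResponseParams requires agent_id; the prompt variant is assumed not to.
        return "agent" if "agent_id" in log else "prompt"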
diff --git a/src/humanloop/requests/paginated_data_agent_response.py b/src/humanloop/requests/paginated_data_agent_response.py
new file mode 100644
index 00000000..c8d67533
--- /dev/null
+++ b/src/humanloop/requests/paginated_data_agent_response.py
@@ -0,0 +1,12 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import typing
+from .agent_response import AgentResponseParams
+
+
+class PaginatedDataAgentResponseParams(typing_extensions.TypedDict):
+ records: typing.Sequence[AgentResponseParams]
+ page: int
+ size: int
+ total: int
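A hedged paging sketch over this shape: `fetch_page` is a hypothetical callable returning PaginatedDataAgentResponseParams-shaped dicts; only the records/page/size/total fields are taken from the type itself:

    import math

    from humanloop.requests.paginated_data_agent_response import PaginatedDataAgentResponseParams

    def iter_all_agents(fetch_page):
        first: PaginatedDataAgentResponseParams = fetch_page(page=1)
        yield from first["records"]
        if first["size"] <= 0:
            return
        last_page = math.ceil(first["total"] / first["size"])
        for page in range(2, last_page + 1):
            yield from fetch_page(page=page)["records"]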
diff --git a/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py b/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py
similarity index 65%
rename from src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py
rename to src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py
index cf8bc4bf..0e7adb64 100644
--- a/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py
+++ b/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py
@@ -2,16 +2,16 @@
import typing_extensions
import typing
-from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item import (
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams,
+from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item import (
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams,
)
-class PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseParams(
+class PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseParams(
typing_extensions.TypedDict
):
records: typing.Sequence[
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams
]
page: int
size: int
diff --git a/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py b/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py
similarity index 58%
rename from src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py
rename to src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py
index 1ba74108..b43a5521 100644
--- a/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py
+++ b/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py
@@ -6,9 +6,13 @@
from .dataset_response import DatasetResponseParams
from .evaluator_response import EvaluatorResponseParams
from .flow_response import FlowResponseParams
+from .agent_response import AgentResponseParams
-PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams = (
- typing.Union[
- PromptResponseParams, ToolResponseParams, DatasetResponseParams, EvaluatorResponseParams, FlowResponseParams
- ]
-)
+PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams = typing.Union[
+ PromptResponseParams,
+ ToolResponseParams,
+ DatasetResponseParams,
+ EvaluatorResponseParams,
+ FlowResponseParams,
+ AgentResponseParams,
+]
diff --git a/src/humanloop/requests/populate_template_response.py b/src/humanloop/requests/populate_template_response.py
index 190341b0..40b62295 100644
--- a/src/humanloop/requests/populate_template_response.py
+++ b/src/humanloop/requests/populate_template_response.py
@@ -9,7 +9,7 @@
from .populate_template_response_stop import PopulateTemplateResponseStopParams
import typing
from .response_format import ResponseFormatParams
-from ..types.reasoning_effort import ReasoningEffort
+from .populate_template_response_reasoning_effort import PopulateTemplateResponseReasoningEffortParams
from .tool_function import ToolFunctionParams
from .linked_tool_response import LinkedToolResponseParams
from .environment_response import EnvironmentResponseParams
@@ -119,9 +119,9 @@ class PopulateTemplateResponseParams(typing_extensions.TypedDict):
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
"""
- reasoning_effort: typing_extensions.NotRequired[ReasoningEffort]
+ reasoning_effort: typing_extensions.NotRequired[PopulateTemplateResponseReasoningEffortParams]
"""
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an `OpenAiReasoningEffort` value. Anthropic reasoning models expect an integer, which specifies the maximum token budget.
"""
tools: typing_extensions.NotRequired[typing.Sequence[ToolFunctionParams]]
@@ -169,6 +169,11 @@ class PopulateTemplateResponseParams(typing_extensions.TypedDict):
Name of the Prompt.
"""
+ schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The JSON schema for the Prompt.
+ """
+
version_id: str
"""
Unique identifier for the specific Prompt Version. If no query params provided, the default deployed Prompt Version is returned.
@@ -213,6 +218,11 @@ class PopulateTemplateResponseParams(typing_extensions.TypedDict):
Aggregation of Evaluator results for the Prompt Version.
"""
+ content: typing_extensions.NotRequired[str]
+ """
+ The serialized kernel for the Prompt. Corresponds to the .prompt file.
+ """
+
populated_template: typing_extensions.NotRequired[PopulateTemplateResponsePopulatedTemplateParams]
"""
The template populated with the input values you provided in the request. Returns None if no template exists.
diff --git a/src/humanloop/requests/populate_template_response_reasoning_effort.py b/src/humanloop/requests/populate_template_response_reasoning_effort.py
new file mode 100644
index 00000000..6b1dd46a
--- /dev/null
+++ b/src/humanloop/requests/populate_template_response_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ..types.open_ai_reasoning_effort import OpenAiReasoningEffort
+
+PopulateTemplateResponseReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int]
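This union is what lets `reasoning_effort` carry either provider shape. A sketch of both forms in kernel dicts; the "medium" literal and the model names are illustrative assumptions, not taken from this patch:

    # OpenAI-style: an OpenAiReasoningEffort value (assumed "low"/"medium"/"high").
    openai_kernel = {"model": "o3-mini", "reasoning_effort": "medium"}

    # Anthropic-style: an integer maximum reasoning-token budget.
    anthropic_kernel = {"model": "claude-3-7-sonnet", "reasoning_effort": 2048}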
diff --git a/src/humanloop/requests/prompt_kernel_request.py b/src/humanloop/requests/prompt_kernel_request.py
index 61355166..1e4f56de 100644
--- a/src/humanloop/requests/prompt_kernel_request.py
+++ b/src/humanloop/requests/prompt_kernel_request.py
@@ -9,11 +9,17 @@
from .prompt_kernel_request_stop import PromptKernelRequestStopParams
import typing
from .response_format import ResponseFormatParams
-from ..types.reasoning_effort import ReasoningEffort
+from .prompt_kernel_request_reasoning_effort import PromptKernelRequestReasoningEffortParams
from .tool_function import ToolFunctionParams
class PromptKernelRequestParams(typing_extensions.TypedDict):
+ """
+ Base class used by both PromptKernelRequest and AgentKernelRequest.
+
+ Contains the Prompt-related fields common to both.
+ """
+
model: str
"""
The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
@@ -89,9 +95,9 @@ class PromptKernelRequestParams(typing_extensions.TypedDict):
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
"""
- reasoning_effort: typing_extensions.NotRequired[ReasoningEffort]
+ reasoning_effort: typing_extensions.NotRequired[PromptKernelRequestReasoningEffortParams]
"""
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an `OpenAiReasoningEffort` value. Anthropic reasoning models expect an integer, which specifies the maximum token budget.
"""
tools: typing_extensions.NotRequired[typing.Sequence[ToolFunctionParams]]
diff --git a/src/humanloop/requests/prompt_kernel_request_reasoning_effort.py b/src/humanloop/requests/prompt_kernel_request_reasoning_effort.py
new file mode 100644
index 00000000..0c3d194b
--- /dev/null
+++ b/src/humanloop/requests/prompt_kernel_request_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ..types.open_ai_reasoning_effort import OpenAiReasoningEffort
+
+PromptKernelRequestReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/requests/prompt_response.py b/src/humanloop/requests/prompt_response.py
index 912866c5..05b4a71e 100644
--- a/src/humanloop/requests/prompt_response.py
+++ b/src/humanloop/requests/prompt_response.py
@@ -10,7 +10,7 @@
from .prompt_response_stop import PromptResponseStopParams
import typing
from .response_format import ResponseFormatParams
-from ..types.reasoning_effort import ReasoningEffort
+from .prompt_response_reasoning_effort import PromptResponseReasoningEffortParams
from .tool_function import ToolFunctionParams
from .linked_tool_response import LinkedToolResponseParams
from .environment_response import EnvironmentResponseParams
@@ -122,9 +122,9 @@ class PromptResponseParams(typing_extensions.TypedDict):
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
"""
- reasoning_effort: typing_extensions.NotRequired[ReasoningEffort]
+ reasoning_effort: typing_extensions.NotRequired[PromptResponseReasoningEffortParams]
"""
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an `OpenAiReasoningEffort` value. Anthropic reasoning models expect an integer, which specifies the maximum token budget.
"""
tools: typing_extensions.NotRequired[typing.Sequence[ToolFunctionParams]]
@@ -172,6 +172,11 @@ class PromptResponseParams(typing_extensions.TypedDict):
Name of the Prompt.
"""
+ schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The JSON schema for the Prompt.
+ """
+
version_id: str
"""
Unique identifier for the specific Prompt Version. If no query params provided, the default deployed Prompt Version is returned.
@@ -215,3 +220,8 @@ class PromptResponseParams(typing_extensions.TypedDict):
"""
Aggregation of Evaluator results for the Prompt Version.
"""
+
+ content: typing_extensions.NotRequired[str]
+ """
+ The serialized kernel for the Prompt. Corresponds to the .prompt file.
+ """
diff --git a/src/humanloop/requests/prompt_response_reasoning_effort.py b/src/humanloop/requests/prompt_response_reasoning_effort.py
new file mode 100644
index 00000000..4d019051
--- /dev/null
+++ b/src/humanloop/requests/prompt_response_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ..types.open_ai_reasoning_effort import OpenAiReasoningEffort
+
+PromptResponseReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/requests/run_version_response.py b/src/humanloop/requests/run_version_response.py
index 879ea25c..569d0d76 100644
--- a/src/humanloop/requests/run_version_response.py
+++ b/src/humanloop/requests/run_version_response.py
@@ -5,7 +5,8 @@
from .tool_response import ToolResponseParams
from .evaluator_response import EvaluatorResponseParams
from .flow_response import FlowResponseParams
+from .agent_response import AgentResponseParams
RunVersionResponseParams = typing.Union[
- PromptResponseParams, ToolResponseParams, EvaluatorResponseParams, FlowResponseParams
+ PromptResponseParams, ToolResponseParams, EvaluatorResponseParams, FlowResponseParams, AgentResponseParams
]
diff --git a/src/humanloop/requests/tool_call_response.py b/src/humanloop/requests/tool_call_response.py
new file mode 100644
index 00000000..1c92b28f
--- /dev/null
+++ b/src/humanloop/requests/tool_call_response.py
@@ -0,0 +1,145 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing_extensions
+import datetime as dt
+from .tool_response import ToolResponseParams
+import typing
+from ..types.log_status import LogStatus
+from .evaluator_log_response import EvaluatorLogResponseParams
+from .log_response import LogResponseParams
+
+
+class ToolCallResponseParams(typing_extensions.TypedDict):
+ """
+ Response model for a Tool call.
+ """
+
+ start_time: typing_extensions.NotRequired[dt.datetime]
+ """
+ When the logged event started.
+ """
+
+ end_time: typing_extensions.NotRequired[dt.datetime]
+ """
+ When the logged event ended.
+ """
+
+ tool: ToolResponseParams
+ """
+ Tool used to generate the Log.
+ """
+
+ output: typing_extensions.NotRequired[str]
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing_extensions.NotRequired[dt.datetime]
+ """
+ User-defined timestamp for when the log was created.
+ """
+
+ error: typing_extensions.NotRequired[str]
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing_extensions.NotRequired[float]
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing_extensions.NotRequired[str]
+ """
+ Captured log and debug statements.
+ """
+
+ provider_request: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Raw request sent to provider.
+ """
+
+ provider_response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Raw response received from the provider.
+ """
+
+ inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ The inputs passed to the prompt template.
+ """
+
+ source: typing_extensions.NotRequired[str]
+ """
+ Identifies where the model was called from.
+ """
+
+ metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+ """
+ Any additional metadata to record.
+ """
+
+ log_status: typing_extensions.NotRequired[LogStatus]
+ """
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+ """
+
+ source_datapoint_id: typing_extensions.NotRequired[str]
+ """
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ """
+
+ trace_parent_id: typing_extensions.NotRequired[str]
+ """
+ The ID of the parent Log to nest this Log under in a Trace.
+ """
+
+ batches: typing_extensions.NotRequired[typing.Sequence[str]]
+ """
+ Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations.
+ """
+
+ user: typing_extensions.NotRequired[str]
+ """
+ End-user ID related to the Log.
+ """
+
+ environment: typing_extensions.NotRequired[str]
+ """
+ The name of the Environment the Log is associated to.
+ """
+
+ save: typing_extensions.NotRequired[bool]
+ """
+ Whether the request/response payloads will be stored on Humanloop.
+ """
+
+ log_id: typing_extensions.NotRequired[str]
+ """
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ """
+
+ id: str
+ """
+ ID of the log.
+ """
+
+ evaluator_logs: typing.Sequence[EvaluatorLogResponseParams]
+ """
+ List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
+ """
+
+ trace_flow_id: typing_extensions.NotRequired[str]
+ """
+ Identifier for the Flow that the Trace belongs to.
+ """
+
+ trace_id: typing_extensions.NotRequired[str]
+ """
+ ID of the Trace containing the Tool Call Log.
+ """
+
+ trace_children: typing_extensions.NotRequired[typing.Sequence[LogResponseParams]]
+ """
+ Logs nested under this Log in the Trace.
+ """
diff --git a/src/humanloop/requests/tool_log_response.py b/src/humanloop/requests/tool_log_response.py
index bac9dbbb..1aa0daea 100644
--- a/src/humanloop/requests/tool_log_response.py
+++ b/src/humanloop/requests/tool_log_response.py
@@ -7,6 +7,7 @@
import typing
from ..types.log_status import LogStatus
from .tool_response import ToolResponseParams
+from .chat_message import ChatMessageParams
import typing
if typing.TYPE_CHECKING:
@@ -148,3 +149,8 @@ class ToolLogResponseParams(typing_extensions.TypedDict):
"""
Tool used to generate the Log.
"""
+
+ output_message: typing_extensions.NotRequired[ChatMessageParams]
+ """
+ The message returned by the Tool.
+ """
diff --git a/src/humanloop/requests/version_deployment_response_file.py b/src/humanloop/requests/version_deployment_response_file.py
index 8a16af00..9659cb49 100644
--- a/src/humanloop/requests/version_deployment_response_file.py
+++ b/src/humanloop/requests/version_deployment_response_file.py
@@ -10,6 +10,12 @@
from .tool_response import ToolResponseParams
from .evaluator_response import EvaluatorResponseParams
from .flow_response import FlowResponseParams
+ from .agent_response import AgentResponseParams
VersionDeploymentResponseFileParams = typing.Union[
- "PromptResponseParams", "ToolResponseParams", DatasetResponseParams, "EvaluatorResponseParams", "FlowResponseParams"
+ "PromptResponseParams",
+ "ToolResponseParams",
+ DatasetResponseParams,
+ "EvaluatorResponseParams",
+ "FlowResponseParams",
+ "AgentResponseParams",
]
diff --git a/src/humanloop/requests/version_id_response_version.py b/src/humanloop/requests/version_id_response_version.py
index 50ecf7bc..9c317679 100644
--- a/src/humanloop/requests/version_id_response_version.py
+++ b/src/humanloop/requests/version_id_response_version.py
@@ -10,6 +10,12 @@
from .tool_response import ToolResponseParams
from .evaluator_response import EvaluatorResponseParams
from .flow_response import FlowResponseParams
+ from .agent_response import AgentResponseParams
VersionIdResponseVersionParams = typing.Union[
- "PromptResponseParams", "ToolResponseParams", DatasetResponseParams, "EvaluatorResponseParams", "FlowResponseParams"
+ "PromptResponseParams",
+ "ToolResponseParams",
+ DatasetResponseParams,
+ "EvaluatorResponseParams",
+ "FlowResponseParams",
+ "AgentResponseParams",
]
diff --git a/src/humanloop/tools/client.py b/src/humanloop/tools/client.py
index 16d75bd7..ea6b14a2 100644
--- a/src/humanloop/tools/client.py
+++ b/src/humanloop/tools/client.py
@@ -3,10 +3,11 @@
import typing
from ..core.client_wrapper import SyncClientWrapper
from .raw_client import RawToolsClient
+from ..requests.tool_kernel_request import ToolKernelRequestParams
import datetime as dt
from ..types.log_status import LogStatus
-from ..requests.tool_kernel_request import ToolKernelRequestParams
from ..core.request_options import RequestOptions
+from ..types.tool_call_response import ToolCallResponse
from ..types.create_tool_log_response import CreateToolLogResponse
from ..types.log_response import LogResponse
from ..types.project_sort_by import ProjectSortBy
@@ -29,6 +30,8 @@
from ..requests.evaluator_activation_deactivation_request_deactivate_item import (
EvaluatorActivationDeactivationRequestDeactivateItemParams,
)
+from ..types.file_environment_variable_request import FileEnvironmentVariableRequest
+from ..requests.file_environment_variable_request import FileEnvironmentVariableRequestParams
from ..core.client_wrapper import AsyncClientWrapper
from .raw_client import AsyncRawToolsClient
from ..core.pagination import AsyncPager
@@ -52,6 +55,133 @@ def with_raw_response(self) -> RawToolsClient:
"""
return self._raw_client
+ def call(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ tool: typing.Optional[ToolKernelRequestParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ tool_call_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> ToolCallResponse:
+ """
+ Call a Tool.
+
+ Calling a Tool with inputs runs the tool's source code and logs the result and metadata to Humanloop.
+
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Tool. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can pass in
+ Tool details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Tool. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Tool details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Tool to call.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to call.
+
+ path : typing.Optional[str]
+ Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Tool.
+
+ tool : typing.Optional[ToolKernelRequestParams]
+ Details of your Tool. A new Tool version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ tool_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ ToolCallResponse
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.tools.call()
+ """
+ response = self._raw_client.call(
+ version_id=version_id,
+ environment=environment,
+ path=path,
+ id=id,
+ tool=tool,
+ inputs=inputs,
+ source=source,
+ metadata=metadata,
+ start_time=start_time,
+ end_time=end_time,
+ log_status=log_status,
+ source_datapoint_id=source_datapoint_id,
+ trace_parent_id=trace_parent_id,
+ user=user,
+ tool_call_request_environment=tool_call_request_environment,
+ save=save,
+ log_id=log_id,
+ request_options=request_options,
+ )
+ return response.data
+
def log(
self,
*,
@@ -59,6 +189,7 @@ def log(
environment: typing.Optional[str] = None,
path: typing.Optional[str] = OMIT,
id: typing.Optional[str] = OMIT,
+ tool: typing.Optional[ToolKernelRequestParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -78,7 +209,6 @@ def log(
tool_log_request_environment: typing.Optional[str] = OMIT,
save: typing.Optional[bool] = OMIT,
log_id: typing.Optional[str] = OMIT,
- tool: typing.Optional[ToolKernelRequestParams] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> CreateToolLogResponse:
"""
@@ -106,6 +236,9 @@ def log(
id : typing.Optional[str]
ID for an existing Tool.
+ tool : typing.Optional[ToolKernelRequestParams]
+ Details of your Tool. A new Tool version will be created if the provided details are new.
+
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -163,9 +296,6 @@ def log(
log_id : typing.Optional[str]
This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- tool : typing.Optional[ToolKernelRequestParams]
- Details of your Tool. A new Tool version will be created if the provided details are new.
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -206,6 +336,7 @@ def log(
environment=environment,
path=path,
id=id,
+ tool=tool,
start_time=start_time,
end_time=end_time,
output=output,
@@ -225,7 +356,6 @@ def log(
tool_log_request_environment=tool_log_request_environment,
save=save,
log_id=log_id,
- tool=tool,
request_options=request_options,
)
return response.data
@@ -966,6 +1096,112 @@ def update_monitoring(
)
return response.data
+ def get_environment_variables(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> typing.List[FileEnvironmentVariableRequest]:
+ """
+ Parameters
+ ----------
+ id : str
+ Unique identifier for File.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.List[FileEnvironmentVariableRequest]
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.tools.get_environment_variables(
+ id="id",
+ )
+ """
+ response = self._raw_client.get_environment_variables(id, request_options=request_options)
+ return response.data
+
+ def add_environment_variable(
+ self,
+ id: str,
+ *,
+ request: typing.Sequence[FileEnvironmentVariableRequestParams],
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.List[FileEnvironmentVariableRequest]:
+ """
+ Add an environment variable to a Tool.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Tool.
+
+ request : typing.Sequence[FileEnvironmentVariableRequestParams]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.List[FileEnvironmentVariableRequest]
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.tools.add_environment_variable(
+ id="id",
+ request=[{"name": "name", "value": "value"}],
+ )
+ """
+ response = self._raw_client.add_environment_variable(id, request=request, request_options=request_options)
+ return response.data
+
+ def delete_environment_variable(
+ self, id: str, name: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> typing.List[FileEnvironmentVariableRequest]:
+ """
+ Parameters
+ ----------
+ id : str
+ Unique identifier for File.
+
+ name : str
+ Name of the Environment Variable to delete.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.List[FileEnvironmentVariableRequest]
+ Successful Response
+
+ Examples
+ --------
+ from humanloop import Humanloop
+
+ client = Humanloop(
+ api_key="YOUR_API_KEY",
+ )
+ client.tools.delete_environment_variable(
+ id="id",
+ name="name",
+ )
+ """
+ response = self._raw_client.delete_environment_variable(id, name, request_options=request_options)
+ return response.data
+
class AsyncToolsClient:
def __init__(self, *, client_wrapper: AsyncClientWrapper):
@@ -982,6 +1218,141 @@ def with_raw_response(self) -> AsyncRawToolsClient:
"""
return self._raw_client
+ async def call(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ tool: typing.Optional[ToolKernelRequestParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ tool_call_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> ToolCallResponse:
+ """
+ Call a Tool.
+
+ Calling a Tool with inputs runs the tool's source code and logs the result and metadata to Humanloop.
+
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Tool. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can pass in
+ Tool details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Tool. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Tool details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Tool to call.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to call.
+
+ path : typing.Optional[str]
+ Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Tool.
+
+ tool : typing.Optional[ToolKernelRequestParams]
+ Details of your Tool. A new Tool version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ tool_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ ToolCallResponse
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.tools.call()
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.call(
+ version_id=version_id,
+ environment=environment,
+ path=path,
+ id=id,
+ tool=tool,
+ inputs=inputs,
+ source=source,
+ metadata=metadata,
+ start_time=start_time,
+ end_time=end_time,
+ log_status=log_status,
+ source_datapoint_id=source_datapoint_id,
+ trace_parent_id=trace_parent_id,
+ user=user,
+ tool_call_request_environment=tool_call_request_environment,
+ save=save,
+ log_id=log_id,
+ request_options=request_options,
+ )
+ return response.data
+
async def log(
self,
*,
@@ -989,6 +1360,7 @@ async def log(
environment: typing.Optional[str] = None,
path: typing.Optional[str] = OMIT,
id: typing.Optional[str] = OMIT,
+ tool: typing.Optional[ToolKernelRequestParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -1008,7 +1380,6 @@ async def log(
tool_log_request_environment: typing.Optional[str] = OMIT,
save: typing.Optional[bool] = OMIT,
log_id: typing.Optional[str] = OMIT,
- tool: typing.Optional[ToolKernelRequestParams] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> CreateToolLogResponse:
"""
@@ -1036,6 +1407,9 @@ async def log(
id : typing.Optional[str]
ID for an existing Tool.
+ tool : typing.Optional[ToolKernelRequestParams]
+ Details of your Tool. A new Tool version will be created if the provided details are new.
+
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -1093,9 +1467,6 @@ async def log(
log_id : typing.Optional[str]
This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- tool : typing.Optional[ToolKernelRequestParams]
- Details of your Tool. A new Tool version will be created if the provided details are new.
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -1144,6 +1515,7 @@ async def main() -> None:
environment=environment,
path=path,
id=id,
+ tool=tool,
start_time=start_time,
end_time=end_time,
output=output,
@@ -1163,7 +1535,6 @@ async def main() -> None:
tool_log_request_environment=tool_log_request_environment,
save=save,
log_id=log_id,
- tool=tool,
request_options=request_options,
)
return response.data
@@ -2010,3 +2381,133 @@ async def main() -> None:
id, activate=activate, deactivate=deactivate, request_options=request_options
)
return response.data
+
+ async def get_environment_variables(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> typing.List[FileEnvironmentVariableRequest]:
+ """
+ Parameters
+ ----------
+ id : str
+ Unique identifier for File.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.List[FileEnvironmentVariableRequest]
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.tools.get_environment_variables(
+ id="id",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.get_environment_variables(id, request_options=request_options)
+ return response.data
+
+ async def add_environment_variable(
+ self,
+ id: str,
+ *,
+ request: typing.Sequence[FileEnvironmentVariableRequestParams],
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> typing.List[FileEnvironmentVariableRequest]:
+ """
+ Add an environment variable to a Tool.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Tool.
+
+ request : typing.Sequence[FileEnvironmentVariableRequestParams]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.List[FileEnvironmentVariableRequest]
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.tools.add_environment_variable(
+ id="id",
+ request=[{"name": "name", "value": "value"}],
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.add_environment_variable(id, request=request, request_options=request_options)
+ return response.data
+
+ async def delete_environment_variable(
+ self, id: str, name: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> typing.List[FileEnvironmentVariableRequest]:
+ """
+ Parameters
+ ----------
+ id : str
+ Unique identifier for File.
+
+ name : str
+ Name of the Environment Variable to delete.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.List[FileEnvironmentVariableRequest]
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from humanloop import AsyncHumanloop
+
+ client = AsyncHumanloop(
+ api_key="YOUR_API_KEY",
+ )
+
+
+ async def main() -> None:
+ await client.tools.delete_environment_variable(
+ id="id",
+ name="name",
+ )
+
+
+ asyncio.run(main())
+ """
+ response = await self._raw_client.delete_environment_variable(id, name, request_options=request_options)
+ return response.data
diff --git a/src/humanloop/tools/raw_client.py b/src/humanloop/tools/raw_client.py
index 4a1f29e9..b412b771 100644
--- a/src/humanloop/tools/raw_client.py
+++ b/src/humanloop/tools/raw_client.py
@@ -2,18 +2,19 @@
import typing
from ..core.client_wrapper import SyncClientWrapper
+from ..requests.tool_kernel_request import ToolKernelRequestParams
import datetime as dt
from ..types.log_status import LogStatus
-from ..requests.tool_kernel_request import ToolKernelRequestParams
from ..core.request_options import RequestOptions
from ..core.http_response import HttpResponse
-from ..types.create_tool_log_response import CreateToolLogResponse
+from ..types.tool_call_response import ToolCallResponse
from ..core.serialization import convert_and_respect_annotation_metadata
from ..core.unchecked_base_model import construct_type
from ..errors.unprocessable_entity_error import UnprocessableEntityError
from ..types.http_validation_error import HttpValidationError
from json.decoder import JSONDecodeError
from ..core.api_error import ApiError
+from ..types.create_tool_log_response import CreateToolLogResponse
from ..types.log_response import LogResponse
from ..core.jsonable_encoder import jsonable_encoder
from ..requests.tool_function import ToolFunctionParams
@@ -27,6 +28,8 @@
from ..requests.evaluator_activation_deactivation_request_deactivate_item import (
EvaluatorActivationDeactivationRequestDeactivateItemParams,
)
+from ..types.file_environment_variable_request import FileEnvironmentVariableRequest
+from ..requests.file_environment_variable_request import FileEnvironmentVariableRequestParams
from ..core.client_wrapper import AsyncClientWrapper
from ..core.http_response import AsyncHttpResponse
@@ -38,6 +41,159 @@ class RawToolsClient:
def __init__(self, *, client_wrapper: SyncClientWrapper):
self._client_wrapper = client_wrapper
+ def call(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ tool: typing.Optional[ToolKernelRequestParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ tool_call_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[ToolCallResponse]:
+ """
+ Call a Tool.
+
+ Calling a Tool with inputs runs the tool's source code and logs the result and metadata to Humanloop.
+
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Tool. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can pass in
+ Tool details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Tool. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Tool details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Tool to call.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to call.
+
+ path : typing.Optional[str]
+ Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Tool.
+
+ tool : typing.Optional[ToolKernelRequestParams]
+ Details of your Tool. A new Tool version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ tool_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[ToolCallResponse]
+ Successful Response
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "tools/call",
+ method="POST",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ json={
+ "path": path,
+ "id": id,
+ "tool": convert_and_respect_annotation_metadata(
+ object_=tool, annotation=ToolKernelRequestParams, direction="write"
+ ),
+ "inputs": inputs,
+ "source": source,
+ "metadata": metadata,
+ "start_time": start_time,
+ "end_time": end_time,
+ "log_status": log_status,
+ "source_datapoint_id": source_datapoint_id,
+ "trace_parent_id": trace_parent_id,
+ "user": user,
+ "environment": tool_call_request_environment,
+ "save": save,
+ "log_id": log_id,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ ToolCallResponse,
+ construct_type(
+ type_=ToolCallResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
def log(
self,
*,
@@ -45,6 +201,7 @@ def log(
environment: typing.Optional[str] = None,
path: typing.Optional[str] = OMIT,
id: typing.Optional[str] = OMIT,
+ tool: typing.Optional[ToolKernelRequestParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -64,7 +221,6 @@ def log(
tool_log_request_environment: typing.Optional[str] = OMIT,
save: typing.Optional[bool] = OMIT,
log_id: typing.Optional[str] = OMIT,
- tool: typing.Optional[ToolKernelRequestParams] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[CreateToolLogResponse]:
"""
@@ -92,6 +248,9 @@ def log(
id : typing.Optional[str]
ID for an existing Tool.
+ tool : typing.Optional[ToolKernelRequestParams]
+ Details of your Tool. A new Tool version will be created if the provided details are new.
+
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -149,9 +308,6 @@ def log(
log_id : typing.Optional[str]
This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- tool : typing.Optional[ToolKernelRequestParams]
- Details of your Tool. A new Tool version will be created if the provided details are new.
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -170,6 +326,9 @@ def log(
json={
"path": path,
"id": id,
+ "tool": convert_and_respect_annotation_metadata(
+ object_=tool, annotation=ToolKernelRequestParams, direction="write"
+ ),
"start_time": start_time,
"end_time": end_time,
"output": output,
@@ -189,9 +348,6 @@ def log(
"environment": tool_log_request_environment,
"save": save,
"log_id": log_id,
- "tool": convert_and_respect_annotation_metadata(
- object_=tool, annotation=ToolKernelRequestParams, direction="write"
- ),
},
headers={
"content-type": "application/json",
@@ -1038,75 +1194,387 @@ def update_monitoring(
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
+ def get_environment_variables(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> HttpResponse[typing.List[FileEnvironmentVariableRequest]]:
+ """
+ Parameters
+ ----------
+ id : str
+ Unique identifier for File.
-class AsyncRawToolsClient:
- def __init__(self, *, client_wrapper: AsyncClientWrapper):
- self._client_wrapper = client_wrapper
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- async def log(
+ Returns
+ -------
+ HttpResponse[typing.List[FileEnvironmentVariableRequest]]
+ Successful Response
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"tools/{jsonable_encoder(id)}/environment-variables",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ typing.List[FileEnvironmentVariableRequest],
+ construct_type(
+ type_=typing.List[FileEnvironmentVariableRequest], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def add_environment_variable(
self,
+ id: str,
*,
- version_id: typing.Optional[str] = None,
- environment: typing.Optional[str] = None,
- path: typing.Optional[str] = OMIT,
- id: typing.Optional[str] = OMIT,
- start_time: typing.Optional[dt.datetime] = OMIT,
- end_time: typing.Optional[dt.datetime] = OMIT,
- output: typing.Optional[str] = OMIT,
- created_at: typing.Optional[dt.datetime] = OMIT,
- error: typing.Optional[str] = OMIT,
- provider_latency: typing.Optional[float] = OMIT,
- stdout: typing.Optional[str] = OMIT,
- provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- source: typing.Optional[str] = OMIT,
- metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
- source_datapoint_id: typing.Optional[str] = OMIT,
- trace_parent_id: typing.Optional[str] = OMIT,
- user: typing.Optional[str] = OMIT,
- tool_log_request_environment: typing.Optional[str] = OMIT,
- save: typing.Optional[bool] = OMIT,
- log_id: typing.Optional[str] = OMIT,
- tool: typing.Optional[ToolKernelRequestParams] = OMIT,
+ request: typing.Sequence[FileEnvironmentVariableRequestParams],
request_options: typing.Optional[RequestOptions] = None,
- ) -> AsyncHttpResponse[CreateToolLogResponse]:
+ ) -> HttpResponse[typing.List[FileEnvironmentVariableRequest]]:
"""
- Log to a Tool.
-
- You can use query parameters `version_id`, or `environment`, to target
- an existing version of the Tool. Otherwise the default deployed version will be chosen.
-
- Instead of targeting an existing version explicitly, you can instead pass in
- Tool details in the request body. In this case, we will check if the details correspond
- to an existing version of the Tool, if not we will create a new version. This is helpful
- in the case where you are storing or deriving your Tool details in code.
+ Add an environment variable to a Tool.
Parameters
----------
- version_id : typing.Optional[str]
- A specific Version ID of the Tool to log to.
-
- environment : typing.Optional[str]
- Name of the Environment identifying a deployed version to log to.
-
- path : typing.Optional[str]
- Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
-
- id : typing.Optional[str]
- ID for an existing Tool.
-
- start_time : typing.Optional[dt.datetime]
- When the logged event started.
+ id : str
+ Unique identifier for Tool.
- end_time : typing.Optional[dt.datetime]
- When the logged event ended.
+ request : typing.Sequence[FileEnvironmentVariableRequestParams]
- output : typing.Optional[str]
- Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- created_at : typing.Optional[dt.datetime]
+ Returns
+ -------
+ HttpResponse[typing.List[FileEnvironmentVariableRequest]]
+ Successful Response
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"tools/{jsonable_encoder(id)}/environment-variables",
+ method="POST",
+ json=convert_and_respect_annotation_metadata(
+ object_=request, annotation=typing.Sequence[FileEnvironmentVariableRequestParams], direction="write"
+ ),
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ typing.List[FileEnvironmentVariableRequest],
+ construct_type(
+ type_=typing.List[FileEnvironmentVariableRequest], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def delete_environment_variable(
+ self, id: str, name: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> HttpResponse[typing.List[FileEnvironmentVariableRequest]]:
+ """
+ Parameters
+ ----------
+ id : str
+ Unique identifier for File.
+
+ name : str
+ Name of the Environment Variable to delete.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[typing.List[FileEnvironmentVariableRequest]]
+ Successful Response
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"tools/{jsonable_encoder(id)}/environment-variables/{jsonable_encoder(name)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ typing.List[FileEnvironmentVariableRequest],
+ construct_type(
+ type_=typing.List[FileEnvironmentVariableRequest], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncRawToolsClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def call(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ tool: typing.Optional[ToolKernelRequestParams] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ tool_call_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[ToolCallResponse]:
+ """
+ Call a Tool.
+
+ Calling a Tool with inputs runs the tool's source code and logs the result and metadata to Humanloop.
+
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Tool. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can pass in
+ Tool details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Tool. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Tool details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Tool to call.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to call.
+
+ path : typing.Optional[str]
+ Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Tool.
+
+ tool : typing.Optional[ToolKernelRequestParams]
+ Details of your Tool. A new Tool version will be created if the provided details are new.
+
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
+
+ source : typing.Optional[str]
+ Identifies where the model was called from.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ log_status : typing.Optional[LogStatus]
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
+
+ user : typing.Optional[str]
+ End-user ID related to the Log.
+
+ tool_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
+
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
+
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[ToolCallResponse]
+ Successful Response
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "tools/call",
+ method="POST",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ },
+ json={
+ "path": path,
+ "id": id,
+ "tool": convert_and_respect_annotation_metadata(
+ object_=tool, annotation=ToolKernelRequestParams, direction="write"
+ ),
+ "inputs": inputs,
+ "source": source,
+ "metadata": metadata,
+ "start_time": start_time,
+ "end_time": end_time,
+ "log_status": log_status,
+ "source_datapoint_id": source_datapoint_id,
+ "trace_parent_id": trace_parent_id,
+ "user": user,
+ "environment": tool_call_request_environment,
+ "save": save,
+ "log_id": log_id,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ ToolCallResponse,
+ construct_type(
+ type_=ToolCallResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
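A minimal usage sketch for the endpoint above. The method name and parameters come straight from the signature; the setup around it is an assumption, since the AsyncClientWrapper is normally built by the SDK's top-level async client rather than by hand:

import asyncio

async def main(client_wrapper):  # assumed: an already-configured AsyncClientWrapper
    tools = AsyncRawToolsClient(client_wrapper=client_wrapper)
    # Call the default deployed version of a Tool at a hypothetical path,
    # passing template inputs and tagging where the call came from.
    response = await tools.call(
        path="folder/my-tool",
        inputs={"query": "2 + 2"},
        source="docs-example",
    )
    tool_log = response.data  # the parsed ToolCallResponse; response also wraps the raw HTTP details

asyncio.run(main(client_wrapper=...))  # placeholder; supply a real wrapper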
+ async def log(
+ self,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ path: typing.Optional[str] = OMIT,
+ id: typing.Optional[str] = OMIT,
+ tool: typing.Optional[ToolKernelRequestParams] = OMIT,
+ start_time: typing.Optional[dt.datetime] = OMIT,
+ end_time: typing.Optional[dt.datetime] = OMIT,
+ output: typing.Optional[str] = OMIT,
+ created_at: typing.Optional[dt.datetime] = OMIT,
+ error: typing.Optional[str] = OMIT,
+ provider_latency: typing.Optional[float] = OMIT,
+ stdout: typing.Optional[str] = OMIT,
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ source: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ log_status: typing.Optional[LogStatus] = OMIT,
+ source_datapoint_id: typing.Optional[str] = OMIT,
+ trace_parent_id: typing.Optional[str] = OMIT,
+ user: typing.Optional[str] = OMIT,
+ tool_log_request_environment: typing.Optional[str] = OMIT,
+ save: typing.Optional[bool] = OMIT,
+ log_id: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[CreateToolLogResponse]:
+ """
+ Log to a Tool.
+
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Tool. Otherwise, the default deployed version will be chosen.
+
+ Instead of targeting an existing version explicitly, you can pass in
+ Tool details in the request body. In this case, we will check whether the details correspond
+ to an existing version of the Tool. If they do not, we will create a new version. This is helpful
+ when you are storing or deriving your Tool details in code.
+
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Tool to log to.
+
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
+
+ path : typing.Optional[str]
+ Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+ id : typing.Optional[str]
+ ID for an existing Tool.
+
+ tool : typing.Optional[ToolKernelRequestParams]
+ Details of your Tool. A new Tool version will be created if the provided details are new.
+
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
+
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
+
+ output : typing.Optional[str]
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+ created_at : typing.Optional[dt.datetime]
User defined timestamp for when the log was created.
error : typing.Optional[str]
@@ -1154,9 +1622,6 @@ async def log(
log_id : typing.Optional[str]
This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- tool : typing.Optional[ToolKernelRequestParams]
- Details of your Tool. A new Tool version will be created if the provided details are new.
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -1175,6 +1640,9 @@ async def log(
json={
"path": path,
"id": id,
+ "tool": convert_and_respect_annotation_metadata(
+ object_=tool, annotation=ToolKernelRequestParams, direction="write"
+ ),
"start_time": start_time,
"end_time": end_time,
"output": output,
@@ -1194,9 +1662,6 @@ async def log(
"environment": tool_log_request_environment,
"save": save,
"log_id": log_id,
- "tool": convert_and_respect_annotation_metadata(
- object_=tool, annotation=ToolKernelRequestParams, direction="write"
- ),
},
headers={
"content-type": "application/json",
@@ -2044,3 +2509,159 @@ async def update_monitoring(
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def get_environment_variables(
+ self, id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncHttpResponse[typing.List[FileEnvironmentVariableRequest]]:
+ """
+ Parameters
+ ----------
+ id : str
+ Unique identifier for File.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[typing.List[FileEnvironmentVariableRequest]]
+ Successful Response
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"tools/{jsonable_encoder(id)}/environment-variables",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ typing.List[FileEnvironmentVariableRequest],
+ construct_type(
+ type_=typing.List[FileEnvironmentVariableRequest], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def add_environment_variable(
+ self,
+ id: str,
+ *,
+ request: typing.Sequence[FileEnvironmentVariableRequestParams],
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[typing.List[FileEnvironmentVariableRequest]]:
+ """
+ Add an environment variable to a Tool.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Tool.
+
+ request : typing.Sequence[FileEnvironmentVariableRequestParams]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[typing.List[FileEnvironmentVariableRequest]]
+ Successful Response
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"tools/{jsonable_encoder(id)}/environment-variables",
+ method="POST",
+ json=convert_and_respect_annotation_metadata(
+ object_=request, annotation=typing.Sequence[FileEnvironmentVariableRequestParams], direction="write"
+ ),
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ typing.List[FileEnvironmentVariableRequest],
+ construct_type(
+ type_=typing.List[FileEnvironmentVariableRequest], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def delete_environment_variable(
+ self, id: str, name: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> AsyncHttpResponse[typing.List[FileEnvironmentVariableRequest]]:
+ """
+ Parameters
+ ----------
+ id : str
+ Unique identifier for File.
+
+ name : str
+ Name of the Environment Variable to delete.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[typing.List[FileEnvironmentVariableRequest]]
+ Successful Response
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"tools/{jsonable_encoder(id)}/environment-variables/{jsonable_encoder(name)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ typing.List[FileEnvironmentVariableRequest],
+ construct_type(
+ type_=typing.List[FileEnvironmentVariableRequest], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
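Together, the three environment-variable methods above form a small CRUD surface, each returning the Tool's updated variable list. A sketch of a round trip; the dict shape for FileEnvironmentVariableRequestParams ("name"/"value") is an assumption, as its definition sits outside this hunk:

async def rotate_secret(tools: AsyncRawToolsClient, tool_id: str) -> None:
    # Add a variable to the Tool (assumed field names).
    await tools.add_environment_variable(
        tool_id,
        request=[{"name": "API_KEY", "value": "sk-new"}],
    )
    # List what is currently set on the Tool.
    current = (await tools.get_environment_variables(tool_id)).data
    # Remove it by name once rotated out.
    await tools.delete_environment_variable(tool_id, "API_KEY")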
diff --git a/src/humanloop/types/__init__.py b/src/humanloop/types/__init__.py
index 156f4e9a..8130325d 100644
--- a/src/humanloop/types/__init__.py
+++ b/src/humanloop/types/__init__.py
@@ -1,15 +1,44 @@
# This file was auto-generated by Fern from our API Definition.
+from .agent_call_response import AgentCallResponse
+from .agent_call_response_tool_choice import AgentCallResponseToolChoice
+from .agent_call_stream_response import AgentCallStreamResponse
+from .agent_call_stream_response_payload import AgentCallStreamResponsePayload
from .agent_config_response import AgentConfigResponse
+from .agent_continue_response import AgentContinueResponse
+from .agent_continue_response_tool_choice import AgentContinueResponseToolChoice
+from .agent_continue_stream_response import AgentContinueStreamResponse
+from .agent_continue_stream_response_payload import AgentContinueStreamResponsePayload
+from .agent_inline_tool import AgentInlineTool
+from .agent_kernel_request import AgentKernelRequest
+from .agent_kernel_request_reasoning_effort import AgentKernelRequestReasoningEffort
+from .agent_kernel_request_stop import AgentKernelRequestStop
+from .agent_kernel_request_template import AgentKernelRequestTemplate
+from .agent_kernel_request_tools_item import AgentKernelRequestToolsItem
+from .agent_linked_file_request import AgentLinkedFileRequest
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_linked_file_response_file import AgentLinkedFileResponseFile
+from .agent_log_response import AgentLogResponse
+from .agent_log_response_tool_choice import AgentLogResponseToolChoice
+from .agent_log_stream_response import AgentLogStreamResponse
+from .agent_response import AgentResponse
+from .agent_response_reasoning_effort import AgentResponseReasoningEffort
+from .agent_response_stop import AgentResponseStop
+from .agent_response_template import AgentResponseTemplate
+from .agent_response_tools_item import AgentResponseToolsItem
+from .anthropic_redacted_thinking_content import AnthropicRedactedThinkingContent
+from .anthropic_thinking_content import AnthropicThinkingContent
from .base_models_user_response import BaseModelsUserResponse
from .boolean_evaluator_stats_response import BooleanEvaluatorStatsResponse
from .chat_message import ChatMessage
from .chat_message_content import ChatMessageContent
from .chat_message_content_item import ChatMessageContentItem
+from .chat_message_thinking_item import ChatMessageThinkingItem
from .chat_role import ChatRole
from .chat_tool_type import ChatToolType
from .code_evaluator_request import CodeEvaluatorRequest
from .config_tool_response import ConfigToolResponse
+from .create_agent_log_response import CreateAgentLogResponse
from .create_datapoint_request import CreateDatapointRequest
from .create_datapoint_request_target_value import CreateDatapointRequestTargetValue
from .create_evaluator_log_response import CreateEvaluatorLogResponse
@@ -56,10 +85,12 @@
from .evaluator_return_type_enum import EvaluatorReturnTypeEnum
from .evaluator_version_id import EvaluatorVersionId
from .evaluators_request import EvaluatorsRequest
+from .event_type import EventType
from .external_evaluator_request import ExternalEvaluatorRequest
from .feedback_type import FeedbackType
from .file_environment_response import FileEnvironmentResponse
from .file_environment_response_file import FileEnvironmentResponseFile
+from .file_environment_variable_request import FileEnvironmentVariableRequest
from .file_id import FileId
from .file_path import FilePath
from .file_request import FileRequest
@@ -77,7 +108,9 @@
from .image_url import ImageUrl
from .image_url_detail import ImageUrlDetail
from .input_response import InputResponse
+from .linked_file_request import LinkedFileRequest
from .linked_tool_response import LinkedToolResponse
+from .list_agents import ListAgents
from .list_datasets import ListDatasets
from .list_evaluators import ListEvaluators
from .list_flows import ListFlows
@@ -86,6 +119,7 @@
from .llm_evaluator_request import LlmEvaluatorRequest
from .log_response import LogResponse
from .log_status import LogStatus
+from .log_stream_response import LogStreamResponse
from .model_endpoints import ModelEndpoints
from .model_providers import ModelProviders
from .monitoring_evaluator_environment_request import MonitoringEvaluatorEnvironmentRequest
@@ -94,18 +128,21 @@
from .monitoring_evaluator_version_request import MonitoringEvaluatorVersionRequest
from .numeric_evaluator_stats_response import NumericEvaluatorStatsResponse
from .observability_status import ObservabilityStatus
+from .on_agent_call_enum import OnAgentCallEnum
+from .open_ai_reasoning_effort import OpenAiReasoningEffort
from .overall_stats import OverallStats
+from .paginated_data_agent_response import PaginatedDataAgentResponse
from .paginated_data_evaluation_log_response import PaginatedDataEvaluationLogResponse
from .paginated_data_evaluator_response import PaginatedDataEvaluatorResponse
from .paginated_data_flow_response import PaginatedDataFlowResponse
from .paginated_data_log_response import PaginatedDataLogResponse
from .paginated_data_prompt_response import PaginatedDataPromptResponse
from .paginated_data_tool_response import PaginatedDataToolResponse
-from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response import (
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse,
+from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response import (
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse,
)
-from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item import (
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem,
+from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item import (
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem,
)
from .paginated_datapoint_response import PaginatedDatapointResponse
from .paginated_dataset_response import PaginatedDatasetResponse
@@ -115,6 +152,7 @@
from .platform_access_enum import PlatformAccessEnum
from .populate_template_response import PopulateTemplateResponse
from .populate_template_response_populated_template import PopulateTemplateResponsePopulatedTemplate
+from .populate_template_response_reasoning_effort import PopulateTemplateResponseReasoningEffort
from .populate_template_response_stop import PopulateTemplateResponseStop
from .populate_template_response_template import PopulateTemplateResponseTemplate
from .project_sort_by import ProjectSortBy
@@ -123,15 +161,16 @@
from .prompt_call_response_tool_choice import PromptCallResponseToolChoice
from .prompt_call_stream_response import PromptCallStreamResponse
from .prompt_kernel_request import PromptKernelRequest
+from .prompt_kernel_request_reasoning_effort import PromptKernelRequestReasoningEffort
from .prompt_kernel_request_stop import PromptKernelRequestStop
from .prompt_kernel_request_template import PromptKernelRequestTemplate
from .prompt_log_response import PromptLogResponse
from .prompt_log_response_tool_choice import PromptLogResponseToolChoice
from .prompt_response import PromptResponse
+from .prompt_response_reasoning_effort import PromptResponseReasoningEffort
from .prompt_response_stop import PromptResponseStop
from .prompt_response_template import PromptResponseTemplate
from .provider_api_keys import ProviderApiKeys
-from .reasoning_effort import ReasoningEffort
from .response_format import ResponseFormat
from .response_format_type import ResponseFormatType
from .run_stats_response import RunStatsResponse
@@ -144,6 +183,7 @@
from .text_evaluator_stats_response import TextEvaluatorStatsResponse
from .time_unit import TimeUnit
from .tool_call import ToolCall
+from .tool_call_response import ToolCallResponse
from .tool_choice import ToolChoice
from .tool_function import ToolFunction
from .tool_kernel_request import ToolKernelRequest
@@ -167,16 +207,45 @@
from .version_status import VersionStatus
__all__ = [
+ "AgentCallResponse",
+ "AgentCallResponseToolChoice",
+ "AgentCallStreamResponse",
+ "AgentCallStreamResponsePayload",
"AgentConfigResponse",
+ "AgentContinueResponse",
+ "AgentContinueResponseToolChoice",
+ "AgentContinueStreamResponse",
+ "AgentContinueStreamResponsePayload",
+ "AgentInlineTool",
+ "AgentKernelRequest",
+ "AgentKernelRequestReasoningEffort",
+ "AgentKernelRequestStop",
+ "AgentKernelRequestTemplate",
+ "AgentKernelRequestToolsItem",
+ "AgentLinkedFileRequest",
+ "AgentLinkedFileResponse",
+ "AgentLinkedFileResponseFile",
+ "AgentLogResponse",
+ "AgentLogResponseToolChoice",
+ "AgentLogStreamResponse",
+ "AgentResponse",
+ "AgentResponseReasoningEffort",
+ "AgentResponseStop",
+ "AgentResponseTemplate",
+ "AgentResponseToolsItem",
+ "AnthropicRedactedThinkingContent",
+ "AnthropicThinkingContent",
"BaseModelsUserResponse",
"BooleanEvaluatorStatsResponse",
"ChatMessage",
"ChatMessageContent",
"ChatMessageContentItem",
+ "ChatMessageThinkingItem",
"ChatRole",
"ChatToolType",
"CodeEvaluatorRequest",
"ConfigToolResponse",
+ "CreateAgentLogResponse",
"CreateDatapointRequest",
"CreateDatapointRequestTargetValue",
"CreateEvaluatorLogResponse",
@@ -221,10 +290,12 @@
"EvaluatorReturnTypeEnum",
"EvaluatorVersionId",
"EvaluatorsRequest",
+ "EventType",
"ExternalEvaluatorRequest",
"FeedbackType",
"FileEnvironmentResponse",
"FileEnvironmentResponseFile",
+ "FileEnvironmentVariableRequest",
"FileId",
"FilePath",
"FileRequest",
@@ -242,7 +313,9 @@
"ImageUrl",
"ImageUrlDetail",
"InputResponse",
+ "LinkedFileRequest",
"LinkedToolResponse",
+ "ListAgents",
"ListDatasets",
"ListEvaluators",
"ListFlows",
@@ -251,6 +324,7 @@
"LlmEvaluatorRequest",
"LogResponse",
"LogStatus",
+ "LogStreamResponse",
"ModelEndpoints",
"ModelProviders",
"MonitoringEvaluatorEnvironmentRequest",
@@ -259,15 +333,18 @@
"MonitoringEvaluatorVersionRequest",
"NumericEvaluatorStatsResponse",
"ObservabilityStatus",
+ "OnAgentCallEnum",
+ "OpenAiReasoningEffort",
"OverallStats",
+ "PaginatedDataAgentResponse",
"PaginatedDataEvaluationLogResponse",
"PaginatedDataEvaluatorResponse",
"PaginatedDataFlowResponse",
"PaginatedDataLogResponse",
"PaginatedDataPromptResponse",
"PaginatedDataToolResponse",
- "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse",
- "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem",
+ "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse",
+ "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem",
"PaginatedDatapointResponse",
"PaginatedDatasetResponse",
"PaginatedEvaluationResponse",
@@ -276,6 +353,7 @@
"PlatformAccessEnum",
"PopulateTemplateResponse",
"PopulateTemplateResponsePopulatedTemplate",
+ "PopulateTemplateResponseReasoningEffort",
"PopulateTemplateResponseStop",
"PopulateTemplateResponseTemplate",
"ProjectSortBy",
@@ -284,15 +362,16 @@
"PromptCallResponseToolChoice",
"PromptCallStreamResponse",
"PromptKernelRequest",
+ "PromptKernelRequestReasoningEffort",
"PromptKernelRequestStop",
"PromptKernelRequestTemplate",
"PromptLogResponse",
"PromptLogResponseToolChoice",
"PromptResponse",
+ "PromptResponseReasoningEffort",
"PromptResponseStop",
"PromptResponseTemplate",
"ProviderApiKeys",
- "ReasoningEffort",
"ResponseFormat",
"ResponseFormatType",
"RunStatsResponse",
@@ -305,6 +384,7 @@
"TextEvaluatorStatsResponse",
"TimeUnit",
"ToolCall",
+ "ToolCallResponse",
"ToolChoice",
"ToolFunction",
"ToolKernelRequest",
diff --git a/src/humanloop/types/agent_call_response.py b/src/humanloop/types/agent_call_response.py
new file mode 100644
index 00000000..ba3bbfec
--- /dev/null
+++ b/src/humanloop/types/agent_call_response.py
@@ -0,0 +1,224 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
+from .evaluator_response import EvaluatorResponse
+from .flow_response import FlowResponse
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse
+from .prompt_response import PromptResponse
+from .tool_response import ToolResponse
+from .version_deployment_response import VersionDeploymentResponse
+from .version_id_response import VersionIdResponse
+from .agent_log_response import AgentLogResponse
+from .evaluator_log_response import EvaluatorLogResponse
+from .flow_log_response import FlowLogResponse
+from .prompt_log_response import PromptLogResponse
+from .tool_log_response import ToolLogResponse
+import typing
+from .chat_message import ChatMessage
+import pydantic
+from .agent_call_response_tool_choice import AgentCallResponseToolChoice
+import datetime as dt
+from .log_status import LogStatus
+from .log_response import LogResponse
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class AgentCallResponse(UncheckedBaseModel):
+ """
+ Response model for an Agent call.
+ """
+
+ output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
+ """
+ The message returned by the provider.
+ """
+
+ prompt_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of tokens in the prompt used to generate the output.
+ """
+
+ reasoning_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of reasoning tokens used to generate the output.
+ """
+
+ output_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of tokens in the output generated by the model.
+ """
+
+ prompt_cost: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Cost in dollars associated with the tokens in the prompt.
+ """
+
+ output_cost: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Cost in dollars associated with the tokens in the output.
+ """
+
+ finish_reason: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Reason the generation finished.
+ """
+
+ messages: typing.Optional[typing.List[ChatMessage]] = pydantic.Field(default=None)
+ """
+ The messages passed to the provider chat endpoint.
+ """
+
+ tool_choice: typing.Optional[AgentCallResponseToolChoice] = pydantic.Field(default=None)
+ """
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+ """
+
+ agent: AgentResponse = pydantic.Field()
+ """
+ Agent that generated the Log.
+ """
+
+ start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ When the logged event started.
+ """
+
+ end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ When the logged event ended.
+ """
+
+ output: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ User defined timestamp for when the log was created.
+ """
+
+ error: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Captured log and debug statements.
+ """
+
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Raw request sent to provider.
+ """
+
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Raw response received from the provider.
+ """
+
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ The inputs passed to the prompt template.
+ """
+
+ source: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifies where the model was called from.
+ """
+
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Any additional metadata to record.
+ """
+
+ log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
+ """
+ Status of the Agent Log. If `incomplete`, the Agent turn was suspended due to a tool call and can be continued by calling /agents/continue with responses to the Agent's last message (which should contain tool calls). See the `previous_agent_message` field for easy access to the Agent's last message.
+ """
+
+ source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ """
+
+ trace_parent_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The ID of the parent Log to nest this Log under in a Trace.
+ """
+
+ batches: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
+ """
+ Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations
+ """
+
+ user: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ End-user ID related to the Log.
+ """
+
+ environment: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The name of the Environment the Log is associated to.
+ """
+
+ save: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ Whether the request/response payloads will be stored on Humanloop.
+ """
+
+ log_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ """
+
+ id: str = pydantic.Field()
+ """
+ Unique identifier for the Log.
+ """
+
+ evaluator_logs: typing.List[EvaluatorLogResponse] = pydantic.Field()
+ """
+ List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
+ """
+
+ trace_flow_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifier for the Flow that the Trace belongs to.
+ """
+
+ trace_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifier for the Trace that the Log belongs to.
+ """
+
+ trace_children: typing.Optional[typing.List[LogResponse]] = pydantic.Field(default=None)
+ """
+ Logs nested under this Log in the Trace.
+ """
+
+ previous_agent_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
+ """
+ The Agent's last message, which should contain tool calls. Only populated if the Log is incomplete due to a suspended Agent turn with tool calls. This is useful for continuing the Agent call by calling /agents/continue.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
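The log_status and previous_agent_message docstrings above suggest a simple client-side pattern for suspended turns. A sketch, assuming ChatMessage exposes a tool_calls field (its definition is outside this hunk):

def pending_tool_calls(resp: AgentCallResponse):
    # An incomplete Log means the Agent turn stopped on tool calls; the calls
    # that need answering live on the Agent's last message.
    if resp.log_status == "incomplete" and resp.previous_agent_message is not None:
        return resp.previous_agent_message.tool_calls  # assumed ChatMessage field
    return None  # the turn completed; nothing to answer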
diff --git a/src/humanloop/types/agent_call_response_tool_choice.py b/src/humanloop/types/agent_call_response_tool_choice.py
new file mode 100644
index 00000000..95eca73e
--- /dev/null
+++ b/src/humanloop/types/agent_call_response_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .tool_choice import ToolChoice
+
+AgentCallResponseToolChoice = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice
+]
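The union mirrors the tool_choice docstring: three string literals plus the structured ToolChoice form. The literals type-check directly; the structured variant is sketched as a comment because ToolChoice's fields are defined outside this hunk:

choice_auto: AgentCallResponseToolChoice = "auto"
choice_required: AgentCallResponseToolChoice = "required"
# Structured form, forcing one named function (field names assumed):
# choice_fn: AgentCallResponseToolChoice = ToolChoice(type="function", function={"name": "get_weather"})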
diff --git a/src/humanloop/types/agent_call_stream_response.py b/src/humanloop/types/agent_call_stream_response.py
new file mode 100644
index 00000000..673d3738
--- /dev/null
+++ b/src/humanloop/types/agent_call_stream_response.py
@@ -0,0 +1,44 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_log_response import AgentLogResponse
+from .agent_response import AgentResponse
+from .evaluator_log_response import EvaluatorLogResponse
+from .evaluator_response import EvaluatorResponse
+from .flow_log_response import FlowLogResponse
+from .flow_response import FlowResponse
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse
+from .prompt_log_response import PromptLogResponse
+from .prompt_response import PromptResponse
+from .tool_log_response import ToolLogResponse
+from .tool_response import ToolResponse
+from .version_deployment_response import VersionDeploymentResponse
+from .version_id_response import VersionIdResponse
+import typing
+from .agent_call_stream_response_payload import AgentCallStreamResponsePayload
+from .event_type import EventType
+import datetime as dt
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class AgentCallStreamResponse(UncheckedBaseModel):
+ """
+ Response model for calling an Agent in streaming mode.
+ """
+
+ log_id: str
+ message: str
+ payload: typing.Optional[AgentCallStreamResponsePayload] = None
+ type: EventType
+ created_at: dt.datetime
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/agent_call_stream_response_payload.py b/src/humanloop/types/agent_call_stream_response_payload.py
new file mode 100644
index 00000000..85422047
--- /dev/null
+++ b/src/humanloop/types/agent_call_stream_response_payload.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .log_stream_response import LogStreamResponse
+from .log_response import LogResponse
+from .tool_call import ToolCall
+
+AgentCallStreamResponsePayload = typing.Union[LogStreamResponse, LogResponse, ToolCall]
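Because the payload is a bare union, stream consumers narrow it with isinstance checks. A minimal sketch using only the members named above; the string labels are interpretive:

def describe(payload: AgentCallStreamResponsePayload) -> str:
    if isinstance(payload, ToolCall):
        return "tool call requested by the Agent"
    if isinstance(payload, LogStreamResponse):
        return "incremental log chunk"
    if isinstance(payload, LogResponse):
        return "completed log"
    return "unrecognised payload"  # defensive default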
diff --git a/src/humanloop/types/agent_continue_response.py b/src/humanloop/types/agent_continue_response.py
new file mode 100644
index 00000000..0bbd7858
--- /dev/null
+++ b/src/humanloop/types/agent_continue_response.py
@@ -0,0 +1,224 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
+from .evaluator_response import EvaluatorResponse
+from .flow_response import FlowResponse
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse
+from .prompt_response import PromptResponse
+from .tool_response import ToolResponse
+from .version_deployment_response import VersionDeploymentResponse
+from .version_id_response import VersionIdResponse
+from .agent_log_response import AgentLogResponse
+from .evaluator_log_response import EvaluatorLogResponse
+from .flow_log_response import FlowLogResponse
+from .prompt_log_response import PromptLogResponse
+from .tool_log_response import ToolLogResponse
+import typing
+from .chat_message import ChatMessage
+import pydantic
+from .agent_continue_response_tool_choice import AgentContinueResponseToolChoice
+import datetime as dt
+from .log_status import LogStatus
+from .log_response import LogResponse
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class AgentContinueResponse(UncheckedBaseModel):
+ """
+ Response model for continuing an Agent call.
+ """
+
+ output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
+ """
+ The message returned by the provider.
+ """
+
+ prompt_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of tokens in the prompt used to generate the output.
+ """
+
+ reasoning_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of reasoning tokens used to generate the output.
+ """
+
+ output_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of tokens in the output generated by the model.
+ """
+
+ prompt_cost: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Cost in dollars associated with the tokens in the prompt.
+ """
+
+ output_cost: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Cost in dollars associated with the tokens in the output.
+ """
+
+ finish_reason: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Reason the generation finished.
+ """
+
+ messages: typing.Optional[typing.List[ChatMessage]] = pydantic.Field(default=None)
+ """
+ The messages passed to the provider chat endpoint.
+ """
+
+ tool_choice: typing.Optional[AgentContinueResponseToolChoice] = pydantic.Field(default=None)
+ """
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+ """
+
+ agent: AgentResponse = pydantic.Field()
+ """
+ Agent that generated the Log.
+ """
+
+ start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ When the logged event started.
+ """
+
+ end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ When the logged event ended.
+ """
+
+ output: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ User defined timestamp for when the log was created.
+ """
+
+ error: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Captured log and debug statements.
+ """
+
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Raw request sent to provider.
+ """
+
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Raw response received from the provider.
+ """
+
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ The inputs passed to the prompt template.
+ """
+
+ source: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifies where the model was called from.
+ """
+
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Any additional metadata to record.
+ """
+
+ log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
+ """
+ Status of the Agent Log. If `incomplete`, the Agent turn was suspended due to a tool call and can be continued by calling /agents/continue with responses to the Agent's last message (which should contain tool calls). See the `previous_agent_message` field for easy access to the Agent's last message.
+ """
+
+ source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ """
+
+ trace_parent_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The ID of the parent Log to nest this Log under in a Trace.
+ """
+
+ batches: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
+ """
+ Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations
+ """
+
+ user: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ End-user ID related to the Log.
+ """
+
+ environment: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The name of the Environment the Log is associated to.
+ """
+
+ save: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ Whether the request/response payloads will be stored on Humanloop.
+ """
+
+ log_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ """
+
+ id: str = pydantic.Field()
+ """
+ Unique identifier for the Log.
+ """
+
+ evaluator_logs: typing.List[EvaluatorLogResponse] = pydantic.Field()
+ """
+ List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
+ """
+
+ trace_flow_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifier for the Flow that the Trace belongs to.
+ """
+
+ trace_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifier for the Trace that the Log belongs to.
+ """
+
+ trace_children: typing.Optional[typing.List[LogResponse]] = pydantic.Field(default=None)
+ """
+ Logs nested under this Log in the Trace.
+ """
+
+ previous_agent_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
+ """
+ The Agent's last message, which should contain tool calls. Only populated if the Log is incomplete due to a suspended Agent turn with tool calls. This is useful for continuing the Agent call by calling /agents/continue.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/agent_continue_response_tool_choice.py b/src/humanloop/types/agent_continue_response_tool_choice.py
new file mode 100644
index 00000000..20f3fb75
--- /dev/null
+++ b/src/humanloop/types/agent_continue_response_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .tool_choice import ToolChoice
+
+AgentContinueResponseToolChoice = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice
+]
diff --git a/src/humanloop/types/agent_continue_stream_response.py b/src/humanloop/types/agent_continue_stream_response.py
new file mode 100644
index 00000000..ff7a0fac
--- /dev/null
+++ b/src/humanloop/types/agent_continue_stream_response.py
@@ -0,0 +1,44 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_log_response import AgentLogResponse
+from .agent_response import AgentResponse
+from .evaluator_log_response import EvaluatorLogResponse
+from .evaluator_response import EvaluatorResponse
+from .flow_log_response import FlowLogResponse
+from .flow_response import FlowResponse
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse
+from .prompt_log_response import PromptLogResponse
+from .prompt_response import PromptResponse
+from .tool_log_response import ToolLogResponse
+from .tool_response import ToolResponse
+from .version_deployment_response import VersionDeploymentResponse
+from .version_id_response import VersionIdResponse
+import typing
+from .agent_continue_stream_response_payload import AgentContinueStreamResponsePayload
+from .event_type import EventType
+import datetime as dt
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class AgentContinueStreamResponse(UncheckedBaseModel):
+ """
+ Response model for continuing an Agent call in streaming mode.
+ """
+
+ log_id: str
+ message: str
+ payload: typing.Optional[AgentContinueStreamResponsePayload] = None
+ type: EventType
+ created_at: dt.datetime
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/agent_continue_stream_response_payload.py b/src/humanloop/types/agent_continue_stream_response_payload.py
new file mode 100644
index 00000000..0e5f8a58
--- /dev/null
+++ b/src/humanloop/types/agent_continue_stream_response_payload.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .log_stream_response import LogStreamResponse
+from .log_response import LogResponse
+from .tool_call import ToolCall
+
+AgentContinueStreamResponsePayload = typing.Union[LogStreamResponse, LogResponse, ToolCall]
diff --git a/src/humanloop/types/agent_inline_tool.py b/src/humanloop/types/agent_inline_tool.py
new file mode 100644
index 00000000..dc618c35
--- /dev/null
+++ b/src/humanloop/types/agent_inline_tool.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .tool_function import ToolFunction
+from .on_agent_call_enum import OnAgentCallEnum
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class AgentInlineTool(UncheckedBaseModel):
+ type: typing.Literal["inline"] = "inline"
+ json_schema: ToolFunction
+ on_agent_call: typing.Optional[OnAgentCallEnum] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/agent_kernel_request.py b/src/humanloop/types/agent_kernel_request.py
new file mode 100644
index 00000000..6503b104
--- /dev/null
+++ b/src/humanloop/types/agent_kernel_request.py
@@ -0,0 +1,122 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import pydantic
+import typing
+from .model_endpoints import ModelEndpoints
+from .agent_kernel_request_template import AgentKernelRequestTemplate
+from .template_language import TemplateLanguage
+from .model_providers import ModelProviders
+from .agent_kernel_request_stop import AgentKernelRequestStop
+from .response_format import ResponseFormat
+from .agent_kernel_request_reasoning_effort import AgentKernelRequestReasoningEffort
+from .agent_kernel_request_tools_item import AgentKernelRequestToolsItem
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class AgentKernelRequest(UncheckedBaseModel):
+ """
+ Agent version details.
+
+ Shares the consistent Prompt-related fields with PromptKernelRequest.
+ """
+
+ model: str = pydantic.Field()
+ """
+ The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+ """
+
+ endpoint: typing.Optional[ModelEndpoints] = pydantic.Field(default=None)
+ """
+ The provider model endpoint used.
+ """
+
+ template: typing.Optional[AgentKernelRequestTemplate] = pydantic.Field(default=None)
+ """
+ The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+ For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+ For completion models, provide a prompt template as a string.
+
+ Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+ """
+
+ template_language: typing.Optional[TemplateLanguage] = pydantic.Field(default=None)
+ """
+ The template language to use for rendering the template.
+ """
+
+ provider: typing.Optional[ModelProviders] = pydantic.Field(default=None)
+ """
+ The company providing the underlying model service.
+ """
+
+ max_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt
+ """
+
+ temperature: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+ """
+
+ top_p: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+ """
+
+ stop: typing.Optional[AgentKernelRequestStop] = pydantic.Field(default=None)
+ """
+ The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+ """
+
+ presence_penalty: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+ """
+
+ frequency_penalty: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+ """
+
+ other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Other parameter values to be passed to the provider call.
+ """
+
+ seed: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ If specified, the model will make a best effort to sample deterministically, but it is not guaranteed.
+ """
+
+ response_format: typing.Optional[ResponseFormat] = pydantic.Field(default=None)
+ """
+ The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+ """
+
+ reasoning_effort: typing.Optional[AgentKernelRequestReasoningEffort] = pydantic.Field(default=None)
+ """
+ Guidance on how many reasoning tokens it should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
+ """
+
+ tools: typing.Optional[typing.List[AgentKernelRequestToolsItem]] = None
+ attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used.
+ """
+
+ max_iterations: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
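A hedged construction example for the model above, using only fields it declares; the chat-style template and {{input_name}} placeholder follow the template docstring, and pydantic coerces the message dicts into ChatMessage instances:

agent_version = AgentKernelRequest(
    model="gpt-4",
    template=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "{{question}}"},  # double-curly input variable
    ],
    temperature=0.7,
    max_iterations=5,  # cap on how many times the Agent model is called
)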
diff --git a/src/humanloop/types/agent_kernel_request_reasoning_effort.py b/src/humanloop/types/agent_kernel_request_reasoning_effort.py
new file mode 100644
index 00000000..a8e8e98b
--- /dev/null
+++ b/src/humanloop/types/agent_kernel_request_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .open_ai_reasoning_effort import OpenAiReasoningEffort
+
+AgentKernelRequestReasoningEffort = typing.Union[OpenAiReasoningEffort, int]
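Per the reasoning_effort docstring earlier in this file, OpenAI models take the enum side of this union while Anthropic models take an integer token budget. A sketch; the "high" literal is an assumed OpenAiReasoningEffort value:

openai_effort: AgentKernelRequestReasoningEffort = "high"  # assumed OpenAiReasoningEffort literal
anthropic_budget: AgentKernelRequestReasoningEffort = 4096  # max thinking-token budget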
diff --git a/src/humanloop/types/agent_kernel_request_stop.py b/src/humanloop/types/agent_kernel_request_stop.py
new file mode 100644
index 00000000..e38c12e2
--- /dev/null
+++ b/src/humanloop/types/agent_kernel_request_stop.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentKernelRequestStop = typing.Union[str, typing.List[str]]
diff --git a/src/humanloop/types/agent_kernel_request_template.py b/src/humanloop/types/agent_kernel_request_template.py
new file mode 100644
index 00000000..31a351f2
--- /dev/null
+++ b/src/humanloop/types/agent_kernel_request_template.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .chat_message import ChatMessage
+
+AgentKernelRequestTemplate = typing.Union[str, typing.List[ChatMessage]]
diff --git a/src/humanloop/types/agent_kernel_request_tools_item.py b/src/humanloop/types/agent_kernel_request_tools_item.py
new file mode 100644
index 00000000..82c2fecf
--- /dev/null
+++ b/src/humanloop/types/agent_kernel_request_tools_item.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .agent_linked_file_request import AgentLinkedFileRequest
+from .agent_inline_tool import AgentInlineTool
+
+AgentKernelRequestToolsItem = typing.Union[AgentLinkedFileRequest, AgentInlineTool]
diff --git a/src/humanloop/types/agent_linked_file_request.py b/src/humanloop/types/agent_linked_file_request.py
new file mode 100644
index 00000000..9efd4b6a
--- /dev/null
+++ b/src/humanloop/types/agent_linked_file_request.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .linked_file_request import LinkedFileRequest
+from .on_agent_call_enum import OnAgentCallEnum
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class AgentLinkedFileRequest(UncheckedBaseModel):
+ type: typing.Literal["file"] = "file"
+ link: LinkedFileRequest
+ on_agent_call: typing.Optional[OnAgentCallEnum] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
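The AgentKernelRequestToolsItem union defined earlier accepts either AgentInlineTool or the AgentLinkedFileRequest above, discriminated on the literal type field. A sketch; tool_fn and file_link stand in for a ToolFunction and a LinkedFileRequest, whose fields sit outside this hunk:

inline_tool = AgentInlineTool(json_schema=tool_fn)    # the schema travels with the Agent version
linked_tool = AgentLinkedFileRequest(link=file_link)  # references an existing Humanloop File

tools = [inline_tool, linked_tool]  # both are valid AgentKernelRequestToolsItem values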
diff --git a/src/humanloop/types/agent_linked_file_response.py b/src/humanloop/types/agent_linked_file_response.py
new file mode 100644
index 00000000..d85d682e
--- /dev/null
+++ b/src/humanloop/types/agent_linked_file_response.py
@@ -0,0 +1,39 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .linked_file_request import LinkedFileRequest
+from .on_agent_call_enum import OnAgentCallEnum
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+from ..core.pydantic_utilities import update_forward_refs
+
+
+class AgentLinkedFileResponse(UncheckedBaseModel):
+ type: typing.Literal["file"] = "file"
+ link: LinkedFileRequest
+ on_agent_call: typing.Optional[OnAgentCallEnum] = None
+ file: typing.Optional["AgentLinkedFileResponseFile"] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+from .agent_response import AgentResponse # noqa: E402
+from .evaluator_response import EvaluatorResponse # noqa: E402
+from .flow_response import FlowResponse # noqa: E402
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402
+from .prompt_response import PromptResponse # noqa: E402
+from .tool_response import ToolResponse # noqa: E402
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402
+from .version_id_response import VersionIdResponse # noqa: E402
+from .agent_linked_file_response_file import AgentLinkedFileResponseFile # noqa: E402
+
+update_forward_refs(AgentLinkedFileResponse)
diff --git a/src/humanloop/types/agent_linked_file_response_file.py b/src/humanloop/types/agent_linked_file_response_file.py
new file mode 100644
index 00000000..42d38fe4
--- /dev/null
+++ b/src/humanloop/types/agent_linked_file_response_file.py
@@ -0,0 +1,15 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+import typing
+from .dataset_response import DatasetResponse
+
+if typing.TYPE_CHECKING:
+ from .prompt_response import PromptResponse
+ from .tool_response import ToolResponse
+ from .evaluator_response import EvaluatorResponse
+ from .flow_response import FlowResponse
+ from .agent_response import AgentResponse
+AgentLinkedFileResponseFile = typing.Union[
+ "PromptResponse", "ToolResponse", DatasetResponse, "EvaluatorResponse", "FlowResponse", "AgentResponse"
+]
diff --git a/src/humanloop/types/agent_log_response.py b/src/humanloop/types/agent_log_response.py
new file mode 100644
index 00000000..f5b5e8e8
--- /dev/null
+++ b/src/humanloop/types/agent_log_response.py
@@ -0,0 +1,224 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
+from .evaluator_response import EvaluatorResponse
+from .flow_response import FlowResponse
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse
+from .prompt_response import PromptResponse
+from .tool_response import ToolResponse
+from .version_deployment_response import VersionDeploymentResponse
+from .version_id_response import VersionIdResponse
+import typing
+from .chat_message import ChatMessage
+import pydantic
+from .agent_log_response_tool_choice import AgentLogResponseToolChoice
+import datetime as dt
+from .log_status import LogStatus
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.pydantic_utilities import update_forward_refs
+
+
+class AgentLogResponse(UncheckedBaseModel):
+ """
+ Response model for an Agent Log.
+ """
+
+ output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
+ """
+ The message returned by the provider.
+ """
+
+ prompt_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of tokens in the prompt used to generate the output.
+ """
+
+ reasoning_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of reasoning tokens used to generate the output.
+ """
+
+ output_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of tokens in the output generated by the model.
+ """
+
+ prompt_cost: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Cost in dollars associated with the tokens in the prompt.
+ """
+
+ output_cost: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Cost in dollars associated with the tokens in the output.
+ """
+
+ finish_reason: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Reason the generation finished.
+ """
+
+ messages: typing.Optional[typing.List[ChatMessage]] = pydantic.Field(default=None)
+ """
+ The messages passed to the provider chat endpoint.
+ """
+
+ tool_choice: typing.Optional[AgentLogResponseToolChoice] = pydantic.Field(default=None)
+ """
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': }}` forces the model to use the named function.
+ """
+
+ agent: AgentResponse = pydantic.Field()
+ """
+ Agent that generated the Log.
+ """
+
+ start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ When the logged event started.
+ """
+
+ end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ When the logged event ended.
+ """
+
+ output: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ User defined timestamp for when the log was created.
+ """
+
+ error: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Captured log and debug statements.
+ """
+
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Raw request sent to provider.
+ """
+
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Raw response received from the provider.
+ """
+
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ The inputs passed to the prompt template.
+ """
+
+ source: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifies where the model was called from.
+ """
+
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Any additional metadata to record.
+ """
+
+ log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
+ """
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+ """
+
+ source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ """
+
+ trace_parent_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The ID of the parent Log to nest this Log under in a Trace.
+ """
+
+ batches: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
+ """
+ Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations.
+ """
+
+ user: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ End-user ID related to the Log.
+ """
+
+ environment: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The name of the Environment the Log is associated to.
+ """
+
+ save: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ Whether the request/response payloads will be stored on Humanloop.
+ """
+
+ log_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ """
+
+ id: str = pydantic.Field()
+ """
+ Unique identifier for the Log.
+ """
+
+ evaluator_logs: typing.List["EvaluatorLogResponse"] = pydantic.Field()
+ """
+ List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
+ """
+
+ trace_flow_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifier for the Flow that the Trace belongs to.
+ """
+
+ trace_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifier for the Trace that the Log belongs to.
+ """
+
+ trace_children: typing.Optional[typing.List["LogResponse"]] = pydantic.Field(default=None)
+ """
+ Logs nested under this Log in the Trace.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+from .evaluator_log_response import EvaluatorLogResponse # noqa: E402
+from .flow_log_response import FlowLogResponse # noqa: E402
+from .prompt_log_response import PromptLogResponse # noqa: E402
+from .tool_log_response import ToolLogResponse # noqa: E402
+from .log_response import LogResponse # noqa: E402
+
+update_forward_refs(AgentLogResponse)
diff --git a/src/humanloop/types/agent_log_response_tool_choice.py b/src/humanloop/types/agent_log_response_tool_choice.py
new file mode 100644
index 00000000..5cb07628
--- /dev/null
+++ b/src/humanloop/types/agent_log_response_tool_choice.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .tool_choice import ToolChoice
+
+AgentLogResponseToolChoice = typing.Union[
+ typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice
+]
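
For reference, the four `tool_choice` shapes documented on `AgentLogResponse` above, written out as plain Python values; the function name is a made-up placeholder:

```python
tool_choice_none = "none"          # never call a tool; default when no tools exist
tool_choice_auto = "auto"          # model decides; default when tools are provided
tool_choice_required = "required"  # model must call at least one tool
tool_choice_forced = {             # force one named function; "get_weather" is hypothetical
    "type": "function",
    "function": {"name": "get_weather"},
}
```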
diff --git a/src/humanloop/types/agent_log_stream_response.py b/src/humanloop/types/agent_log_stream_response.py
new file mode 100644
index 00000000..91547189
--- /dev/null
+++ b/src/humanloop/types/agent_log_stream_response.py
@@ -0,0 +1,98 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+import pydantic
+import datetime as dt
+from .chat_message import ChatMessage
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class AgentLogStreamResponse(UncheckedBaseModel):
+ """
+ Agent-specific log output returned when streaming an Agent Log.
+ """
+
+ output: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ User defined timestamp for when the log was created.
+ """
+
+ error: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Captured log and debug statements.
+ """
+
+ output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
+ """
+ The message returned by the provider.
+ """
+
+ prompt_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of tokens in the prompt used to generate the output.
+ """
+
+ reasoning_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of reasoning tokens used to generate the output.
+ """
+
+ output_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ Number of tokens in the output generated by the model.
+ """
+
+ prompt_cost: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Cost in dollars associated to the tokens in the prompt.
+ """
+
+ output_cost: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Cost in dollars associated to the tokens in the output.
+ """
+
+ finish_reason: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Reason the generation finished.
+ """
+
+ id: str = pydantic.Field()
+ """
+ ID of the log.
+ """
+
+ agent_id: str = pydantic.Field()
+ """
+ ID of the Agent the log belongs to.
+ """
+
+ version_id: str = pydantic.Field()
+ """
+ ID of the specific version of the Agent.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
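
A minimal sketch of instantiating the stream-chunk model above; the IDs are placeholders and the module path is inferred from this patch:

```python
from humanloop.types.agent_log_stream_response import AgentLogStreamResponse

chunk = AgentLogStreamResponse(
    id="log_placeholder",
    agent_id="ag_placeholder",
    version_id="agv_placeholder",
    output="partial output so far...",
)
print(chunk.output, chunk.finish_reason)  # finish_reason defaults to None
```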
diff --git a/src/humanloop/types/agent_response.py b/src/humanloop/types/agent_response.py
new file mode 100644
index 00000000..0487d7b7
--- /dev/null
+++ b/src/humanloop/types/agent_response.py
@@ -0,0 +1,265 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+from ..core.unchecked_base_model import UncheckedBaseModel
+import pydantic
+import typing
+from .model_endpoints import ModelEndpoints
+from .agent_response_template import AgentResponseTemplate
+from .template_language import TemplateLanguage
+from .model_providers import ModelProviders
+from .agent_response_stop import AgentResponseStop
+from .response_format import ResponseFormat
+from .agent_response_reasoning_effort import AgentResponseReasoningEffort
+import typing_extensions
+from ..core.serialization import FieldMetadata
+from .environment_response import EnvironmentResponse
+import datetime as dt
+from .user_response import UserResponse
+from .version_status import VersionStatus
+from .input_response import InputResponse
+from .evaluator_aggregate import EvaluatorAggregate
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.pydantic_utilities import update_forward_refs
+
+
+class AgentResponse(UncheckedBaseModel):
+ """
+ Base type that all File Responses should inherit from.
+
+ Attributes defined here are common to all File Responses and should be overridden
+ in the inheriting classes with documentation and appropriate Field definitions.
+ """
+
+ path: str = pydantic.Field()
+ """
+ Path of the Agent, including the name, which is used as a unique identifier.
+ """
+
+ id: str = pydantic.Field()
+ """
+ Unique identifier for the Agent.
+ """
+
+ directory_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ ID of the directory that the file is in on Humanloop.
+ """
+
+ model: str = pydantic.Field()
+ """
+ The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+ """
+
+ endpoint: typing.Optional[ModelEndpoints] = pydantic.Field(default=None)
+ """
+ The provider model endpoint used.
+ """
+
+ template: typing.Optional[AgentResponseTemplate] = pydantic.Field(default=None)
+ """
+ The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+ For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+ For completion models, provide a prompt template as a string.
+
+ Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+ """
+
+ template_language: typing.Optional[TemplateLanguage] = pydantic.Field(default=None)
+ """
+ The template language to use for rendering the template.
+ """
+
+ provider: typing.Optional[ModelProviders] = pydantic.Field(default=None)
+ """
+ The company providing the underlying model service.
+ """
+
+ max_tokens: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt.
+ """
+
+ temperature: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+ """
+
+ top_p: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+ """
+
+ stop: typing.Optional[AgentResponseStop] = pydantic.Field(default=None)
+ """
+ The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+ """
+
+ presence_penalty: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+ """
+
+ frequency_penalty: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+ """
+
+ other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Other parameter values to be passed to the provider call.
+ """
+
+ seed: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ If specified, model will make a best effort to sample deterministically, but it is not guaranteed.
+ """
+
+ response_format: typing.Optional[ResponseFormat] = pydantic.Field(default=None)
+ """
+ The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+ """
+
+ reasoning_effort: typing.Optional[AgentResponseReasoningEffort] = pydantic.Field(default=None)
+ """
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer, which signifies the maximum token budget.
+ """
+
+ tools: typing.List["AgentResponseToolsItem"] = pydantic.Field()
+ """
+ List of tools that the Agent can call. These can be linked files or inline tools.
+ """
+
+ attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used.
+ """
+
+ max_iterations: typing.Optional[int] = pydantic.Field(default=None)
+ """
+ The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
+ """
+
+ version_name: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Unique name for the Agent version. Version names must be unique for a given Agent.
+ """
+
+ version_description: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Description of the version, e.g., the changes made in this version.
+ """
+
+ description: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Description of the Agent.
+ """
+
+ tags: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
+ """
+ List of tags associated with the file.
+ """
+
+ readme: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Long description of the file.
+ """
+
+ name: str = pydantic.Field()
+ """
+ Name of the Agent.
+ """
+
+ schema_: typing_extensions.Annotated[
+ typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema")
+ ] = pydantic.Field(default=None)
+ """
+ The JSON schema for the File.
+ """
+
+ version_id: str = pydantic.Field()
+ """
+ Unique identifier for the specific Agent Version. If no query params are provided, the default deployed Agent Version is returned.
+ """
+
+ type: typing.Optional[typing.Literal["agent"]] = None
+ environments: typing.Optional[typing.List[EnvironmentResponse]] = pydantic.Field(default=None)
+ """
+ The list of environments the Agent Version is deployed to.
+ """
+
+ created_at: dt.datetime
+ updated_at: dt.datetime
+ created_by: typing.Optional[UserResponse] = pydantic.Field(default=None)
+ """
+ The user who created the Agent.
+ """
+
+ committed_by: typing.Optional[UserResponse] = pydantic.Field(default=None)
+ """
+ The user who committed the Agent Version.
+ """
+
+ committed_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ The date and time the Agent Version was committed.
+ """
+
+ status: VersionStatus = pydantic.Field()
+ """
+ The status of the Agent Version.
+ """
+
+ last_used_at: dt.datetime
+ version_logs_count: int = pydantic.Field()
+ """
+ The number of logs that have been generated for this Agent Version.
+ """
+
+ total_logs_count: int = pydantic.Field()
+ """
+ The number of logs that have been generated across all Agent Versions.
+ """
+
+ inputs: typing.List[InputResponse] = pydantic.Field()
+ """
+ Inputs associated with the Agent. Inputs correspond to any of the variables used within the Agent template.
+ """
+
+ evaluators: typing.Optional[typing.List["MonitoringEvaluatorResponse"]] = pydantic.Field(default=None)
+ """
+ Evaluators that have been attached to this Agent that are used for monitoring logs.
+ """
+
+ evaluator_aggregates: typing.Optional[typing.List[EvaluatorAggregate]] = pydantic.Field(default=None)
+ """
+ Aggregation of Evaluator results for the Agent Version.
+ """
+
+ content: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The serialized kernel for the Agent. Corresponds to the .agent file.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
+
+
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402
+from .evaluator_response import EvaluatorResponse # noqa: E402
+from .flow_response import FlowResponse # noqa: E402
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402
+from .prompt_response import PromptResponse # noqa: E402
+from .tool_response import ToolResponse # noqa: E402
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402
+from .version_id_response import VersionIdResponse # noqa: E402
+from .agent_response_tools_item import AgentResponseToolsItem # noqa: E402
+
+update_forward_refs(AgentResponse)
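
To illustrate the double-curly-bracket input syntax described for `template` above, a chat-style template sketch, assuming `ChatMessage`'s usual `role`/`content` fields; `topic` is a placeholder input name:

```python
from humanloop.types.chat_message import ChatMessage

template = [
    ChatMessage(role="system", content="You are a helpful assistant."),
    ChatMessage(role="user", content="Write a haiku about {{topic}}."),
]
```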
diff --git a/src/humanloop/types/agent_response_reasoning_effort.py b/src/humanloop/types/agent_response_reasoning_effort.py
new file mode 100644
index 00000000..59254f38
--- /dev/null
+++ b/src/humanloop/types/agent_response_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .open_ai_reasoning_effort import OpenAiReasoningEffort
+
+AgentResponseReasoningEffort = typing.Union[OpenAiReasoningEffort, int]
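
The union above encodes the split described in the `reasoning_effort` docstring: OpenAI models take an effort level, Anthropic models a token budget. Both members as values:

```python
from humanloop.types.agent_response_reasoning_effort import AgentResponseReasoningEffort

openai_effort: AgentResponseReasoningEffort = "medium"  # OpenAiReasoningEffort literal
anthropic_budget: AgentResponseReasoningEffort = 2048   # Anthropic max thinking-token budget
```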
diff --git a/src/humanloop/types/agent_response_stop.py b/src/humanloop/types/agent_response_stop.py
new file mode 100644
index 00000000..5c3b6a48
--- /dev/null
+++ b/src/humanloop/types/agent_response_stop.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentResponseStop = typing.Union[str, typing.List[str]]
diff --git a/src/humanloop/types/agent_response_template.py b/src/humanloop/types/agent_response_template.py
new file mode 100644
index 00000000..4c084dc8
--- /dev/null
+++ b/src/humanloop/types/agent_response_template.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .chat_message import ChatMessage
+
+AgentResponseTemplate = typing.Union[str, typing.List[ChatMessage]]
diff --git a/src/humanloop/types/agent_response_tools_item.py b/src/humanloop/types/agent_response_tools_item.py
new file mode 100644
index 00000000..8095608f
--- /dev/null
+++ b/src/humanloop/types/agent_response_tools_item.py
@@ -0,0 +1,9 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+import typing
+from .agent_inline_tool import AgentInlineTool
+
+if typing.TYPE_CHECKING:
+ from .agent_linked_file_response import AgentLinkedFileResponse
+AgentResponseToolsItem = typing.Union["AgentLinkedFileResponse", AgentInlineTool]
diff --git a/src/humanloop/types/anthropic_redacted_thinking_content.py b/src/humanloop/types/anthropic_redacted_thinking_content.py
new file mode 100644
index 00000000..ebac897b
--- /dev/null
+++ b/src/humanloop/types/anthropic_redacted_thinking_content.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class AnthropicRedactedThinkingContent(UncheckedBaseModel):
+ type: typing.Literal["redacted_thinking"] = "redacted_thinking"
+ data: str = pydantic.Field()
+ """
+ Thinking block that Anthropic redacted for safety reasons. The user is expected to pass the block back to Anthropic.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/anthropic_thinking_content.py b/src/humanloop/types/anthropic_thinking_content.py
new file mode 100644
index 00000000..bf7fc808
--- /dev/null
+++ b/src/humanloop/types/anthropic_thinking_content.py
@@ -0,0 +1,28 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class AnthropicThinkingContent(UncheckedBaseModel):
+ type: typing.Literal["thinking"] = "thinking"
+ thinking: str = pydantic.Field()
+ """
+ Model's chain-of-thought for providing the response.
+ """
+
+ signature: str = pydantic.Field()
+ """
+ Cryptographic signature that verifies the thinking block was generated by Anthropic.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/chat_message.py b/src/humanloop/types/chat_message.py
index c09f2768..c72bc90d 100644
--- a/src/humanloop/types/chat_message.py
+++ b/src/humanloop/types/chat_message.py
@@ -6,6 +6,7 @@
import pydantic
from .chat_role import ChatRole
from .tool_call import ToolCall
+from .chat_message_thinking_item import ChatMessageThinkingItem
from ..core.pydantic_utilities import IS_PYDANTIC_V2
@@ -35,6 +36,11 @@ class ChatMessage(UncheckedBaseModel):
A list of tool calls requested by the assistant.
"""
+ thinking: typing.Optional[typing.List[ChatMessageThinkingItem]] = pydantic.Field(default=None)
+ """
+ Model's chain-of-thought for providing the response. Present on assistant messages if model supports it.
+ """
+
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
else:
diff --git a/src/humanloop/types/chat_message_thinking_item.py b/src/humanloop/types/chat_message_thinking_item.py
new file mode 100644
index 00000000..0a507724
--- /dev/null
+++ b/src/humanloop/types/chat_message_thinking_item.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .anthropic_thinking_content import AnthropicThinkingContent
+from .anthropic_redacted_thinking_content import AnthropicRedactedThinkingContent
+
+ChatMessageThinkingItem = typing.Union[AnthropicThinkingContent, AnthropicRedactedThinkingContent]
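
A small sketch of building both members of this union; the field values are placeholders, since real blocks are returned by the provider:

```python
from humanloop.types.anthropic_redacted_thinking_content import AnthropicRedactedThinkingContent
from humanloop.types.anthropic_thinking_content import AnthropicThinkingContent

visible = AnthropicThinkingContent(thinking="chain-of-thought text", signature="sig_placeholder")
redacted = AnthropicRedactedThinkingContent(data="opaque_base64_blob")
thinking_items = [visible, redacted]  # a valid typing.List[ChatMessageThinkingItem]
```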
diff --git a/src/humanloop/types/create_agent_log_response.py b/src/humanloop/types/create_agent_log_response.py
new file mode 100644
index 00000000..9dc66629
--- /dev/null
+++ b/src/humanloop/types/create_agent_log_response.py
@@ -0,0 +1,42 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import pydantic
+import typing
+from .log_status import LogStatus
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class CreateAgentLogResponse(UncheckedBaseModel):
+ """
+ Response for an Agent Log.
+ """
+
+ id: str = pydantic.Field()
+ """
+ Unique identifier for the Log.
+ """
+
+ agent_id: str = pydantic.Field()
+ """
+ Unique identifier for the Agent.
+ """
+
+ version_id: str = pydantic.Field()
+ """
+ Unique identifier for the Agent Version.
+ """
+
+ log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
+ """
+ Status of the Agent Log. When an Agent Log is marked as `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
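
A minimal sketch of the response model above; all IDs are placeholders, and `"incomplete"`/`"complete"` are the `LogStatus` values that the docstrings in this patch reference:

```python
from humanloop.types.create_agent_log_response import CreateAgentLogResponse

resp = CreateAgentLogResponse(
    id="log_placeholder",
    agent_id="ag_placeholder",
    version_id="agv_placeholder",
    log_status="incomplete",  # monitoring Evaluators wait until it is "complete"
)
```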
diff --git a/src/humanloop/types/dataset_response.py b/src/humanloop/types/dataset_response.py
index af79f597..2c614521 100644
--- a/src/humanloop/types/dataset_response.py
+++ b/src/humanloop/types/dataset_response.py
@@ -3,6 +3,8 @@
from ..core.unchecked_base_model import UncheckedBaseModel
import pydantic
import typing
+import typing_extensions
+from ..core.serialization import FieldMetadata
from .environment_response import EnvironmentResponse
import datetime as dt
from .user_response import UserResponse
@@ -43,6 +45,13 @@ class DatasetResponse(UncheckedBaseModel):
Description of the Dataset.
"""
+ schema_: typing_extensions.Annotated[
+ typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema")
+ ] = pydantic.Field(default=None)
+ """
+ The JSON schema for the File.
+ """
+
readme: typing.Optional[str] = pydantic.Field(default=None)
"""
Long description of the file.
diff --git a/src/humanloop/types/directory_with_parents_and_children_response.py b/src/humanloop/types/directory_with_parents_and_children_response.py
index 5828a678..51f879b8 100644
--- a/src/humanloop/types/directory_with_parents_and_children_response.py
+++ b/src/humanloop/types/directory_with_parents_and_children_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/directory_with_parents_and_children_response_files_item.py b/src/humanloop/types/directory_with_parents_and_children_response_files_item.py
index 0bfeebf7..9d0d5fc4 100644
--- a/src/humanloop/types/directory_with_parents_and_children_response_files_item.py
+++ b/src/humanloop/types/directory_with_parents_and_children_response_files_item.py
@@ -6,7 +6,8 @@
from .evaluator_response import EvaluatorResponse
from .dataset_response import DatasetResponse
from .flow_response import FlowResponse
+from .agent_response import AgentResponse
DirectoryWithParentsAndChildrenResponseFilesItem = typing.Union[
- PromptResponse, ToolResponse, EvaluatorResponse, DatasetResponse, FlowResponse
+ PromptResponse, ToolResponse, EvaluatorResponse, DatasetResponse, FlowResponse, AgentResponse
]
diff --git a/src/humanloop/types/evaluatee_response.py b/src/humanloop/types/evaluatee_response.py
index 9ba9fe4d..4332aa12 100644
--- a/src/humanloop/types/evaluatee_response.py
+++ b/src/humanloop/types/evaluatee_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/evaluation_evaluator_response.py b/src/humanloop/types/evaluation_evaluator_response.py
index 413081c6..0c7de27e 100644
--- a/src/humanloop/types/evaluation_evaluator_response.py
+++ b/src/humanloop/types/evaluation_evaluator_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/evaluation_log_response.py b/src/humanloop/types/evaluation_log_response.py
index 6c931db0..84d117e2 100644
--- a/src/humanloop/types/evaluation_log_response.py
+++ b/src/humanloop/types/evaluation_log_response.py
@@ -1,6 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_log_response import AgentLogResponse
+from .agent_response import AgentResponse
from .evaluator_log_response import EvaluatorLogResponse
from .evaluator_response import EvaluatorResponse
from .flow_log_response import FlowLogResponse
diff --git a/src/humanloop/types/evaluation_response.py b/src/humanloop/types/evaluation_response.py
index f113fff5..bcda94a4 100644
--- a/src/humanloop/types/evaluation_response.py
+++ b/src/humanloop/types/evaluation_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/evaluation_run_response.py b/src/humanloop/types/evaluation_run_response.py
index 1203ce2c..74d59e4c 100644
--- a/src/humanloop/types/evaluation_run_response.py
+++ b/src/humanloop/types/evaluation_run_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/evaluation_runs_response.py b/src/humanloop/types/evaluation_runs_response.py
index d91e1ee9..e09b2a73 100644
--- a/src/humanloop/types/evaluation_runs_response.py
+++ b/src/humanloop/types/evaluation_runs_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/evaluator_log_response.py b/src/humanloop/types/evaluator_log_response.py
index e457d580..71ca76c0 100644
--- a/src/humanloop/types/evaluator_log_response.py
+++ b/src/humanloop/types/evaluator_log_response.py
@@ -2,6 +2,8 @@
from __future__ import annotations
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
@@ -189,6 +191,7 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_log_response import AgentLogResponse # noqa: E402
from .flow_log_response import FlowLogResponse # noqa: E402
from .prompt_log_response import PromptLogResponse # noqa: E402
from .tool_log_response import ToolLogResponse # noqa: E402
diff --git a/src/humanloop/types/evaluator_response.py b/src/humanloop/types/evaluator_response.py
index 175f456d..712ca698 100644
--- a/src/humanloop/types/evaluator_response.py
+++ b/src/humanloop/types/evaluator_response.py
@@ -5,6 +5,8 @@
import pydantic
import typing
from .evaluator_response_spec import EvaluatorResponseSpec
+import typing_extensions
+from ..core.serialization import FieldMetadata
from .environment_response import EnvironmentResponse
import datetime as dt
from .user_response import UserResponse
@@ -55,6 +57,13 @@ class EvaluatorResponse(UncheckedBaseModel):
Description of the Evaluator.
"""
+ schema_: typing_extensions.Annotated[
+ typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema")
+ ] = pydantic.Field(default=None)
+ """
+ The JSON schema for the File.
+ """
+
readme: typing.Optional[str] = pydantic.Field(default=None)
"""
Long description of the file.
@@ -124,6 +133,8 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402
+from .agent_response import AgentResponse # noqa: E402
from .flow_response import FlowResponse # noqa: E402
from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402
from .prompt_response import PromptResponse # noqa: E402
diff --git a/src/humanloop/types/event_type.py b/src/humanloop/types/event_type.py
new file mode 100644
index 00000000..128eed92
--- /dev/null
+++ b/src/humanloop/types/event_type.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+EventType = typing.Union[
+ typing.Literal[
+ "agent_turn_start",
+ "agent_turn_suspend",
+ "agent_turn_continue",
+ "agent_turn_end",
+ "agent_start",
+ "agent_update",
+ "agent_end",
+ "tool_start",
+ "tool_update",
+ "tool_end",
+ "error",
+ "agent_generation_error",
+ ],
+ typing.Any,
+]
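
A sketch of narrowing this open union; unknown values fall through because the union includes `typing.Any`:

```python
from humanloop.types.event_type import EventType


def describe(event: EventType) -> str:
    if event in ("tool_start", "tool_update", "tool_end"):
        return "tool event"
    if event in ("error", "agent_generation_error"):
        return "error event"
    return f"agent lifecycle event: {event}"


print(describe("agent_turn_start"))
```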
diff --git a/src/humanloop/types/file_environment_response.py b/src/humanloop/types/file_environment_response.py
index 70ed322f..7f34b7b3 100644
--- a/src/humanloop/types/file_environment_response.py
+++ b/src/humanloop/types/file_environment_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/file_environment_response_file.py b/src/humanloop/types/file_environment_response_file.py
index 2a105c9d..0254c2b8 100644
--- a/src/humanloop/types/file_environment_response_file.py
+++ b/src/humanloop/types/file_environment_response_file.py
@@ -6,7 +6,8 @@
from .dataset_response import DatasetResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
+from .agent_response import AgentResponse
FileEnvironmentResponseFile = typing.Union[
- PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse
+ PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse, AgentResponse
]
diff --git a/src/humanloop/types/file_environment_variable_request.py b/src/humanloop/types/file_environment_variable_request.py
new file mode 100644
index 00000000..8108245b
--- /dev/null
+++ b/src/humanloop/types/file_environment_variable_request.py
@@ -0,0 +1,27 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+
+
+class FileEnvironmentVariableRequest(UncheckedBaseModel):
+ name: str = pydantic.Field()
+ """
+ Name of the environment variable.
+ """
+
+ value: str = pydantic.Field()
+ """
+ Value of the environment variable.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/file_type.py b/src/humanloop/types/file_type.py
index 7a870b84..f235825b 100644
--- a/src/humanloop/types/file_type.py
+++ b/src/humanloop/types/file_type.py
@@ -2,4 +2,4 @@
import typing
-FileType = typing.Union[typing.Literal["prompt", "tool", "dataset", "evaluator", "flow"], typing.Any]
+FileType = typing.Union[typing.Literal["prompt", "tool", "dataset", "evaluator", "flow", "agent"], typing.Any]
diff --git a/src/humanloop/types/files_tool_type.py b/src/humanloop/types/files_tool_type.py
index c32b9755..753d9ba2 100644
--- a/src/humanloop/types/files_tool_type.py
+++ b/src/humanloop/types/files_tool_type.py
@@ -3,5 +3,5 @@
import typing
FilesToolType = typing.Union[
- typing.Literal["pinecone_search", "google", "mock", "snippet", "json_schema", "get_api_call"], typing.Any
+ typing.Literal["pinecone_search", "google", "mock", "snippet", "json_schema", "get_api_call", "python"], typing.Any
]
diff --git a/src/humanloop/types/flow_log_response.py b/src/humanloop/types/flow_log_response.py
index ba1e1cf6..58a87fac 100644
--- a/src/humanloop/types/flow_log_response.py
+++ b/src/humanloop/types/flow_log_response.py
@@ -2,6 +2,8 @@
from __future__ import annotations
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
@@ -173,6 +175,7 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_log_response import AgentLogResponse # noqa: E402
from .evaluator_log_response import EvaluatorLogResponse # noqa: E402
from .prompt_log_response import PromptLogResponse # noqa: E402
from .tool_log_response import ToolLogResponse # noqa: E402
diff --git a/src/humanloop/types/flow_response.py b/src/humanloop/types/flow_response.py
index 4017b3b7..7768778e 100644
--- a/src/humanloop/types/flow_response.py
+++ b/src/humanloop/types/flow_response.py
@@ -4,6 +4,8 @@
from ..core.unchecked_base_model import UncheckedBaseModel
import pydantic
import typing
+import typing_extensions
+from ..core.serialization import FieldMetadata
from .environment_response import EnvironmentResponse
import datetime as dt
from .user_response import UserResponse
@@ -57,6 +59,13 @@ class FlowResponse(UncheckedBaseModel):
Description of the Flow.
"""
+ schema_: typing_extensions.Annotated[
+ typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema")
+ ] = pydantic.Field(default=None)
+ """
+ The JSON schema for the File.
+ """
+
readme: typing.Optional[str] = pydantic.Field(default=None)
"""
Long description of the file.
@@ -111,6 +120,8 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402
+from .agent_response import AgentResponse # noqa: E402
from .evaluator_response import EvaluatorResponse # noqa: E402
from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402
from .prompt_response import PromptResponse # noqa: E402
diff --git a/src/humanloop/types/linked_file_request.py b/src/humanloop/types/linked_file_request.py
new file mode 100644
index 00000000..ee45ffdf
--- /dev/null
+++ b/src/humanloop/types/linked_file_request.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class LinkedFileRequest(UncheckedBaseModel):
+ file_id: str
+ environment_id: typing.Optional[str] = None
+ version_id: typing.Optional[str] = None
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/list_agents.py b/src/humanloop/types/list_agents.py
new file mode 100644
index 00000000..36481f41
--- /dev/null
+++ b/src/humanloop/types/list_agents.py
@@ -0,0 +1,31 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
+from .evaluator_response import EvaluatorResponse
+from .flow_response import FlowResponse
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse
+from .prompt_response import PromptResponse
+from .tool_response import ToolResponse
+from .version_deployment_response import VersionDeploymentResponse
+from .version_id_response import VersionIdResponse
+import typing
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class ListAgents(UncheckedBaseModel):
+ records: typing.List[AgentResponse] = pydantic.Field()
+ """
+ The list of Agents.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/list_evaluators.py b/src/humanloop/types/list_evaluators.py
index 61edbec5..7b736e14 100644
--- a/src/humanloop/types/list_evaluators.py
+++ b/src/humanloop/types/list_evaluators.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/list_flows.py b/src/humanloop/types/list_flows.py
index 686dab26..41ec4008 100644
--- a/src/humanloop/types/list_flows.py
+++ b/src/humanloop/types/list_flows.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/list_prompts.py b/src/humanloop/types/list_prompts.py
index 94cda05e..f773d3f9 100644
--- a/src/humanloop/types/list_prompts.py
+++ b/src/humanloop/types/list_prompts.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/list_tools.py b/src/humanloop/types/list_tools.py
index 4080a6a1..84ddc89c 100644
--- a/src/humanloop/types/list_tools.py
+++ b/src/humanloop/types/list_tools.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/log_response.py b/src/humanloop/types/log_response.py
index 0ba81dd3..cd7a0a26 100644
--- a/src/humanloop/types/log_response.py
+++ b/src/humanloop/types/log_response.py
@@ -9,4 +9,7 @@
from .tool_log_response import ToolLogResponse
from .evaluator_log_response import EvaluatorLogResponse
from .flow_log_response import FlowLogResponse
-LogResponse = typing.Union["PromptLogResponse", "ToolLogResponse", "EvaluatorLogResponse", "FlowLogResponse"]
+ from .agent_log_response import AgentLogResponse
+LogResponse = typing.Union[
+ "PromptLogResponse", "ToolLogResponse", "EvaluatorLogResponse", "FlowLogResponse", "AgentLogResponse"
+]
diff --git a/src/humanloop/types/log_stream_response.py b/src/humanloop/types/log_stream_response.py
new file mode 100644
index 00000000..69ffacf4
--- /dev/null
+++ b/src/humanloop/types/log_stream_response.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .prompt_call_stream_response import PromptCallStreamResponse
+from .agent_log_stream_response import AgentLogStreamResponse
+
+LogStreamResponse = typing.Union[PromptCallStreamResponse, AgentLogStreamResponse]
diff --git a/src/humanloop/types/model_providers.py b/src/humanloop/types/model_providers.py
index 8473d2ae..3f2c99fb 100644
--- a/src/humanloop/types/model_providers.py
+++ b/src/humanloop/types/model_providers.py
@@ -4,7 +4,7 @@
ModelProviders = typing.Union[
typing.Literal[
- "openai", "openai_azure", "mock", "anthropic", "bedrock", "cohere", "replicate", "google", "groq", "deepseek"
+ "anthropic", "bedrock", "cohere", "deepseek", "google", "groq", "mock", "openai", "openai_azure", "replicate"
],
typing.Any,
]
diff --git a/src/humanloop/types/monitoring_evaluator_response.py b/src/humanloop/types/monitoring_evaluator_response.py
index e70dc4fb..1809af57 100644
--- a/src/humanloop/types/monitoring_evaluator_response.py
+++ b/src/humanloop/types/monitoring_evaluator_response.py
@@ -39,6 +39,8 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402
+from .agent_response import AgentResponse # noqa: E402
from .evaluator_response import EvaluatorResponse # noqa: E402
from .flow_response import FlowResponse # noqa: E402
from .prompt_response import PromptResponse # noqa: E402
diff --git a/src/humanloop/types/on_agent_call_enum.py b/src/humanloop/types/on_agent_call_enum.py
new file mode 100644
index 00000000..3730256e
--- /dev/null
+++ b/src/humanloop/types/on_agent_call_enum.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+OnAgentCallEnum = typing.Union[typing.Literal["stop", "continue"], typing.Any]
diff --git a/src/humanloop/types/open_ai_reasoning_effort.py b/src/humanloop/types/open_ai_reasoning_effort.py
new file mode 100644
index 00000000..d8c48547
--- /dev/null
+++ b/src/humanloop/types/open_ai_reasoning_effort.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+OpenAiReasoningEffort = typing.Union[typing.Literal["high", "medium", "low"], typing.Any]
diff --git a/src/humanloop/types/paginated_data_agent_response.py b/src/humanloop/types/paginated_data_agent_response.py
new file mode 100644
index 00000000..0febbadd
--- /dev/null
+++ b/src/humanloop/types/paginated_data_agent_response.py
@@ -0,0 +1,31 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
+from .evaluator_response import EvaluatorResponse
+from .flow_response import FlowResponse
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse
+from .prompt_response import PromptResponse
+from .tool_response import ToolResponse
+from .version_deployment_response import VersionDeploymentResponse
+from .version_id_response import VersionIdResponse
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class PaginatedDataAgentResponse(UncheckedBaseModel):
+ records: typing.List[AgentResponse]
+ page: int
+ size: int
+ total: int
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
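
A sketch of page arithmetic with the fields above, using an empty page for brevity:

```python
from humanloop.types.paginated_data_agent_response import PaginatedDataAgentResponse

page = PaginatedDataAgentResponse(records=[], page=1, size=50, total=120)
has_more = page.page * page.size < page.total
print(has_more)  # True: 50 of 120 records seen so far
```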
diff --git a/src/humanloop/types/paginated_data_evaluation_log_response.py b/src/humanloop/types/paginated_data_evaluation_log_response.py
index 9e3c568e..c508f8a6 100644
--- a/src/humanloop/types/paginated_data_evaluation_log_response.py
+++ b/src/humanloop/types/paginated_data_evaluation_log_response.py
@@ -1,6 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_log_response import AgentLogResponse
+from .agent_response import AgentResponse
from .evaluator_log_response import EvaluatorLogResponse
from .evaluator_response import EvaluatorResponse
from .flow_log_response import FlowLogResponse
diff --git a/src/humanloop/types/paginated_data_evaluator_response.py b/src/humanloop/types/paginated_data_evaluator_response.py
index 275f0528..2e82c736 100644
--- a/src/humanloop/types/paginated_data_evaluator_response.py
+++ b/src/humanloop/types/paginated_data_evaluator_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/paginated_data_flow_response.py b/src/humanloop/types/paginated_data_flow_response.py
index 990d58be..6cfcf9ae 100644
--- a/src/humanloop/types/paginated_data_flow_response.py
+++ b/src/humanloop/types/paginated_data_flow_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/paginated_data_log_response.py b/src/humanloop/types/paginated_data_log_response.py
index 57bae587..f41ca9ba 100644
--- a/src/humanloop/types/paginated_data_log_response.py
+++ b/src/humanloop/types/paginated_data_log_response.py
@@ -1,6 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_log_response import AgentLogResponse
+from .agent_response import AgentResponse
from .evaluator_log_response import EvaluatorLogResponse
from .evaluator_response import EvaluatorResponse
from .flow_log_response import FlowLogResponse
diff --git a/src/humanloop/types/paginated_data_prompt_response.py b/src/humanloop/types/paginated_data_prompt_response.py
index ff71e584..d9e1d914 100644
--- a/src/humanloop/types/paginated_data_prompt_response.py
+++ b/src/humanloop/types/paginated_data_prompt_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/paginated_data_tool_response.py b/src/humanloop/types/paginated_data_tool_response.py
index 0e52b361..e2962e87 100644
--- a/src/humanloop/types/paginated_data_tool_response.py
+++ b/src/humanloop/types/paginated_data_tool_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py b/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py
similarity index 76%
rename from src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py
rename to src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py
index bd7082b3..87d5b603 100644
--- a/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py
+++ b/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
@@ -9,16 +11,18 @@
from .version_deployment_response import VersionDeploymentResponse
from .version_id_response import VersionIdResponse
import typing
-from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item import (
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem,
+from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item import (
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem,
)
from ..core.pydantic_utilities import IS_PYDANTIC_V2
import pydantic
-class PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse(UncheckedBaseModel):
+class PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse(
+ UncheckedBaseModel
+):
records: typing.List[
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem
]
page: int
size: int
diff --git a/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py b/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py
similarity index 63%
rename from src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py
rename to src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py
index 65c4f324..a1b4f056 100644
--- a/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py
+++ b/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py
@@ -6,7 +6,8 @@
from .dataset_response import DatasetResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
+from .agent_response import AgentResponse
-PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem = typing.Union[
- PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse
-]
+PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem = (
+ typing.Union[PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse, AgentResponse]
+)
diff --git a/src/humanloop/types/paginated_evaluation_response.py b/src/humanloop/types/paginated_evaluation_response.py
index 78e177e8..16232e0b 100644
--- a/src/humanloop/types/paginated_evaluation_response.py
+++ b/src/humanloop/types/paginated_evaluation_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/populate_template_response.py b/src/humanloop/types/populate_template_response.py
index d587d175..d2d36f78 100644
--- a/src/humanloop/types/populate_template_response.py
+++ b/src/humanloop/types/populate_template_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
@@ -16,9 +18,11 @@
from .model_providers import ModelProviders
from .populate_template_response_stop import PopulateTemplateResponseStop
from .response_format import ResponseFormat
-from .reasoning_effort import ReasoningEffort
+from .populate_template_response_reasoning_effort import PopulateTemplateResponseReasoningEffort
from .tool_function import ToolFunction
from .linked_tool_response import LinkedToolResponse
+import typing_extensions
+from ..core.serialization import FieldMetadata
from .environment_response import EnvironmentResponse
import datetime as dt
from .user_response import UserResponse
@@ -126,9 +130,9 @@ class PopulateTemplateResponse(UncheckedBaseModel):
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
"""
- reasoning_effort: typing.Optional[ReasoningEffort] = pydantic.Field(default=None)
+ reasoning_effort: typing.Optional[PopulateTemplateResponseReasoningEffort] = pydantic.Field(default=None)
"""
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer specifying the maximum token budget.
"""
tools: typing.Optional[typing.List[ToolFunction]] = pydantic.Field(default=None)
@@ -176,6 +180,13 @@ class PopulateTemplateResponse(UncheckedBaseModel):
Name of the Prompt.
"""
+ schema_: typing_extensions.Annotated[
+ typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema")
+ ] = pydantic.Field(default=None)
+ """
+ The JSON schema for the Prompt.
+ """
+
version_id: str = pydantic.Field()
"""
Unique identifier for the specific Prompt Version. If no query params provided, the default deployed Prompt Version is returned.
@@ -220,6 +231,11 @@ class PopulateTemplateResponse(UncheckedBaseModel):
Aggregation of Evaluator results for the Prompt Version.
"""
+ content: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The serialized kernel for the Prompt. Corresponds to the .prompt file.
+ """
+
populated_template: typing.Optional[PopulateTemplateResponsePopulatedTemplate] = pydantic.Field(default=None)
"""
The template populated with the input values you provided in the request. Returns None if no template exists.
diff --git a/src/humanloop/types/populate_template_response_reasoning_effort.py b/src/humanloop/types/populate_template_response_reasoning_effort.py
new file mode 100644
index 00000000..8dd9f7f6
--- /dev/null
+++ b/src/humanloop/types/populate_template_response_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .open_ai_reasoning_effort import OpenAiReasoningEffort
+
+PopulateTemplateResponseReasoningEffort = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/types/prompt_call_response.py b/src/humanloop/types/prompt_call_response.py
index 4e1ae69c..ec74437f 100644
--- a/src/humanloop/types/prompt_call_response.py
+++ b/src/humanloop/types/prompt_call_response.py
@@ -1,6 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
diff --git a/src/humanloop/types/prompt_kernel_request.py b/src/humanloop/types/prompt_kernel_request.py
index 6461bb19..80ba5ed5 100644
--- a/src/humanloop/types/prompt_kernel_request.py
+++ b/src/humanloop/types/prompt_kernel_request.py
@@ -9,12 +9,18 @@
from .model_providers import ModelProviders
from .prompt_kernel_request_stop import PromptKernelRequestStop
from .response_format import ResponseFormat
-from .reasoning_effort import ReasoningEffort
+from .prompt_kernel_request_reasoning_effort import PromptKernelRequestReasoningEffort
from .tool_function import ToolFunction
from ..core.pydantic_utilities import IS_PYDANTIC_V2
class PromptKernelRequest(UncheckedBaseModel):
+ """
+ Base class used by both PromptKernelRequest and AgentKernelRequest.
+
+ Contains the Prompt-related fields common to both.
+ """
+
model: str = pydantic.Field()
"""
The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
@@ -90,9 +96,9 @@ class PromptKernelRequest(UncheckedBaseModel):
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
"""
- reasoning_effort: typing.Optional[ReasoningEffort] = pydantic.Field(default=None)
+ reasoning_effort: typing.Optional[PromptKernelRequestReasoningEffort] = pydantic.Field(default=None)
"""
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer specifying the maximum token budget.
"""
tools: typing.Optional[typing.List[ToolFunction]] = pydantic.Field(default=None)
diff --git a/src/humanloop/types/prompt_kernel_request_reasoning_effort.py b/src/humanloop/types/prompt_kernel_request_reasoning_effort.py
new file mode 100644
index 00000000..dda61bb4
--- /dev/null
+++ b/src/humanloop/types/prompt_kernel_request_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .open_ai_reasoning_effort import OpenAiReasoningEffort
+
+PromptKernelRequestReasoningEffort = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/types/prompt_log_response.py b/src/humanloop/types/prompt_log_response.py
index 2a1bad11..a9e26318 100644
--- a/src/humanloop/types/prompt_log_response.py
+++ b/src/humanloop/types/prompt_log_response.py
@@ -2,6 +2,8 @@
from __future__ import annotations
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
@@ -213,6 +215,7 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_log_response import AgentLogResponse # noqa: E402
from .evaluator_log_response import EvaluatorLogResponse # noqa: E402
from .flow_log_response import FlowLogResponse # noqa: E402
from .tool_log_response import ToolLogResponse # noqa: E402
diff --git a/src/humanloop/types/prompt_response.py b/src/humanloop/types/prompt_response.py
index 07f4755d..786617f3 100644
--- a/src/humanloop/types/prompt_response.py
+++ b/src/humanloop/types/prompt_response.py
@@ -10,9 +10,11 @@
from .model_providers import ModelProviders
from .prompt_response_stop import PromptResponseStop
from .response_format import ResponseFormat
-from .reasoning_effort import ReasoningEffort
+from .prompt_response_reasoning_effort import PromptResponseReasoningEffort
from .tool_function import ToolFunction
from .linked_tool_response import LinkedToolResponse
+import typing_extensions
+from ..core.serialization import FieldMetadata
from .environment_response import EnvironmentResponse
import datetime as dt
from .user_response import UserResponse
@@ -120,9 +122,9 @@ class PromptResponse(UncheckedBaseModel):
The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
"""
- reasoning_effort: typing.Optional[ReasoningEffort] = pydantic.Field(default=None)
+ reasoning_effort: typing.Optional[PromptResponseReasoningEffort] = pydantic.Field(default=None)
"""
- Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+ Guidance on how many reasoning tokens the model should generate before creating a response to the prompt. OpenAI reasoning models (o1, o3-mini) expect an OpenAIReasoningEffort enum. Anthropic reasoning models expect an integer specifying the maximum token budget.
"""
tools: typing.Optional[typing.List[ToolFunction]] = pydantic.Field(default=None)
@@ -170,6 +172,13 @@ class PromptResponse(UncheckedBaseModel):
Name of the Prompt.
"""
+ schema_: typing_extensions.Annotated[
+ typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema")
+ ] = pydantic.Field(default=None)
+ """
+ The JSON schema for the Prompt.
+ """
+
version_id: str = pydantic.Field()
"""
Unique identifier for the specific Prompt Version. If no query params provided, the default deployed Prompt Version is returned.
@@ -214,6 +223,11 @@ class PromptResponse(UncheckedBaseModel):
Aggregation of Evaluator results for the Prompt Version.
"""
+ content: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The serialized kernel for the Prompt. Corresponds to the .prompt file.
+ """
+
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
else:
@@ -224,6 +238,8 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402
+from .agent_response import AgentResponse # noqa: E402
from .evaluator_response import EvaluatorResponse # noqa: E402
from .flow_response import FlowResponse # noqa: E402
from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402
diff --git a/src/humanloop/types/prompt_response_reasoning_effort.py b/src/humanloop/types/prompt_response_reasoning_effort.py
new file mode 100644
index 00000000..e136637f
--- /dev/null
+++ b/src/humanloop/types/prompt_response_reasoning_effort.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from .open_ai_reasoning_effort import OpenAiReasoningEffort
+
+PromptResponseReasoningEffort = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/types/reasoning_effort.py b/src/humanloop/types/reasoning_effort.py
deleted file mode 100644
index da0a0354..00000000
--- a/src/humanloop/types/reasoning_effort.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-ReasoningEffort = typing.Union[typing.Literal["high", "medium", "low"], typing.Any]
diff --git a/src/humanloop/types/run_version_response.py b/src/humanloop/types/run_version_response.py
index d94b1178..770dc487 100644
--- a/src/humanloop/types/run_version_response.py
+++ b/src/humanloop/types/run_version_response.py
@@ -5,5 +5,6 @@
from .tool_response import ToolResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
+from .agent_response import AgentResponse
-RunVersionResponse = typing.Union[PromptResponse, ToolResponse, EvaluatorResponse, FlowResponse]
+RunVersionResponse = typing.Union[PromptResponse, ToolResponse, EvaluatorResponse, FlowResponse, AgentResponse]
diff --git a/src/humanloop/types/tool_call_response.py b/src/humanloop/types/tool_call_response.py
new file mode 100644
index 00000000..55bf2712
--- /dev/null
+++ b/src/humanloop/types/tool_call_response.py
@@ -0,0 +1,168 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
+from .evaluator_response import EvaluatorResponse
+from .flow_response import FlowResponse
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse
+from .prompt_response import PromptResponse
+from .tool_response import ToolResponse
+from .version_deployment_response import VersionDeploymentResponse
+from .version_id_response import VersionIdResponse
+from .agent_log_response import AgentLogResponse
+from .evaluator_log_response import EvaluatorLogResponse
+from .flow_log_response import FlowLogResponse
+from .prompt_log_response import PromptLogResponse
+from .tool_log_response import ToolLogResponse
+import typing
+import datetime as dt
+import pydantic
+from .log_status import LogStatus
+from .log_response import LogResponse
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class ToolCallResponse(UncheckedBaseModel):
+ """
+ Response model for a Tool call.
+ """
+
+ start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ When the logged event started.
+ """
+
+ end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ When the logged event ended.
+ """
+
+ tool: ToolResponse = pydantic.Field()
+ """
+ Tool used to generate the Log.
+ """
+
+ output: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ """
+
+ created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+ """
+ User defined timestamp for when the log was created.
+ """
+
+ error: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Error message if the log is an error.
+ """
+
+ provider_latency: typing.Optional[float] = pydantic.Field(default=None)
+ """
+ Duration of the logged event in seconds.
+ """
+
+ stdout: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Captured log and debug statements.
+ """
+
+ provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Raw request sent to provider.
+ """
+
+ provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Raw response received from the provider.
+ """
+
+ inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ The inputs passed to the prompt template.
+ """
+
+ source: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifies where the model was called from.
+ """
+
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+ """
+ Any additional metadata to record.
+ """
+
+ log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
+ """
+ Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+ """
+
+ source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ """
+
+ trace_parent_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The ID of the parent Log to nest this Log under in a Trace.
+ """
+
+ batches: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
+ """
+ Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations
+ """
+
+ user: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ End-user ID related to the Log.
+ """
+
+ environment: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The name of the Environment the Log is associated to.
+ """
+
+ save: typing.Optional[bool] = pydantic.Field(default=None)
+ """
+ Whether the request/response payloads will be stored on Humanloop.
+ """
+
+ log_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ """
+
+ id: str = pydantic.Field()
+ """
+ ID of the log.
+ """
+
+ evaluator_logs: typing.List[EvaluatorLogResponse] = pydantic.Field()
+ """
+ List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
+ """
+
+ trace_flow_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Identifier for the Flow that the Trace belongs to.
+ """
+
+ trace_id: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ ID of the Trace containing the Tool Call Log.
+ """
+
+ trace_children: typing.Optional[typing.List[LogResponse]] = pydantic.Field(default=None)
+ """
+ Logs nested under this Log in the Trace.
+ """
+
+ if IS_PYDANTIC_V2:
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
+ else:
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/tool_log_response.py b/src/humanloop/types/tool_log_response.py
index 1b6081c3..251223af 100644
--- a/src/humanloop/types/tool_log_response.py
+++ b/src/humanloop/types/tool_log_response.py
@@ -2,6 +2,8 @@
from __future__ import annotations
from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
from .monitoring_evaluator_response import MonitoringEvaluatorResponse
@@ -13,6 +15,7 @@
import datetime as dt
import pydantic
from .log_status import LogStatus
+from .chat_message import ChatMessage
from ..core.pydantic_utilities import IS_PYDANTIC_V2
from ..core.pydantic_utilities import update_forward_refs
@@ -152,6 +155,11 @@ class ToolLogResponse(UncheckedBaseModel):
Tool used to generate the Log.
"""
+ output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
+ """
+ The message returned by the Tool.
+ """
+
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
else:
@@ -162,6 +170,7 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_log_response import AgentLogResponse # noqa: E402
from .evaluator_log_response import EvaluatorLogResponse # noqa: E402
from .flow_log_response import FlowLogResponse # noqa: E402
from .prompt_log_response import PromptLogResponse # noqa: E402
diff --git a/src/humanloop/types/tool_response.py b/src/humanloop/types/tool_response.py
index 0b835918..70537215 100644
--- a/src/humanloop/types/tool_response.py
+++ b/src/humanloop/types/tool_response.py
@@ -152,6 +152,8 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402
+from .agent_response import AgentResponse # noqa: E402
from .evaluator_response import EvaluatorResponse # noqa: E402
from .flow_response import FlowResponse # noqa: E402
from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402
diff --git a/src/humanloop/types/version_deployment_response.py b/src/humanloop/types/version_deployment_response.py
index e2e82d9f..0db57d69 100644
--- a/src/humanloop/types/version_deployment_response.py
+++ b/src/humanloop/types/version_deployment_response.py
@@ -36,6 +36,8 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402
+from .agent_response import AgentResponse # noqa: E402
from .evaluator_response import EvaluatorResponse # noqa: E402
from .flow_response import FlowResponse # noqa: E402
from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402
diff --git a/src/humanloop/types/version_deployment_response_file.py b/src/humanloop/types/version_deployment_response_file.py
index e0f73573..4fadcff0 100644
--- a/src/humanloop/types/version_deployment_response_file.py
+++ b/src/humanloop/types/version_deployment_response_file.py
@@ -10,6 +10,7 @@
from .tool_response import ToolResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
+ from .agent_response import AgentResponse
VersionDeploymentResponseFile = typing.Union[
- "PromptResponse", "ToolResponse", DatasetResponse, "EvaluatorResponse", "FlowResponse"
+ "PromptResponse", "ToolResponse", DatasetResponse, "EvaluatorResponse", "FlowResponse", "AgentResponse"
]
diff --git a/src/humanloop/types/version_id_response.py b/src/humanloop/types/version_id_response.py
index 877851a9..e3f5dc27 100644
--- a/src/humanloop/types/version_id_response.py
+++ b/src/humanloop/types/version_id_response.py
@@ -30,6 +30,8 @@ class Config:
extra = pydantic.Extra.allow
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402
+from .agent_response import AgentResponse # noqa: E402
from .evaluator_response import EvaluatorResponse # noqa: E402
from .flow_response import FlowResponse # noqa: E402
from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402
diff --git a/src/humanloop/types/version_id_response_version.py b/src/humanloop/types/version_id_response_version.py
index 2f56346c..b1cbd45d 100644
--- a/src/humanloop/types/version_id_response_version.py
+++ b/src/humanloop/types/version_id_response_version.py
@@ -10,6 +10,7 @@
from .tool_response import ToolResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
+ from .agent_response import AgentResponse
VersionIdResponseVersion = typing.Union[
- "PromptResponse", "ToolResponse", DatasetResponse, "EvaluatorResponse", "FlowResponse"
+ "PromptResponse", "ToolResponse", DatasetResponse, "EvaluatorResponse", "FlowResponse", "AgentResponse"
]
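The change above replaces the shared ReasoningEffort enum with per-model unions: OpenAI reasoning models keep the effort-level enum, while Anthropic reasoning models take an integer token budget. A minimal sketch of the two accepted forms, assuming OpenAiReasoningEffort accepts the usual "high"/"medium"/"low" literals (the model names are illustrative):

    from humanloop.types.prompt_kernel_request import PromptKernelRequest

    # OpenAI reasoning models: pass an effort level from the enum.
    openai_kernel = PromptKernelRequest(model="o3-mini", reasoning_effort="medium")

    # Anthropic reasoning models: pass an integer maximum token budget instead.
    anthropic_kernel = PromptKernelRequest(model="claude-3-7-sonnet", reasoning_effort=4096)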
From 941a52e552c98d0e32dcbbf5e286f19f910fadc4 Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Wed, 30 Apr 2025 17:47:04 +0100
Subject: [PATCH 22/39] feat(sync): Add metadata logging for sync operations
---
src/humanloop/sync/metadata_handler.py | 110 +++++++++++++++++++++++++
src/humanloop/sync/sync_client.py | 53 +++++++++---
2 files changed, 152 insertions(+), 11 deletions(-)
create mode 100644 src/humanloop/sync/metadata_handler.py
diff --git a/src/humanloop/sync/metadata_handler.py b/src/humanloop/sync/metadata_handler.py
new file mode 100644
index 00000000..9155df45
--- /dev/null
+++ b/src/humanloop/sync/metadata_handler.py
@@ -0,0 +1,110 @@
+import json
+import time
+from datetime import datetime
+from pathlib import Path
+from typing import Dict, List, Optional
+import logging
+
+logger = logging.getLogger(__name__)
+
+class MetadataHandler:
+ """Handles metadata storage and retrieval for sync operations.
+
+ This class manages a JSON file that stores the last 5 sync operations
+ and maintains a record of the most recent operation with detailed information.
+ """
+
+ def __init__(self, base_dir: Path, max_history: int = 5):
+ """Initialize the metadata handler.
+
+ Args:
+ base_dir: Base directory where metadata will be stored
+ max_history: Maximum number of operations to keep in history
+ """
+ self.base_dir = base_dir
+ self.metadata_file = base_dir / ".sync_metadata.json"
+ self.max_history = max_history
+ self._ensure_metadata_file()
+
+ def _ensure_metadata_file(self) -> None:
+ """Ensure the metadata file exists with proper structure."""
+ if not self.metadata_file.exists():
+ initial_data = {
+ "last_operation": None,
+ "history": []
+ }
+ self._write_metadata(initial_data)
+
+ def _read_metadata(self) -> Dict:
+ """Read the current metadata from file."""
+ try:
+ with open(self.metadata_file, 'r') as f:
+ return json.load(f)
+ except Exception as e:
+ logger.error(f"Error reading metadata file: {e}")
+ return {"last_operation": None, "history": []}
+
+ def _write_metadata(self, data: Dict) -> None:
+ """Write metadata to file."""
+ try:
+ self.metadata_file.parent.mkdir(parents=True, exist_ok=True)
+ with open(self.metadata_file, 'w') as f:
+ json.dump(data, f, indent=2)
+ except Exception as e:
+ logger.error(f"Error writing metadata file: {e}")
+
+ def log_operation(
+ self,
+ operation_type: str,
+ path: str,
+ environment: Optional[str] = None,
+ successful_files: Optional[List[str]] = None,
+ failed_files: Optional[List[str]] = None,
+ error: Optional[str] = None,
+ start_time: Optional[float] = None
+ ) -> None:
+ """Log a sync operation.
+
+ Args:
+ operation_type: Type of operation (e.g., "pull", "push")
+ path: The path that was synced
+ environment: Optional environment name
+ successful_files: List of successfully processed files
+ failed_files: List of files that failed to process
+ error: Any error message if the operation failed
+ start_time: Optional timestamp when the operation started (from time.time())
+ """
+ current_time = datetime.now().isoformat()
+ duration_ms = int((time.time() - (start_time or time.time())) * 1000) if start_time else 0
+
+ operation_data = {
+ "timestamp": current_time,
+ "operation_type": operation_type,
+ "path": path,
+ "environment": environment,
+ "successful_files": successful_files or [],
+ "failed_files": failed_files or [],
+ "error": error,
+ "duration_ms": duration_ms
+ }
+
+ metadata = self._read_metadata()
+
+ # Update last operation
+ metadata["last_operation"] = operation_data
+
+ # Update history
+ metadata["history"].insert(0, operation_data)
+ metadata["history"] = metadata["history"][:self.max_history]
+
+ self._write_metadata(metadata)
+
+ def get_last_operation(self) -> Optional[Dict]:
+ """Get the most recent operation details."""
+ metadata = self._read_metadata()
+ return metadata.get("last_operation")
+
+ def get_history(self) -> List[Dict]:
+ """Get the operation history."""
+ metadata = self._read_metadata()
+ return metadata.get("history", [])
\ No newline at end of file
diff --git a/src/humanloop/sync/sync_client.py b/src/humanloop/sync/sync_client.py
index c8d2d3c4..351ea42f 100644
--- a/src/humanloop/sync/sync_client.py
+++ b/src/humanloop/sync/sync_client.py
@@ -3,6 +3,8 @@
from typing import List, TYPE_CHECKING, Optional
from functools import lru_cache
from humanloop.types import FileType
+from .metadata_handler import MetadataHandler
+import time
if TYPE_CHECKING:
from humanloop.base_client import BaseHumanloop
@@ -49,6 +51,8 @@ def __init__(
self._cache_size = cache_size
# Create a new cached version of get_file_content with the specified cache size
self.get_file_content = lru_cache(maxsize=cache_size)(self._get_file_content_impl)
+ # Initialize metadata handler
+ self.metadata = MetadataHandler(self.base_dir)
def _get_file_content_impl(self, path: str, file_type: FileType) -> Optional[str]:
"""Implementation of get_file_content without the cache decorator.
@@ -227,10 +231,6 @@ def _pull_directory(self,
logger.warning(f"Skipping unsupported file type: {file.type}")
continue
- if not file.path.startswith(path):
- # Filter by path
- continue
-
# Skip if no content
if not getattr(file, "content", None):
logger.warning(f"No content found for {file.type} {getattr(file, 'id', '')}")
@@ -256,11 +256,12 @@ def _pull_directory(self,
return successful_files
- def pull(self, path: str, environment: str | None = None) -> List[str]:
+ def pull(self, path: str | None = None, environment: str | None = None) -> List[str]:
"""Pull files from Humanloop to local filesystem.
If the path ends with .prompt or .agent, pulls that specific file.
Otherwise, pulls all files under the specified directory path.
+ If no path is provided, pulls all files from the root.
Args:
path: The path to pull from (either a specific file or directory)
@@ -269,9 +270,39 @@ def pull(self, path: str, environment: str | None = None) -> List[str]:
Returns:
List of successfully processed file paths
"""
- normalized_path = self._normalize_path(path)
- if self.is_file(path):
- self._pull_file(normalized_path, environment)
- return [path]
- else:
- return self._pull_directory(normalized_path, environment)
+ start_time = time.time()
+ try:
+ if path is None:
+ successful_files = self._pull_directory(None, environment)
+ failed_files = [] # Failed files are already logged in _pull_directory
+ else:
+ normalized_path = self._normalize_path(path)
+ if self.is_file(path):
+ self._pull_file(normalized_path, environment)
+ successful_files = [path]
+ failed_files = []
+ else:
+ successful_files = self._pull_directory(normalized_path, environment)
+ failed_files = [] # Failed files are already logged in _pull_directory
+
+ # Log the successful operation
+ self.metadata.log_operation(
+ operation_type="pull",
+ path=path or "", # Use empty string if path is None
+ environment=environment,
+ successful_files=successful_files,
+ failed_files=failed_files,
+ start_time=start_time
+ )
+
+ return successful_files
+ except Exception as e:
+ # Log the failed operation
+ self.metadata.log_operation(
+ operation_type="pull",
+ path=path or "", # Use empty string if path is None
+ environment=environment,
+ error=str(e),
+ start_time=start_time
+ )
+ raise
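The MetadataHandler above keeps a rolling window of the last max_history operations in .sync_metadata.json under the base directory, plus a pointer to the most recent one. A minimal usage sketch, independent of SyncClient (the paths and file names are illustrative):

    import time
    from pathlib import Path
    from humanloop.sync.metadata_handler import MetadataHandler

    handler = MetadataHandler(Path("humanloop"))  # writes humanloop/.sync_metadata.json

    start = time.time()
    handler.log_operation(
        operation_type="pull",
        path="marketing/blog",
        environment="production",
        successful_files=["marketing/blog/writer.prompt"],
        start_time=start,  # used to compute duration_ms
    )

    last = handler.get_last_operation()  # dict for the most recent operation
    history = handler.get_history()      # up to the last 5 operations, newest first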
From 0063a4501828f2c0566461c42bbabb20584541e6 Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Thu, 1 May 2025 12:44:09 +0100
Subject: [PATCH 23/39] create humanloop cli and support pull operation
---
.fernignore | 1 +
poetry.lock | 45 +++++++-
pyproject.toml | 22 ++--
src/humanloop/cli/__init__.py | 0
src/humanloop/cli/__main__.py | 167 ++++++++++++++++++++++++++++++
src/humanloop/sync/sync_client.py | 2 +-
6 files changed, 223 insertions(+), 14 deletions(-)
create mode 100644 src/humanloop/cli/__init__.py
create mode 100644 src/humanloop/cli/__main__.py
diff --git a/.fernignore b/.fernignore
index e7ec8aee..d52ed17e 100644
--- a/.fernignore
+++ b/.fernignore
@@ -14,6 +14,7 @@ README.md
src/humanloop/decorators
src/humanloop/otel
src/humanloop/sync
+src/humanloop/cli/
## Tests
diff --git a/poetry.lock b/poetry.lock
index c368ca2b..5b31062a 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -194,6 +194,21 @@ files = [
{file = "charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3"},
]
+[[package]]
+name = "click"
+version = "8.1.8"
+description = "Composable command line interface toolkit"
+optional = false
+python-versions = ">=3.7"
+groups = ["main"]
+files = [
+ {file = "click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2"},
+ {file = "click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a"},
+]
+
+[package.dependencies]
+colorama = {version = "*", markers = "platform_system == \"Windows\""}
+
[[package]]
name = "cohere"
version = "5.15.0"
@@ -425,6 +440,7 @@ version = "0.16.0"
description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1"
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86"},
{file = "h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1"},
@@ -1549,14 +1565,14 @@ testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"]
[[package]]
name = "pytest-retry"
-version = "1.7.0"
+version = "1.6.3"
description = "Adds the ability to retry flaky tests in CI environments"
optional = false
python-versions = ">=3.9"
groups = ["dev"]
files = [
- {file = "pytest_retry-1.7.0-py3-none-any.whl", hash = "sha256:a2dac85b79a4e2375943f1429479c65beb6c69553e7dae6b8332be47a60954f4"},
- {file = "pytest_retry-1.7.0.tar.gz", hash = "sha256:f8d52339f01e949df47c11ba9ee8d5b362f5824dff580d3870ec9ae0057df80f"},
+ {file = "pytest_retry-1.6.3-py3-none-any.whl", hash = "sha256:e96f7df77ee70b0838d1085f9c3b8b5b7d74bf8947a0baf32e2b8c71b27683c8"},
+ {file = "pytest_retry-1.6.3.tar.gz", hash = "sha256:36ccfa11c8c8f9ddad5e20375182146d040c20c4a791745139c5a99ddf1b557d"},
]
[package.dependencies]
@@ -1983,6 +1999,27 @@ files = [
{file = "ruff-0.5.7.tar.gz", hash = "sha256:8dfc0a458797f5d9fb622dd0efc52d796f23f0a1493a9527f4e49a550ae9a7e5"},
]
+[[package]]
+name = "setuptools"
+version = "80.1.0"
+description = "Easily download, build, install, upgrade, and uninstall Python packages"
+optional = false
+python-versions = ">=3.9"
+groups = ["main"]
+files = [
+ {file = "setuptools-80.1.0-py3-none-any.whl", hash = "sha256:ea0e7655c05b74819f82e76e11a85b31779fee7c4969e82f72bab0664e8317e4"},
+ {file = "setuptools-80.1.0.tar.gz", hash = "sha256:2e308396e1d83de287ada2c2fd6e64286008fe6aca5008e0b6a8cb0e2c86eedd"},
+]
+
+[package.extras]
+check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\"", "ruff (>=0.8.0) ; sys_platform != \"cygwin\""]
+core = ["importlib_metadata (>=6) ; python_version < \"3.10\"", "jaraco.functools (>=4)", "jaraco.text (>=3.7)", "more_itertools", "more_itertools (>=8.8)", "packaging (>=24.2)", "platformdirs (>=4.2.2)", "tomli (>=2.0.1) ; python_version < \"3.11\"", "wheel (>=0.43.0)"]
+cover = ["pytest-cov"]
+doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"]
+enabler = ["pytest-enabler (>=2.2)"]
+test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21) ; python_version >= \"3.9\" and sys_platform != \"cygwin\"", "jaraco.envs (>=2.2)", "jaraco.path (>=3.7.2)", "jaraco.test (>=5.5)", "packaging (>=24.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf ; sys_platform != \"cygwin\"", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"]
+type = ["importlib_metadata (>=7.0.2) ; python_version < \"3.10\"", "jaraco.develop (>=7.21) ; sys_platform != \"cygwin\"", "mypy (==1.14.*)", "pytest-mypy"]
+
[[package]]
name = "six"
version = "1.17.0"
@@ -2376,4 +2413,4 @@ type = ["pytest-mypy"]
[metadata]
lock-version = "2.1"
python-versions = ">=3.9,<4"
-content-hash = "a504b0d639ca08283dd45b6af246f7e5f2a6ed5b26fb58e90af77d320ef2045a"
+content-hash = "0890733460cf6e0f6df37795594159de92028ba3fb5811d38cd7a7e5f9ec4282"
diff --git a/pyproject.toml b/pyproject.toml
index 9a81cf79..903a364b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -6,11 +6,13 @@ keywords = ["ai", "machine-learning", "llm", "sdk", "humanloop"]
[tool.poetry]
name = "humanloop"
-version = "0.8.36"
-description = ""
-readme = "README.md"
-authors = []
+version = "0.1.0"
+description = "Humanloop Python SDK"
+authors = ["Your Name "]
keywords = []
+packages = [
+ { include = "humanloop", from = "src" },
+]
classifiers = [
"Intended Audience :: Developers",
@@ -29,9 +31,6 @@ classifiers = [
"Topic :: Software Development :: Libraries :: Python Modules",
"Typing :: Typed"
]
-packages = [
- { include = "humanloop", from = "src"}
-]
[project.urls]
Repository = 'https://github.com/humanloop/humanloop-python'
@@ -56,6 +55,8 @@ protobuf = ">=5.29.3"
pydantic = ">= 1.9.2"
pydantic-core = "^2.18.2"
typing_extensions = ">= 4.0.0"
+click = "^8.0.0"
+setuptools = "^80.1.0"
[tool.poetry.group.dev.dependencies]
mypy = "1.0.1"
@@ -72,7 +73,7 @@ openai = "^1.52.2"
pandas = "^2.2.0"
parse-type = ">=0.6.4"
pyarrow = "^19.0.0"
-pytest-retry = "^1.6.3"
+pytest-retry = "1.6.3"
python-dotenv = "^1.0.1"
replicate = "^1.0.3"
ruff = "^0.5.6"
@@ -89,7 +90,10 @@ plugins = ["pydantic.mypy"]
[tool.ruff]
line-length = 120
+[tool.poetry.scripts]
+humanloop = "humanloop.cli.__main__:cli"
[build-system]
-requires = ["poetry-core"]
+requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"
+
diff --git a/src/humanloop/cli/__init__.py b/src/humanloop/cli/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/humanloop/cli/__main__.py b/src/humanloop/cli/__main__.py
new file mode 100644
index 00000000..d262d7c0
--- /dev/null
+++ b/src/humanloop/cli/__main__.py
@@ -0,0 +1,167 @@
+import click
+import logging
+from pathlib import Path
+from typing import Optional, Callable
+from functools import wraps
+from dotenv import load_dotenv, find_dotenv
+import os
+from humanloop import Humanloop
+from humanloop.sync.sync_client import SyncClient
+
+# Set up logging
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO) # Set back to INFO level
+console_handler = logging.StreamHandler()
+formatter = logging.Formatter("%(message)s") # Simplified formatter
+console_handler.setFormatter(formatter)
+if not logger.hasHandlers():
+ logger.addHandler(console_handler)
+
+def get_client(api_key: Optional[str] = None, env_file: Optional[str] = None, base_url: Optional[str] = None) -> Humanloop:
+ """Get a Humanloop client instance."""
+ if not api_key:
+ if env_file:
+ load_dotenv(env_file)
+ else:
+ env_path = find_dotenv()
+ if env_path:
+ load_dotenv(env_path)
+ else:
+ if os.path.exists(".env"):
+ load_dotenv(".env")
+ else:
+ load_dotenv()
+
+ api_key = os.getenv("HUMANLOOP_API_KEY")
+ if not api_key:
+ raise click.ClickException(
+ "No API key found. Set HUMANLOOP_API_KEY in .env file or environment, or use --api-key"
+ )
+
+ return Humanloop(api_key=api_key, base_url=base_url)
+
+def common_options(f: Callable) -> Callable:
+ """Decorator for common CLI options."""
+ @click.option(
+ "--api-key",
+ help="Humanloop API key. If not provided, uses HUMANLOOP_API_KEY from .env or environment.",
+ default=None,
+ )
+ @click.option(
+ "--env-file",
+ help="Path to .env file. If not provided, looks for .env in current directory.",
+ default=None,
+ type=click.Path(exists=True),
+ )
+ @click.option(
+ "--base-dir",
+ help="Base directory for synced files",
+ default="humanloop",
+ type=click.Path(),
+ )
+ # Hidden option for internal use - allows overriding the Humanloop API base URL
+ # Can be set via --base-url or HUMANLOOP_BASE_URL environment variable
+ @click.option(
+ "--base-url",
+ default=None,
+ hidden=True,
+ )
+ @wraps(f)
+ def wrapper(*args, **kwargs):
+ return f(*args, **kwargs)
+ return wrapper
+
+def handle_sync_errors(f: Callable) -> Callable:
+ """Decorator for handling sync operation errors."""
+ @wraps(f)
+ def wrapper(*args, **kwargs):
+ try:
+ return f(*args, **kwargs)
+ except Exception as e:
+ logger.error(f"Error during sync operation: {str(e)}")
+ raise click.ClickException(str(e))
+ return wrapper
+
+@click.group()
+def cli():
+ """Humanloop CLI for managing sync operations."""
+ pass
+
+@cli.command()
+@click.option(
+ "--path",
+ "-p",
+ help="Path to pull (file or directory). If not provided, pulls everything.",
+ default=None,
+)
+@click.option(
+ "--environment",
+ "-e",
+ help="Environment to pull from (e.g. 'production', 'staging')",
+ default=None,
+)
+@common_options
+@handle_sync_errors
+def pull(path: Optional[str], environment: Optional[str], api_key: Optional[str], env_file: Optional[str], base_dir: str, base_url: Optional[str]):
+ """Pull files from Humanloop to local filesystem.
+
+ If PATH is provided and ends with .prompt or .agent, pulls that specific file.
+ Otherwise, pulls all files under the specified directory path.
+ If no PATH is provided, pulls all files from the root.
+ """
+ client = get_client(api_key, env_file, base_url)
+ sync_client = SyncClient(client, base_dir=base_dir)
+
+ click.echo("Pulling files from Humanloop...")
+
+ click.echo(f"Path: {path or '(root)'}")
+ click.echo(f"Environment: {environment or '(default)'}")
+
+ successful_files = sync_client.pull(path, environment)
+
+ # Get metadata about the operation
+ metadata = sync_client.metadata.get_last_operation()
+ if metadata:
+ click.echo(f"\nSync completed in {metadata['duration_ms']}ms")
+ if metadata['successful_files']:
+ click.echo(f"\nSuccessfully synced {len(metadata['successful_files'])} files:")
+ for file in metadata['successful_files']:
+ click.echo(f" ✓ {file}")
+ if metadata['failed_files']:
+ click.echo(f"\nFailed to sync {len(metadata['failed_files'])} files:")
+ for file in metadata['failed_files']:
+ click.echo(f" ✗ {file}")
+
+@cli.command()
+@common_options
+@handle_sync_errors
+def history(api_key: Optional[str], env_file: Optional[str], base_dir: str, base_url: Optional[str]):
+ """Show sync operation history."""
+ client = get_client(api_key, env_file, base_url)
+ sync_client = SyncClient(client, base_dir=base_dir)
+
+ history = sync_client.metadata.get_history()
+ if not history:
+ click.echo("No sync operations found in history.")
+ return
+
+ click.echo("Sync Operation History:")
+ click.echo("======================")
+
+ for op in history:
+ click.echo(f"\nOperation: {op['operation_type']}")
+ click.echo(f"Timestamp: {op['timestamp']}")
+ click.echo(f"Path: {op['path'] or '(root)'}")
+ if op['environment']:
+ click.echo(f"Environment: {op['environment']}")
+ click.echo(f"Duration: {op['duration_ms']}ms")
+ if op['successful_files']:
+ click.echo(f"Successfully synced {len(op['successful_files'])} file{'' if len(op['successful_files']) == 1 else 's'}")
+ if op['failed_files']:
+ click.echo(f"Failed to sync {len(op['failed_files'])} file{'' if len(op['failed_files']) == 1 else 's'}")
+ if op['error']:
+ click.echo(f"Error: {op['error']}")
+ click.echo("----------------------")
+
+if __name__ == "__main__":
+ cli()
\ No newline at end of file
diff --git a/src/humanloop/sync/sync_client.py b/src/humanloop/sync/sync_client.py
index 351ea42f..a324c096 100644
--- a/src/humanloop/sync/sync_client.py
+++ b/src/humanloop/sync/sync_client.py
@@ -277,7 +277,7 @@ def pull(self, path: str | None = None, environment: str | None = None) -> List[
failed_files = [] # Failed files are already logged in _pull_directory
else:
normalized_path = self._normalize_path(path)
- if self.is_file(path):
+ if self.is_file(path.strip()):
self._pull_file(normalized_path, environment)
successful_files = [path]
failed_files = []
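With the new [tool.poetry.scripts] entry, `humanloop` resolves to the click group above, so the CLI can also be exercised in-process with click's test runner. A sketch, assuming HUMANLOOP_API_KEY is set in the environment (the path and environment values are illustrative):

    from click.testing import CliRunner

    from humanloop.cli.__main__ import cli

    runner = CliRunner()
    # Equivalent to running `humanloop pull -p marketing/blog -e production` from a shell.
    result = runner.invoke(cli, ["pull", "--path", "marketing/blog", "--environment", "production"])
    print(result.output)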
From de5c767a6d9a0afcb5a66bc791dead05dd2e0844 Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Thu, 1 May 2025 14:14:12 +0100
Subject: [PATCH 24/39] add oneline option for displaying sync history
---
src/humanloop/cli/__main__.py | 52 ++++++++++++++++++++++++-----------
1 file changed, 36 insertions(+), 16 deletions(-)
diff --git a/src/humanloop/cli/__main__.py b/src/humanloop/cli/__main__.py
index d262d7c0..d9027ef8 100644
--- a/src/humanloop/cli/__main__.py
+++ b/src/humanloop/cli/__main__.py
@@ -7,6 +7,7 @@
import os
from humanloop import Humanloop
from humanloop.sync.sync_client import SyncClient
+from datetime import datetime
# Set up logging
logger = logging.getLogger(__name__)
@@ -132,10 +133,23 @@ def pull(path: Optional[str], environment: Optional[str], api_key: Optional[str]
for file in metadata['failed_files']:
click.echo(f" ✗ {file}")
+def format_timestamp(timestamp: str) -> str:
+ """Format timestamp to a more readable format."""
+ try:
+ dt = datetime.fromisoformat(timestamp.replace('Z', '+00:00'))
+ return dt.strftime('%Y-%m-%d %H:%M:%S')
+ except (ValueError, AttributeError):
+ return timestamp
+
@cli.command()
+@click.option(
+ "--oneline",
+ is_flag=True,
+ help="Display history in a single line per operation",
+)
@common_options
@handle_sync_errors
-def history(api_key: Optional[str], env_file: Optional[str], base_dir: str, base_url: Optional[str]):
+def history(api_key: Optional[str], env_file: Optional[str], base_dir: str, base_url: Optional[str], oneline: bool):
"""Show sync operation history."""
client = get_client(api_key, env_file, base_url)
sync_client = SyncClient(client, base_dir=base_dir)
@@ -145,23 +159,29 @@ def history(api_key: Optional[str], env_file: Optional[str], base_dir: str, base
click.echo("No sync operations found in history.")
return
- click.echo("Sync Operation History:")
- click.echo("======================")
+ if not oneline:
+ click.echo("Sync Operation History:")
+ click.echo("======================")
for op in history:
- click.echo(f"\nOperation: {op['operation_type']}")
- click.echo(f"Timestamp: {op['timestamp']}")
- click.echo(f"Path: {op['path'] or '(root)'}")
- if op['environment']:
- click.echo(f"Environment: {op['environment']}")
- click.echo(f"Duration: {op['duration_ms']}ms")
- if op['successful_files']:
- click.echo(f"Successfully synced {len(op['successful_files'])} file{'' if len(op['successful_files']) == 1 else 's'}")
- if op['failed_files']:
- click.echo(f"Failed to sync {len(op['failed_files'])} file{'' if len(op['failed_files']) == 1 else 's'}")
- if op['error']:
- click.echo(f"Error: {op['error']}")
- click.echo("----------------------")
+ if oneline:
+ # Format: timestamp | operation_type | path | environment | duration_ms | status
+ status = "✓" if not op['failed_files'] else "✗"
+ click.echo(f"{format_timestamp(op['timestamp'])} | {op['operation_type']} | {op['path'] or '(root)'} | {op['environment'] or '-'} | {op['duration_ms']}ms | {status}")
+ else:
+ click.echo(f"\nOperation: {op['operation_type']}")
+ click.echo(f"Timestamp: {format_timestamp(op['timestamp'])}")
+ click.echo(f"Path: {op['path'] or '(root)'}")
+ if op['environment']:
+ click.echo(f"Environment: {op['environment']}")
+ click.echo(f"Duration: {op['duration_ms']}ms")
+ if op['successful_files']:
+ click.echo(f"Successfully synced {len(op['successful_files'])} file{'' if len(op['successful_files']) == 1 else 's'}")
+ if op['failed_files']:
+ click.echo(f"Failed to sync {len(op['failed_files'])} file{'' if len(op['failed_files']) == 1 else 's'}")
+ if op['error']:
+ click.echo(f"Error: {op['error']}")
+ click.echo("----------------------")
if __name__ == "__main__":
cli()
\ No newline at end of file
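format_timestamp normalizes both plain and Zulu-suffixed ISO-8601 strings and falls back to the raw value when parsing fails, so malformed history entries still render. A quick sketch of the behavior:

    from humanloop.cli.__main__ import format_timestamp

    format_timestamp("2025-05-01T14:14:12.123456")  # -> "2025-05-01 14:14:12"
    format_timestamp("2025-05-01T14:14:12Z")        # -> "2025-05-01 14:14:12" (Z mapped to +00:00)
    format_timestamp("not-a-timestamp")             # -> "not-a-timestamp" (returned unchanged)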
From d5e2de1e528cd52e064fd4249cf7c948c315c685 Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Thu, 1 May 2025 14:22:30 +0100
Subject: [PATCH 25/39] feat: improve local file handling in overload
- Make local file access more explicit by throwing errors instead of silent fallbacks
- Add proper error handling for file not found and IO errors
- Improve version/environment handling with clear warnings
- Update docstrings to better document behavior
This change makes the behavior more predictable when use_local_files=True:
- Throws FileNotFoundError/IOError if local file can't be accessed
- Warns and uses remote when version_id/environment is specified
- No more silent fallbacks to remote API
---
src/humanloop/overload.py | 31 ++++++++++++++++---
src/humanloop/sync/sync_client.py | 51 +++++++++++++++++++------------
2 files changed, 59 insertions(+), 23 deletions(-)
diff --git a/src/humanloop/overload.py b/src/humanloop/overload.py
index 5d874d3d..bcb0f605 100644
--- a/src/humanloop/overload.py
+++ b/src/humanloop/overload.py
@@ -143,9 +143,19 @@ def overload_with_local_files(
) -> Union[PromptsClient, AgentsClient]:
"""Overload call and log methods to handle local files when use_local_files is True.
+ When use_local_files is True:
+ - If only path is specified (no version_id or environment), attempts to use local file
+ - If local file is not found or cannot be read, raises an error
+ - If version_id or environment is specified, uses remote version with a warning
+
Args:
client: The client to overload (PromptsClient or AgentsClient)
+ sync_client: The sync client for handling local files
use_local_files: Whether to use local files
+
+ Raises:
+ FileNotFoundError: If use_local_files is True and local file is not found
+ IOError: If use_local_files is True and local file cannot be read
"""
original_call = client._call if hasattr(client, '_call') else client.call
original_log = client._log if hasattr(client, '_log') else client.log
@@ -154,11 +164,24 @@ def overload_with_local_files(
def _overload(self, function_name: str, **kwargs) -> PromptCallResponse:
# Handle local files if enabled
if use_local_files and "path" in kwargs:
- # Normalize the path and get file content
- normalized_path = sync_client._normalize_path(kwargs["path"])
- file_content = sync_client.get_file_content(normalized_path, file_type)
- if file_content is not None:
+ # Check if version_id or environment is specified
+ has_version_info = "version_id" in kwargs or "environment" in kwargs
+
+ if has_version_info:
+ logger.warning(
+ "Ignoring local file for %s as version_id or environment was specified. "
+ "Using remote version instead.",
+ kwargs["path"]
+ )
+ else:
+ # Only use local file if no version info is specified
+ normalized_path = sync_client._normalize_path(kwargs["path"])
+ try:
+ file_content = sync_client.get_file_content(normalized_path, file_type)
kwargs[file_type] = file_content
+ except (FileNotFoundError, IOError) as e:
+ # Re-raise with more context
+ raise type(e)(f"Failed to use local file for {kwargs['path']}: {str(e)}")
try:
if function_name == "call":
diff --git a/src/humanloop/sync/sync_client.py b/src/humanloop/sync/sync_client.py
index a324c096..704211eb 100644
--- a/src/humanloop/sync/sync_client.py
+++ b/src/humanloop/sync/sync_client.py
@@ -54,31 +54,40 @@ def __init__(
# Initialize metadata handler
self.metadata = MetadataHandler(self.base_dir)
- def _get_file_content_impl(self, path: str, file_type: FileType) -> Optional[str]:
+ def _get_file_content_impl(self, path: str, file_type: FileType) -> str:
"""Implementation of get_file_content without the cache decorator.
This is the actual implementation that gets wrapped by lru_cache.
+
+ Args:
+ path: The normalized path to the file (without extension)
+ file_type: The type of file (prompt or agent)
+
+ Returns:
+ The file content
+
+ Raises:
+ FileNotFoundError: If the file doesn't exist
+ IOError: If there's an error reading the file
"""
- try:
- # Construct path to local file
- local_path = self.base_dir / path
- # Add appropriate extension
- local_path = local_path.parent / f"{local_path.stem}.{file_type}"
+ # Construct path to local file
+ local_path = self.base_dir / path
+ # Add appropriate extension
+ local_path = local_path.parent / f"{local_path.stem}.{file_type}"
+
+ if not local_path.exists():
+ raise FileNotFoundError(f"Local file not found: {local_path}")
- if local_path.exists():
- # Read the file content
- with open(local_path) as f:
- file_content = f.read()
- logger.debug(f"Using local file content from {local_path}")
- return file_content
- else:
- logger.warning(f"Local file not found: {local_path}, falling back to API")
- return None
+ try:
+ # Read the file content
+ with open(local_path) as f:
+ file_content = f.read()
+ logger.debug(f"Using local file content from {local_path}")
+ return file_content
except Exception as e:
- logger.error(f"Error reading local file: {e}, falling back to API")
- return None
+ raise IOError(f"Error reading local file {local_path}: {str(e)}")
- def get_file_content(self, path: str, file_type: FileType) -> Optional[str]:
+ def get_file_content(self, path: str, file_type: FileType) -> str:
"""Get the content of a file from cache or filesystem.
This method uses an LRU cache to store file contents. When the cache is full,
@@ -89,7 +98,11 @@ def get_file_content(self, path: str, file_type: FileType) -> Optional[str]:
file_type: The type of file (prompt or agent)
Returns:
- The file content if found, None otherwise
+ The file content
+
+ Raises:
+ FileNotFoundError: If the file doesn't exist
+ IOError: If there's an error reading the file
"""
return self._get_file_content_impl(path, file_type)
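After this patch, a missing or unreadable local file surfaces as an exception instead of a silent fallback to the API. A sketch of the new contract, assuming FileType accepts the "prompt" literal (the API key and paths are illustrative):

    from humanloop import Humanloop
    from humanloop.sync.sync_client import SyncClient

    client = Humanloop(api_key="...")
    sync_client = SyncClient(client, base_dir="humanloop")

    try:
        content = sync_client.get_file_content("marketing/blog/writer", "prompt")
    except FileNotFoundError:
        # Raised when humanloop/marketing/blog/writer.prompt does not exist locally.
        raise
    except IOError:
        # Raised when the file exists but cannot be read.
        raise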
From 7f89e45c5fbd31caf3b49155c09b7204d04b6b07 Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Thu, 1 May 2025 17:03:59 +0100
Subject: [PATCH 26/39] improve error handling and cli message formatting
---
src/humanloop/cli/__main__.py | 81 +++++++++++++++++-------------
src/humanloop/overload.py | 18 ++++---
src/humanloop/sync/sync_client.py | 33 +++++++------
tests/conftest.py | 2 +-
tests/sync/test_sync.py | 82 ++++++++++++++++++++++++++++++-
5 files changed, 157 insertions(+), 59 deletions(-)
diff --git a/src/humanloop/cli/__main__.py b/src/humanloop/cli/__main__.py
index d9027ef8..ee7c730b 100644
--- a/src/humanloop/cli/__main__.py
+++ b/src/humanloop/cli/__main__.py
@@ -5,6 +5,7 @@
from functools import wraps
from dotenv import load_dotenv, find_dotenv
import os
+import sys
from humanloop import Humanloop
from humanloop.sync.sync_client import SyncClient
from datetime import datetime
@@ -18,6 +19,12 @@
if not logger.hasHandlers():
logger.addHandler(console_handler)
+# Color constants
+SUCCESS_COLOR = "green"
+ERROR_COLOR = "red"
+INFO_COLOR = "blue"
+WARNING_COLOR = "yellow"
+
def get_client(api_key: Optional[str] = None, env_file: Optional[str] = None, base_url: Optional[str] = None) -> Humanloop:
"""Get a Humanloop client instance."""
if not api_key:
@@ -36,7 +43,7 @@ def get_client(api_key: Optional[str] = None, env_file: Optional[str] = None, ba
api_key = os.getenv("HUMANLOOP_API_KEY")
if not api_key:
raise click.ClickException(
- "No API key found. Set HUMANLOOP_API_KEY in .env file or environment, or use --api-key"
+ click.style("No API key found. Set HUMANLOOP_API_KEY in .env file or environment, or use --api-key", fg=ERROR_COLOR)
)
return Humanloop(api_key=api_key, base_url=base_url)
@@ -47,12 +54,14 @@ def common_options(f: Callable) -> Callable:
"--api-key",
help="Humanloop API key. If not provided, uses HUMANLOOP_API_KEY from .env or environment.",
default=None,
+ show_default=False,
)
@click.option(
"--env-file",
help="Path to .env file. If not provided, looks for .env in current directory.",
default=None,
type=click.Path(exists=True),
+ show_default=False,
)
@click.option(
"--base-dir",
@@ -60,8 +69,6 @@ def common_options(f: Callable) -> Callable:
default="humanloop",
type=click.Path(),
)
- # Hidden option for internal use - allows overriding the Humanloop API base URL
- # Can be set via --base-url or HUMANLOOP_BASE_URL environment variable
@click.option(
"--base-url",
default=None,
@@ -79,12 +86,19 @@ def wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as e:
- logger.error(f"Error during sync operation: {str(e)}")
- raise click.ClickException(str(e))
+ click.echo(click.style(f"Error: {e}", fg=ERROR_COLOR))
+ sys.exit(1)
return wrapper
-@click.group()
-def cli():
+@click.group(
+ help="Humanloop CLI for managing sync operations.",
+ context_settings={
+ "help_option_names": ["-h", "--help"],
+ "max_content_width": 100,
+ }
+)
+@common_options
+def cli(api_key: Optional[str], env_file: Optional[str], base_dir: str, base_url: Optional[str]):
"""Humanloop CLI for managing sync operations."""
pass
@@ -92,7 +106,9 @@ def cli():
@click.option(
"--path",
"-p",
- help="Path to pull (file or directory). If not provided, pulls everything.",
+ help="Path to pull (file or directory). If not provided, pulls everything. "+
+ "To pull a specific file, ensure the extension for the file is included (e.g. .prompt or .agent). "+
+ "To pull a directory, simply specify the path to the directory (e.g. abc/def to pull all files under abc/def and its subdirectories).",
default=None,
)
@click.option(
@@ -101,37 +117,37 @@ def cli():
help="Environment to pull from (e.g. 'production', 'staging')",
default=None,
)
-@common_options
@handle_sync_errors
def pull(path: Optional[str], environment: Optional[str], api_key: Optional[str], env_file: Optional[str], base_dir: str, base_url: Optional[str]):
- """Pull files from Humanloop to local filesystem.
-
- If PATH is provided and ends with .prompt or .agent, pulls that specific file.
- Otherwise, pulls all files under the specified directory path.
- If no PATH is provided, pulls all files from the root.
- """
+ """Pull files from Humanloop to your local filesystem."""
client = get_client(api_key, env_file, base_url)
sync_client = SyncClient(client, base_dir=base_dir)
- click.echo("Pulling files from Humanloop...")
+ click.echo(click.style("Pulling files from Humanloop...", fg=INFO_COLOR))
- click.echo(f"Path: {path or '(root)'}")
- click.echo(f"Environment: {environment or '(default)'}")
+ click.echo(click.style(f"Path: {path or '(root)'}", fg=INFO_COLOR))
+ click.echo(click.style(f"Environment: {environment or '(default)'}", fg=INFO_COLOR))
successful_files = sync_client.pull(path, environment)
# Get metadata about the operation
metadata = sync_client.metadata.get_last_operation()
if metadata:
- click.echo(f"\nSync completed in {metadata['duration_ms']}ms")
+ # Determine if the operation was successful based on failed_files
+ is_successful = not metadata.get('failed_files') and not metadata.get('error')
+ duration_color = SUCCESS_COLOR if is_successful else ERROR_COLOR
+ click.echo(click.style(f"\nSync completed in {metadata['duration_ms']}ms", fg=duration_color))
+
if metadata['successful_files']:
- click.echo(f"\nSuccessfully synced {len(metadata['successful_files'])} files:")
+ click.echo(click.style(f"\nSuccessfully synced {len(metadata['successful_files'])} files:", fg=SUCCESS_COLOR))
for file in metadata['successful_files']:
- click.echo(f" ✓ {file}")
+ click.echo(click.style(f" ✓ {file}", fg=SUCCESS_COLOR))
if metadata['failed_files']:
- click.echo(f"\nFailed to sync {len(metadata['failed_files'])} files:")
+ click.echo(click.style(f"\nFailed to sync {len(metadata['failed_files'])} files:", fg=ERROR_COLOR))
for file in metadata['failed_files']:
- click.echo(f" ✗ {file}")
+ click.echo(click.style(f" ✗ {file}", fg=ERROR_COLOR))
+ if metadata.get('error'):
+ click.echo(click.style(f"\nError: {metadata['error']}", fg=ERROR_COLOR))
def format_timestamp(timestamp: str) -> str:
"""Format timestamp to a more readable format."""
@@ -147,7 +163,6 @@ def format_timestamp(timestamp: str) -> str:
is_flag=True,
help="Display history in a single line per operation",
)
-@common_options
@handle_sync_errors
def history(api_key: Optional[str], env_file: Optional[str], base_dir: str, base_url: Optional[str], oneline: bool):
"""Show sync operation history."""
@@ -156,32 +171,32 @@ def history(api_key: Optional[str], env_file: Optional[str], base_dir: str, base
history = sync_client.metadata.get_history()
if not history:
- click.echo("No sync operations found in history.")
+ click.echo(click.style("No sync operations found in history.", fg=WARNING_COLOR))
return
if not oneline:
- click.echo("Sync Operation History:")
- click.echo("======================")
+ click.echo(click.style("Sync Operation History:", fg=INFO_COLOR))
+ click.echo(click.style("======================", fg=INFO_COLOR))
for op in history:
if oneline:
# Format: timestamp | operation_type | path | environment | duration_ms | status
- status = "✓" if not op['failed_files'] else "✗"
+ status = click.style("✓", fg=SUCCESS_COLOR) if not op['failed_files'] else click.style("✗", fg=ERROR_COLOR)
click.echo(f"{format_timestamp(op['timestamp'])} | {op['operation_type']} | {op['path'] or '(root)'} | {op['environment'] or '-'} | {op['duration_ms']}ms | {status}")
else:
- click.echo(f"\nOperation: {op['operation_type']}")
+ click.echo(click.style(f"\nOperation: {op['operation_type']}", fg=INFO_COLOR))
click.echo(f"Timestamp: {format_timestamp(op['timestamp'])}")
click.echo(f"Path: {op['path'] or '(root)'}")
if op['environment']:
click.echo(f"Environment: {op['environment']}")
click.echo(f"Duration: {op['duration_ms']}ms")
if op['successful_files']:
- click.echo(f"Successfully synced {len(op['successful_files'])} file{'' if len(op['successful_files']) == 1 else 's'}")
+ click.echo(click.style(f"Successfully synced {len(op['successful_files'])} file{'' if len(op['successful_files']) == 1 else 's'}", fg=SUCCESS_COLOR))
if op['failed_files']:
- click.echo(f"Failed to sync {len(op['failed_files'])} file{'' if len(op['failed_files']) == 1 else 's'}")
+ click.echo(click.style(f"Failed to sync {len(op['failed_files'])} file{'' if len(op['failed_files']) == 1 else 's'}", fg=ERROR_COLOR))
if op['error']:
- click.echo(f"Error: {op['error']}")
- click.echo("----------------------")
+ click.echo(click.style(f"Error: {op['error']}", fg=ERROR_COLOR))
+ click.echo(click.style("----------------------", fg=INFO_COLOR))
if __name__ == "__main__":
cli()
\ No newline at end of file
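The styling and exit-code conventions introduced above reduce to a small pattern: colored output via click.style, plus a decorator that converts any exception into a red message and exit status 1. A minimal sketch — the `demo` command is hypothetical, not part of the Humanloop CLI:

import sys
from functools import wraps

import click

def handle_errors(f):
    @wraps(f)
    def wrapper(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except Exception as e:
            # Red message on stdout, non-zero exit for scripts to detect.
            click.echo(click.style(f"Error: {e}", fg="red"))
            sys.exit(1)
    return wrapper

@click.command()
@handle_errors
def demo():
    click.echo(click.style("Working...", fg="blue"))
    raise RuntimeError("something went wrong")

if __name__ == "__main__":
    demo()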
diff --git a/src/humanloop/overload.py b/src/humanloop/overload.py
index bcb0f605..5e2f953c 100644
--- a/src/humanloop/overload.py
+++ b/src/humanloop/overload.py
@@ -1,6 +1,7 @@
import inspect
import logging
import types
+import warnings
from typing import TypeVar, Union, Literal, Optional
from pathlib import Path
from humanloop.context import (
@@ -8,7 +9,7 @@
get_evaluation_context,
get_trace_id,
)
-from humanloop.evals.run import HumanloopRuntimeError
+from humanloop.error import HumanloopRuntimeError
from humanloop.evaluators.client import EvaluatorsClient
from humanloop.flows.client import FlowsClient
@@ -154,24 +155,25 @@ def overload_with_local_files(
use_local_files: Whether to use local files
Raises:
- FileNotFoundError: If use_local_files is True and local file is not found
- IOError: If use_local_files is True and local file cannot be read
+ HumanloopRuntimeError: If use_local_files is True and local file cannot be accessed
"""
original_call = client._call if hasattr(client, '_call') else client.call
original_log = client._log if hasattr(client, '_log') else client.log
file_type = _get_file_type_from_client(client)
def _overload(self, function_name: str, **kwargs) -> PromptCallResponse:
+ if "id" in kwargs and "path" in kwargs:
+ raise HumanloopRuntimeError(f"Can only specify one of `id` or `path` when {function_name}ing a {file_type}")
# Handle local files if enabled
if use_local_files and "path" in kwargs:
# Check if version_id or environment is specified
has_version_info = "version_id" in kwargs or "environment" in kwargs
if has_version_info:
- logger.warning(
- "Ignoring local file for %s as version_id or environment was specified. "
+ warnings.warn(
+ f"Ignoring local file for {kwargs['path']} as version_id or environment was specified. "
"Using remote version instead.",
- kwargs["path"]
+ UserWarning
)
else:
# Only use local file if no version info is specified
@@ -179,9 +181,9 @@ def _overload(self, function_name: str, **kwargs) -> PromptCallResponse:
try:
file_content = sync_client.get_file_content(normalized_path, file_type)
kwargs[file_type] = file_content
- except (FileNotFoundError, IOError) as e:
+ except HumanloopRuntimeError as e:
# Re-raise with more context
- raise type(e)(f"Failed to use local file for {kwargs['path']}: {str(e)}")
+ raise HumanloopRuntimeError(f"Failed to use local file for {kwargs['path']}: {str(e)}")
try:
if function_name == "call":
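A compact sketch of the precedence rule this hunk implements: explicit version info wins over a local file and emits a UserWarning, which the new tests assert via pytest.warns. `resolve_file_source` is a hypothetical helper, not the SDK's wrapper:

import warnings
from typing import Optional

def resolve_file_source(kwargs: dict, local_content: Optional[str]) -> str:
    # version_id/environment force the remote version, with a warning.
    if "version_id" in kwargs or "environment" in kwargs:
        warnings.warn(
            f"Ignoring local file for {kwargs['path']} as version_id or "
            "environment was specified. Using remote version instead.",
            UserWarning,
        )
        return "remote"
    # Otherwise prefer the local file when one was found.
    return "local" if local_content is not None else "remote"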
diff --git a/src/humanloop/sync/sync_client.py b/src/humanloop/sync/sync_client.py
index 704211eb..d890d1fd 100644
--- a/src/humanloop/sync/sync_client.py
+++ b/src/humanloop/sync/sync_client.py
@@ -5,6 +5,7 @@
from humanloop.types import FileType
from .metadata_handler import MetadataHandler
import time
+from humanloop.error import HumanloopRuntimeError
if TYPE_CHECKING:
from humanloop.base_client import BaseHumanloop
@@ -67,8 +68,7 @@ def _get_file_content_impl(self, path: str, file_type: FileType) -> str:
The file content
Raises:
- FileNotFoundError: If the file doesn't exist
- IOError: If there's an error reading the file
+ HumanloopRuntimeError: If the file doesn't exist or can't be read
"""
# Construct path to local file
local_path = self.base_dir / path
@@ -76,7 +76,7 @@ def _get_file_content_impl(self, path: str, file_type: FileType) -> str:
local_path = local_path.parent / f"{local_path.stem}.{file_type}"
if not local_path.exists():
- raise FileNotFoundError(f"Local file not found: {local_path}")
+ raise HumanloopRuntimeError(f"Local file not found: {local_path}")
try:
# Read the file content
@@ -85,7 +85,7 @@ def _get_file_content_impl(self, path: str, file_type: FileType) -> str:
logger.debug(f"Using local file content from {local_path}")
return file_content
except Exception as e:
- raise IOError(f"Error reading local file {local_path}: {str(e)}")
+ raise HumanloopRuntimeError(f"Error reading local file {local_path}: {str(e)}")
def get_file_content(self, path: str, file_type: FileType) -> str:
"""Get the content of a file from cache or filesystem.
@@ -101,8 +101,7 @@ def get_file_content(self, path: str, file_type: FileType) -> str:
The file content
Raises:
- FileNotFoundError: If the file doesn't exist
- IOError: If there's an error reading the file
+ HumanloopRuntimeError: If the file doesn't exist or can't be read
"""
return self._get_file_content_impl(path, file_type)
@@ -205,7 +204,7 @@ def _pull_file(self, path: str, environment: str | None = None) -> None:
self._save_serialized_file(file.content, file.path, file.type)
def _pull_directory(self,
- path: str | None = None,
+ directory: str | None = None,
environment: str | None = None,
) -> List[str]:
"""Sync prompt and agent files from Humanloop to local filesystem.
@@ -219,6 +218,9 @@ def _pull_directory(self,
Returns:
List of successfully processed file paths
+
+ Raises:
+ HumanloopRuntimeError: If there is an error fetching files from Humanloop
"""
successful_files = []
failed_files = []
@@ -231,7 +233,7 @@ def _pull_directory(self,
page=page,
include_content=True,
environment=environment,
- directory=path
+ directory=directory
)
if len(response.records) == 0:
@@ -258,14 +260,14 @@ def _pull_directory(self,
page += 1
except Exception as e:
- logger.error(f"Failed to fetch page {page}: {str(e)}")
- break
+ raise HumanloopRuntimeError(f"Failed to fetch page {page}: {str(e)}")
- # Log summary
- if successful_files:
- logger.info(f"\nSynced {len(successful_files)} files")
- if failed_files:
- logger.error(f"Failed to sync {len(failed_files)} files")
+ # Log summary only if we have results
+ if successful_files or failed_files:
+ if successful_files:
+ logger.info(f"\nSynced {len(successful_files)} files")
+ if failed_files:
+ logger.error(f"Failed to sync {len(failed_files)} files")
return successful_files
@@ -286,6 +288,7 @@ def pull(self, path: str | None = None, environment: str | None = None) -> List[
start_time = time.time()
try:
if path is None:
+ # Pull all files from the root
successful_files = self._pull_directory(None, environment)
failed_files = [] # Failed files are already logged in _pull_directory
else:
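The pagination change above — fail loudly on a fetch error instead of silently breaking out of the loop — reduces to this shape; `fetch_page` stands in for the paged files listing and is illustrative only:

from typing import Callable, List

def pull_all(fetch_page: Callable[[int], List[str]]) -> List[str]:
    results: List[str] = []
    page = 1
    while True:
        try:
            records = fetch_page(page)
        except Exception as e:
            # Surface the failure instead of returning a partial result.
            raise RuntimeError(f"Failed to fetch page {page}: {e}")
        if not records:
            break  # an empty page means everything has been fetched
        results.extend(records)
        page += 1
    return results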
diff --git a/tests/conftest.py b/tests/conftest.py
index 6203cfa6..a8c78ac5 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -196,7 +196,7 @@ def humanloop_client(request, api_keys: APIKeys) -> Humanloop:
use_local_files = getattr(request, "param", False)
return Humanloop(
api_key=api_keys.humanloop,
- base_url="http://localhost:80/v5/",
+ base_url="http://localhost:80/v5",
use_local_files=use_local_files
)
diff --git a/tests/sync/test_sync.py b/tests/sync/test_sync.py
index 9c0121e5..a2441b4b 100644
--- a/tests/sync/test_sync.py
+++ b/tests/sync/test_sync.py
@@ -10,6 +10,7 @@ class SyncableFile(NamedTuple):
type: FileType
model: str
id: str = ""
+ version_id: str = ""
@pytest.fixture
@@ -58,7 +59,13 @@ def test_file_structure(humanloop_client: Humanloop, get_test_path) -> List[Sync
path=full_path,
model=file.model,
)
- created_files.append(SyncableFile(path=full_path, type=file.type, model=file.model, id=response.id))
+ created_files.append(SyncableFile(
+ path=full_path,
+ type=file.type,
+ model=file.model,
+ id=response.id,
+ version_id=response.version_id
+ ))
return created_files
@@ -186,4 +193,75 @@ def test_overload_log_with_local_files(humanloop_client: Humanloop, test_file_st
path="invalid/path",
messages=[{"role": "user", "content": "Testing"}],
output="Test response"
- )
\ No newline at end of file
+ )
+
+@pytest.mark.parametrize("humanloop_client", [True], indirect=True)
+def test_overload_version_environment_handling(humanloop_client: Humanloop, test_file_structure: List[SyncableFile], cleanup_local_files):
+ """Test that overload_with_local_files correctly handles version_id and environment parameters.
+
+ Flow:
+ 1. Create files in remote (via test_file_structure fixture)
+ 2. Pull files locally
+ 3. Test that version_id/environment parameters cause remote usage with warning
+ """
+ # First pull the files locally
+ humanloop_client.pull()
+
+ # Test using the pulled files
+ test_file = test_file_structure[0] # Use the first test file
+ extension = f".{test_file.type}"
+ local_path = Path("humanloop") / f"{test_file.path}{extension}"
+
+ # Verify the file was pulled correctly
+ assert local_path.exists(), f"Expected pulled file at {local_path}"
+ assert local_path.parent.exists(), f"Expected directory at {local_path.parent}"
+
+ # Test with version_id - should use remote with warning
+ with pytest.warns(UserWarning, match="Ignoring local file.*as version_id or environment was specified"):
+ if test_file.type == "prompt":
+ response = humanloop_client.prompts.call(
+ path=test_file.path,
+ version_id=test_file.version_id,
+ messages=[{"role": "user", "content": "Testing"}]
+ )
+ elif test_file.type == "agent":
+ response = humanloop_client.agents.call(
+ path=test_file.path,
+ version_id=test_file.version_id,
+ messages=[{"role": "user", "content": "Testing"}]
+ )
+ assert response is not None
+
+ # Test with environment - should use remote with warning
+ with pytest.warns(UserWarning, match="Ignoring local file.*as version_id or environment was specified"):
+ if test_file.type == "prompt":
+ response = humanloop_client.prompts.call(
+ path=test_file.path,
+ environment="production",
+ messages=[{"role": "user", "content": "Testing"}]
+ )
+ elif test_file.type == "agent":
+ response = humanloop_client.agents.call(
+ path=test_file.path,
+ environment="production",
+ messages=[{"role": "user", "content": "Testing"}]
+ )
+ assert response is not None
+
+ # Test with both version_id and environment - should use remote with warning
+ with pytest.warns(UserWarning, match="Ignoring local file.*as version_id or environment was specified"):
+ if test_file.type == "prompt":
+ response = humanloop_client.prompts.call(
+ path=test_file.path,
+ version_id=test_file.version_id,
+ environment="staging",
+ messages=[{"role": "user", "content": "Testing"}]
+ )
+ elif test_file.type == "agent":
+ response = humanloop_client.agents.call(
+ path=test_file.path,
+ version_id=test_file.version_id,
+ environment="staging",
+ messages=[{"role": "user", "content": "Testing"}]
+ )
+ assert response is not None
\ No newline at end of file
From ddeef1301b7c5bddf373de48f1c455a2ccf8b634 Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Thu, 1 May 2025 17:12:25 +0100
Subject: [PATCH 27/39] fix version in pyproject.toml
---
poetry.lock | 23 +----------------------
pyproject.toml | 4 +---
2 files changed, 2 insertions(+), 25 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index 5b31062a..7fc9199c 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1999,27 +1999,6 @@ files = [
{file = "ruff-0.5.7.tar.gz", hash = "sha256:8dfc0a458797f5d9fb622dd0efc52d796f23f0a1493a9527f4e49a550ae9a7e5"},
]
-[[package]]
-name = "setuptools"
-version = "80.1.0"
-description = "Easily download, build, install, upgrade, and uninstall Python packages"
-optional = false
-python-versions = ">=3.9"
-groups = ["main"]
-files = [
- {file = "setuptools-80.1.0-py3-none-any.whl", hash = "sha256:ea0e7655c05b74819f82e76e11a85b31779fee7c4969e82f72bab0664e8317e4"},
- {file = "setuptools-80.1.0.tar.gz", hash = "sha256:2e308396e1d83de287ada2c2fd6e64286008fe6aca5008e0b6a8cb0e2c86eedd"},
-]
-
-[package.extras]
-check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\"", "ruff (>=0.8.0) ; sys_platform != \"cygwin\""]
-core = ["importlib_metadata (>=6) ; python_version < \"3.10\"", "jaraco.functools (>=4)", "jaraco.text (>=3.7)", "more_itertools", "more_itertools (>=8.8)", "packaging (>=24.2)", "platformdirs (>=4.2.2)", "tomli (>=2.0.1) ; python_version < \"3.11\"", "wheel (>=0.43.0)"]
-cover = ["pytest-cov"]
-doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"]
-enabler = ["pytest-enabler (>=2.2)"]
-test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21) ; python_version >= \"3.9\" and sys_platform != \"cygwin\"", "jaraco.envs (>=2.2)", "jaraco.path (>=3.7.2)", "jaraco.test (>=5.5)", "packaging (>=24.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf ; sys_platform != \"cygwin\"", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"]
-type = ["importlib_metadata (>=7.0.2) ; python_version < \"3.10\"", "jaraco.develop (>=7.21) ; sys_platform != \"cygwin\"", "mypy (==1.14.*)", "pytest-mypy"]
-
[[package]]
name = "six"
version = "1.17.0"
@@ -2413,4 +2392,4 @@ type = ["pytest-mypy"]
[metadata]
lock-version = "2.1"
python-versions = ">=3.9,<4"
-content-hash = "0890733460cf6e0f6df37795594159de92028ba3fb5811d38cd7a7e5f9ec4282"
+content-hash = "2c46b60972d2abc4e2b2a6b03d82ab32d2af74e9c5932a84d0ea6758fac32f72"
diff --git a/pyproject.toml b/pyproject.toml
index 903a364b..6780c06c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -6,9 +6,8 @@ keywords = ["ai", "machine-learning", "llm", "sdk", "humanloop"]
[tool.poetry]
name = "humanloop"
-version = "0.1.0"
+version = "0.8.36"
description = "Humanloop Python SDK"
-authors = ["Your Name "]
keywords = []
packages = [
{ include = "humanloop", from = "src" },
@@ -56,7 +55,6 @@ pydantic = ">= 1.9.2"
pydantic-core = "^2.18.2"
typing_extensions = ">= 4.0.0"
click = "^8.0.0"
-setuptools = "^80.1.0"
[tool.poetry.group.dev.dependencies]
mypy = "1.0.1"
From fe18a9cc4d8f21da8a63298116b73cb9e7ad0ae2 Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Thu, 1 May 2025 17:13:11 +0100
Subject: [PATCH 28/39] add readme to pyproject.toml
---
pyproject.toml | 1 +
1 file changed, 1 insertion(+)
diff --git a/pyproject.toml b/pyproject.toml
index 6780c06c..16052ae5 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -8,6 +8,7 @@ keywords = ["ai", "machine-learning", "llm", "sdk", "humanloop"]
name = "humanloop"
version = "0.8.36"
description = "Humanloop Python SDK"
+readme = "README.md"
keywords = []
packages = [
{ include = "humanloop", from = "src" },
From c6539edfa486b155fbf1d603f276ffd7ee59c5be Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Fri, 2 May 2025 11:20:26 +0100
Subject: [PATCH 29/39] improve docstrings and help for cli
---
src/humanloop/cli/__main__.py | 33 ++++++++++++++++++++++++++++-----
1 file changed, 28 insertions(+), 5 deletions(-)
diff --git a/src/humanloop/cli/__main__.py b/src/humanloop/cli/__main__.py
index ee7c730b..e31793f1 100644
--- a/src/humanloop/cli/__main__.py
+++ b/src/humanloop/cli/__main__.py
@@ -97,8 +97,7 @@ def wrapper(*args, **kwargs):
"max_content_width": 100,
}
)
-@common_options
-def cli(api_key: Optional[str], env_file: Optional[str], base_dir: str, base_url: Optional[str]):
+def cli():
"""Humanloop CLI for managing sync operations."""
pass
@@ -106,8 +105,8 @@ def cli(api_key: Optional[str], env_file: Optional[str], base_dir: str, base_url
@click.option(
"--path",
"-p",
- help="Path to pull (file or directory). If not provided, pulls everything. "+
- "To pull a specific file, ensure the extension for the file is included (e.g. .prompt or .agent). "+
+ help="Path to pull (file or directory). If not provided, pulls everything. "
+ "To pull a specific file, ensure the extension for the file is included (e.g. .prompt or .agent). "
"To pull a directory, simply specify the path to the directory (e.g. abc/def to pull all files under abc/def and its subdirectories).",
default=None,
)
@@ -118,8 +117,31 @@ def cli(api_key: Optional[str], env_file: Optional[str], base_dir: str, base_url
default=None,
)
@handle_sync_errors
+@common_options
def pull(path: Optional[str], environment: Optional[str], api_key: Optional[str], env_file: Optional[str], base_dir: str, base_url: Optional[str]):
- """Pull files from Humanloop to your local filesystem."""
+ """Pull prompt and agent files from Humanloop to your local filesystem.
+
+ \b
+ This command will:
+ 1. Fetch prompt and agent files from your Humanloop workspace
+ 2. Save them to your local filesystem
+ 3. Maintain the same directory structure as in Humanloop
+ 4. Add appropriate file extensions (.prompt or .agent)
+
+ \b
+ The files will be saved with the following structure:
+ {base_dir}/
+ ├── prompts/
+ │ ├── my_prompt.prompt
+ │ └── nested/
+ │ └── another_prompt.prompt
+ └── agents/
+ └── my_agent.agent
+
+ The operation will overwrite existing files with the latest version from Humanloop
+ but will not delete local files that don't exist in the remote workspace.
+
+ Currently only supports syncing prompt and agent files. Other file types will be skipped."""
client = get_client(api_key, env_file, base_url)
sync_client = SyncClient(client, base_dir=base_dir)
@@ -164,6 +186,7 @@ def format_timestamp(timestamp: str) -> str:
help="Display history in a single line per operation",
)
@handle_sync_errors
+@common_options
def history(api_key: Optional[str], env_file: Optional[str], base_dir: str, base_url: Optional[str], oneline: bool):
"""Show sync operation history."""
client = get_client(api_key, env_file, base_url)
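The `\b` lines in the docstring above are click's no-rewrap markers: click normally rewraps help paragraphs to the terminal width, but a line containing only `\b` preserves the following block verbatim, which keeps the numbered steps and the directory tree readable in --help. A minimal, hypothetical example:

import click

@click.command()
def example():
    """Example command.

    \b
    This block keeps its exact line breaks in --help:
      1. first step
      2. second step
    """
    click.echo("ok")

if __name__ == "__main__":
    example()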
From 318aa6f56b3b2e6bdffa3efe74db7ce52a6a8797 Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Fri, 2 May 2025 11:29:43 +0100
Subject: [PATCH 30/39] clean up pyproject.toml
---
pyproject.toml | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/pyproject.toml b/pyproject.toml
index 8a13a168..9d40db4a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -9,6 +9,7 @@ name = "humanloop"
version = "0.8.36b1"
description = "Humanloop Python SDK"
readme = "README.md"
+authors = []
keywords = []
packages = [
{ include = "humanloop", from = "src" },
@@ -72,7 +73,7 @@ openai = "^1.52.2"
pandas = "^2.2.0"
parse-type = ">=0.6.4"
pyarrow = "^19.0.0"
-pytest-retry = "1.6.3"
+pytest-retry = "^1.6.3"
python-dotenv = "^1.0.1"
replicate = "^1.0.3"
ruff = "^0.5.6"
@@ -93,6 +94,6 @@ line-length = 120
humanloop = "humanloop.cli.__main__:cli"
[build-system]
-requires = ["poetry-core>=1.0.0"]
+requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
From 63cc8af39f6d2591827502033c79c65faf78d572 Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Fri, 2 May 2025 11:32:31 +0100
Subject: [PATCH 31/39] add comments for clarity and remove keywords from
pyproject.toml
---
pyproject.toml | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/pyproject.toml b/pyproject.toml
index 9d40db4a..8dc69a73 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,16 +1,17 @@
+# This section is used by PyPI and follows PEP 621 for package metadata
[project]
name = "humanloop"
description = "The Humanloop Python Library"
authors = []
-keywords = ["ai", "machine-learning", "llm", "sdk", "humanloop"]
+# This section is used by Poetry for development and building
+# The metadata here is used during development but not published to PyPI
[tool.poetry]
name = "humanloop"
version = "0.8.36b1"
description = "Humanloop Python SDK"
readme = "README.md"
authors = []
-keywords = []
packages = [
{ include = "humanloop", from = "src" },
]
From 9f5a3f21419a616af6d123f0c2188f771a1f9eb3 Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Fri, 2 May 2025 11:54:27 +0100
Subject: [PATCH 32/39] improve docstrings and comments in overload.py
---
src/humanloop/overload.py | 12 +++---------
1 file changed, 3 insertions(+), 9 deletions(-)
diff --git a/src/humanloop/overload.py b/src/humanloop/overload.py
index 5e2f953c..43403b8c 100644
--- a/src/humanloop/overload.py
+++ b/src/humanloop/overload.py
@@ -78,7 +78,7 @@ def _overload_log(
try:
response = self._log(**kwargs_eval)
except Exception as e:
- # Re-raising as HumanloopDecoratorError so the decorators don't catch it
+ # Re-raising as HumanloopRuntimeError so the decorators don't catch it
raise HumanloopRuntimeError from e
if eval_callback is not None:
eval_callback(response.id)
@@ -86,7 +86,7 @@ def _overload_log(
try:
response = self._log(**kwargs)
except Exception as e:
- # Re-raising as HumanloopDecoratorError so the decorators don't catch it
+ # Re-raising as HumanloopRuntimeError so the decorators don't catch it
raise HumanloopRuntimeError from e
return response
@@ -116,7 +116,6 @@ def _overload_call(self, **kwargs) -> PromptCallResponse:
}
try:
- logger.info(f"Calling inner overload")
response = self._call(**kwargs)
except Exception as e:
# Re-raising as HumanloopRuntimeError so the decorators don't catch it
@@ -147,12 +146,7 @@ def overload_with_local_files(
When use_local_files is True:
- If only path is specified (no version_id or environment), attempts to use local file
- If local file is not found or cannot be read, raises an error
- - If version_id or environment is specified, uses remote version with a warning
-
- Args:
- client: The client to overload (PromptsClient or AgentsClient)
- sync_client: The sync client for handling local files
- use_local_files: Whether to use local files
+ - If version_id and/or environment is specified, uses remote version with a warning
Raises:
HumanloopRuntimeError: If use_local_files is True and local file cannot be accessed
From 9afd2fc716a2dca0ec75d54c09c5f61322521553 Mon Sep 17 00:00:00 2001
From: fern-api <115122769+fern-api[bot]@users.noreply.github.com>
Date: Tue, 6 May 2025 12:05:14 +0000
Subject: [PATCH 33/39] Release 0.8.36
---
poetry.lock | 198 ++---
pyproject.toml | 2 +-
reference.md | 303 +++-----
src/humanloop/__init__.py | 60 +-
src/humanloop/agents/__init__.py | 12 +
src/humanloop/agents/client.py | 678 +++++-------------
src/humanloop/agents/raw_client.py | 405 ++++++++---
src/humanloop/agents/requests/__init__.py | 6 +
.../requests/agent_log_request_agent.py | 6 +
.../requests/agents_call_request_agent.py | 6 +
.../agents_call_stream_request_agent.py | 6 +
src/humanloop/agents/types/__init__.py | 6 +
.../agents/types/agent_log_request_agent.py | 6 +
.../agents/types/agents_call_request_agent.py | 6 +
.../types/agents_call_stream_request_agent.py | 6 +
src/humanloop/core/client_wrapper.py | 4 +-
src/humanloop/datasets/client.py | 10 +-
src/humanloop/evaluators/client.py | 10 +-
src/humanloop/files/client.py | 48 +-
src/humanloop/files/raw_client.py | 40 +-
src/humanloop/flows/client.py | 10 +-
src/humanloop/logs/client.py | 12 +
src/humanloop/prompts/__init__.py | 12 +
src/humanloop/prompts/client.py | 78 +-
src/humanloop/prompts/raw_client.py | 84 ++-
src/humanloop/prompts/requests/__init__.py | 6 +
.../requests/prompt_log_request_prompt.py | 6 +
.../requests/prompts_call_request_prompt.py | 6 +
.../prompts_call_stream_request_prompt.py | 6 +
src/humanloop/prompts/types/__init__.py | 6 +
.../types/prompt_log_request_prompt.py | 6 +
.../types/prompts_call_request_prompt.py | 6 +
.../prompts_call_stream_request_prompt.py | 6 +
src/humanloop/requests/__init__.py | 16 +-
.../requests/agent_continue_call_response.py | 202 ------
...gent_continue_call_response_tool_choice.py | 8 -
.../agent_continue_call_stream_response.py | 19 -
...t_continue_call_stream_response_payload.py | 8 -
src/humanloop/tools/client.py | 10 +-
src/humanloop/types/__init__.py | 20 +-
.../types/agent_continue_call_response.py | 224 ------
...gent_continue_call_response_tool_choice.py | 8 -
.../agent_continue_call_stream_response.py | 44 --
...t_continue_call_stream_response_payload.py | 8 -
src/humanloop/types/file_sort_by.py | 5 +
src/humanloop/types/project_sort_by.py | 5 -
.../types/version_id_response_version.py | 8 +-
.../types/version_reference_response.py | 1 +
48 files changed, 1088 insertions(+), 1560 deletions(-)
create mode 100644 src/humanloop/agents/requests/agent_log_request_agent.py
create mode 100644 src/humanloop/agents/requests/agents_call_request_agent.py
create mode 100644 src/humanloop/agents/requests/agents_call_stream_request_agent.py
create mode 100644 src/humanloop/agents/types/agent_log_request_agent.py
create mode 100644 src/humanloop/agents/types/agents_call_request_agent.py
create mode 100644 src/humanloop/agents/types/agents_call_stream_request_agent.py
create mode 100644 src/humanloop/prompts/requests/prompt_log_request_prompt.py
create mode 100644 src/humanloop/prompts/requests/prompts_call_request_prompt.py
create mode 100644 src/humanloop/prompts/requests/prompts_call_stream_request_prompt.py
create mode 100644 src/humanloop/prompts/types/prompt_log_request_prompt.py
create mode 100644 src/humanloop/prompts/types/prompts_call_request_prompt.py
create mode 100644 src/humanloop/prompts/types/prompts_call_stream_request_prompt.py
delete mode 100644 src/humanloop/requests/agent_continue_call_response.py
delete mode 100644 src/humanloop/requests/agent_continue_call_response_tool_choice.py
delete mode 100644 src/humanloop/requests/agent_continue_call_stream_response.py
delete mode 100644 src/humanloop/requests/agent_continue_call_stream_response_payload.py
delete mode 100644 src/humanloop/types/agent_continue_call_response.py
delete mode 100644 src/humanloop/types/agent_continue_call_response_tool_choice.py
delete mode 100644 src/humanloop/types/agent_continue_call_stream_response.py
delete mode 100644 src/humanloop/types/agent_continue_call_stream_response_payload.py
create mode 100644 src/humanloop/types/file_sort_by.py
delete mode 100644 src/humanloop/types/project_sort_by.py
diff --git a/poetry.lock b/poetry.lock
index cfe8a240..a7354814 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -89,103 +89,103 @@ files = [
[[package]]
name = "charset-normalizer"
-version = "3.4.1"
+version = "3.4.2"
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
optional = false
python-versions = ">=3.7"
files = [
- {file = "charset_normalizer-3.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e218488cd232553829be0664c2292d3af2eeeb94b32bea483cf79ac6a694e037"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80ed5e856eb7f30115aaf94e4a08114ccc8813e6ed1b5efa74f9f82e8509858f"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b010a7a4fd316c3c484d482922d13044979e78d1861f0e0650423144c616a46a"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4532bff1b8421fd0a320463030c7520f56a79c9024a4e88f01c537316019005a"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d973f03c0cb71c5ed99037b870f2be986c3c05e63622c017ea9816881d2dd247"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3a3bd0dcd373514dcec91c411ddb9632c0d7d92aed7093b8c3bbb6d69ca74408"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:d9c3cdf5390dcd29aa8056d13e8e99526cda0305acc038b96b30352aff5ff2bb"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:2bdfe3ac2e1bbe5b59a1a63721eb3b95fc9b6817ae4a46debbb4e11f6232428d"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:eab677309cdb30d047996b36d34caeda1dc91149e4fdca0b1a039b3f79d9a807"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-win32.whl", hash = "sha256:c0429126cf75e16c4f0ad00ee0eae4242dc652290f940152ca8c75c3a4b6ee8f"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:9f0b8b1c6d84c8034a44893aba5e767bf9c7a211e313a9605d9c617d7083829f"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8bfa33f4f2672964266e940dd22a195989ba31669bd84629f05fab3ef4e2d125"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28bf57629c75e810b6ae989f03c0828d64d6b26a5e205535585f96093e405ed1"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f08ff5e948271dc7e18a35641d2f11a4cd8dfd5634f55228b691e62b37125eb3"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:234ac59ea147c59ee4da87a0c0f098e9c8d169f4dc2a159ef720f1a61bbe27cd"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd4ec41f914fa74ad1b8304bbc634b3de73d2a0889bd32076342a573e0779e00"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eea6ee1db730b3483adf394ea72f808b6e18cf3cb6454b4d86e04fa8c4327a12"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c96836c97b1238e9c9e3fe90844c947d5afbf4f4c92762679acfe19927d81d77"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:4d86f7aff21ee58f26dcf5ae81a9addbd914115cdebcbb2217e4f0ed8982e146"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:09b5e6733cbd160dcc09589227187e242a30a49ca5cefa5a7edd3f9d19ed53fd"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:5777ee0881f9499ed0f71cc82cf873d9a0ca8af166dfa0af8ec4e675b7df48e6"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:237bdbe6159cff53b4f24f397d43c6336c6b0b42affbe857970cefbb620911c8"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-win32.whl", hash = "sha256:8417cb1f36cc0bc7eaba8ccb0e04d55f0ee52df06df3ad55259b9a323555fc8b"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:d7f50a1f8c450f3925cb367d011448c39239bb3eb4117c36a6d354794de4ce76"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:73d94b58ec7fecbc7366247d3b0b10a21681004153238750bb67bd9012414545"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dad3e487649f498dd991eeb901125411559b22e8d7ab25d3aeb1af367df5efd7"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c30197aa96e8eed02200a83fba2657b4c3acd0f0aa4bdc9f6c1af8e8962e0757"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2369eea1ee4a7610a860d88f268eb39b95cb588acd7235e02fd5a5601773d4fa"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc2722592d8998c870fa4e290c2eec2c1569b87fe58618e67d38b4665dfa680d"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffc9202a29ab3920fa812879e95a9e78b2465fd10be7fcbd042899695d75e616"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:804a4d582ba6e5b747c625bf1255e6b1507465494a40a2130978bda7b932c90b"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f55e69f030f7163dffe9fd0752b32f070566451afe180f99dbeeb81f511ad8d"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c4c3e6da02df6fa1410a7680bd3f63d4f710232d3139089536310d027950696a"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:5df196eb874dae23dcfb968c83d4f8fdccb333330fe1fc278ac5ceeb101003a9"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-win32.whl", hash = "sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-win32.whl", hash = "sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f30bf9fd9be89ecb2360c7d94a711f00c09b976258846efe40db3d05828e8089"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:97f68b8d6831127e4787ad15e6757232e14e12060bec17091b85eb1486b91d8d"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7974a0b5ecd505609e3b19742b60cee7aa2aa2fb3151bc917e6e2646d7667dcf"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc54db6c8593ef7d4b2a331b58653356cf04f67c960f584edb7c3d8c97e8f39e"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:311f30128d7d333eebd7896965bfcfbd0065f1716ec92bd5638d7748eb6f936a"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:7d053096f67cd1241601111b698f5cad775f97ab25d81567d3f59219b5f1adbd"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:807f52c1f798eef6cf26beb819eeb8819b1622ddfeef9d0977a8502d4db6d534"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:dccbe65bd2f7f7ec22c4ff99ed56faa1e9f785482b9bbd7c717e26fd723a1d1e"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:2fb9bd477fdea8684f78791a6de97a953c51831ee2981f8e4f583ff3b9d9687e"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:01732659ba9b5b873fc117534143e4feefecf3b2078b0a6a2e925271bb6f4cfa"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-win32.whl", hash = "sha256:7a4f97a081603d2050bfaffdefa5b02a9ec823f8348a572e39032caa8404a487"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-win_amd64.whl", hash = "sha256:7b1bef6280950ee6c177b326508f86cad7ad4dff12454483b51d8b7d673a2c5d"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ecddf25bee22fe4fe3737a399d0d177d72bc22be6913acfab364b40bce1ba83c"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c60ca7339acd497a55b0ea5d506b2a2612afb2826560416f6894e8b5770d4a9"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b7b2d86dd06bfc2ade3312a83a5c364c7ec2e3498f8734282c6c3d4b07b346b8"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd78cfcda14a1ef52584dbb008f7ac81c1328c0f58184bf9a84c49c605002da6"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e27f48bcd0957c6d4cb9d6fa6b61d192d0b13d5ef563e5f2ae35feafc0d179c"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:01ad647cdd609225c5350561d084b42ddf732f4eeefe6e678765636791e78b9a"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:619a609aa74ae43d90ed2e89bdd784765de0a25ca761b93e196d938b8fd1dbbd"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:89149166622f4db9b4b6a449256291dc87a99ee53151c74cbd82a53c8c2f6ccd"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:7709f51f5f7c853f0fb938bcd3bc59cdfdc5203635ffd18bf354f6967ea0f824"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:345b0426edd4e18138d6528aed636de7a9ed169b4aaf9d61a8c19e39d26838ca"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0907f11d019260cdc3f94fbdb23ff9125f6b5d1039b76003b5b0ac9d6a6c9d5b"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-win32.whl", hash = "sha256:ea0d8d539afa5eb2728aa1932a988a9a7af94f18582ffae4bc10b3fbdad0626e"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:329ce159e82018d646c7ac45b01a430369d526569ec08516081727a20e9e4af4"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b97e690a2118911e39b4042088092771b4ae3fc3aa86518f84b8cf6888dbdb41"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78baa6d91634dfb69ec52a463534bc0df05dbd546209b79a3880a34487f4b84f"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1a2bc9f351a75ef49d664206d51f8e5ede9da246602dc2d2726837620ea034b2"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75832c08354f595c760a804588b9357d34ec00ba1c940c15e31e96d902093770"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0af291f4fe114be0280cdd29d533696a77b5b49cfde5467176ecab32353395c4"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0167ddc8ab6508fe81860a57dd472b2ef4060e8d378f0cc555707126830f2537"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2a75d49014d118e4198bcee5ee0a6f25856b29b12dbf7cd012791f8a6cc5c496"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:363e2f92b0f0174b2f8238240a1a30142e3db7b957a5dd5689b0e75fb717cc78"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ab36c8eb7e454e34e60eb55ca5d241a5d18b2c6244f6827a30e451c42410b5f7"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:4c0907b1928a36d5a998d72d64d8eaa7244989f7aaaf947500d3a800c83a3fd6"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:04432ad9479fa40ec0f387795ddad4437a2b50417c69fa275e212933519ff294"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-win32.whl", hash = "sha256:3bed14e9c89dcb10e8f3a29f9ccac4955aebe93c71ae803af79265c9ca5644c5"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:49402233c892a461407c512a19435d1ce275543138294f7ef013f0b63d5d3765"},
- {file = "charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85"},
- {file = "charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7c48ed483eb946e6c04ccbe02c6b4d1d48e51944b6db70f697e089c193404941"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2d318c11350e10662026ad0eb71bb51c7812fc8590825304ae0bdd4ac283acd"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9cbfacf36cb0ec2897ce0ebc5d08ca44213af24265bd56eca54bee7923c48fd6"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18dd2e350387c87dabe711b86f83c9c78af772c748904d372ade190b5c7c9d4d"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8075c35cd58273fee266c58c0c9b670947c19df5fb98e7b66710e04ad4e9ff86"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5bf4545e3b962767e5c06fe1738f951f77d27967cb2caa64c28be7c4563e162c"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:7a6ab32f7210554a96cd9e33abe3ddd86732beeafc7a28e9955cdf22ffadbab0"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:b33de11b92e9f75a2b545d6e9b6f37e398d86c3e9e9653c4864eb7e89c5773ef"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:8755483f3c00d6c9a77f490c17e6ab0c8729e39e6390328e42521ef175380ae6"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:68a328e5f55ec37c57f19ebb1fdc56a248db2e3e9ad769919a58672958e8f366"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:21b2899062867b0e1fde9b724f8aecb1af14f2778d69aacd1a5a1853a597a5db"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-win32.whl", hash = "sha256:e8082b26888e2f8b36a042a58307d5b917ef2b1cacab921ad3323ef91901c71a"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-win_amd64.whl", hash = "sha256:f69a27e45c43520f5487f27627059b64aaf160415589230992cec34c5e18a509"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:be1e352acbe3c78727a16a455126d9ff83ea2dfdcbc83148d2982305a04714c2"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa88ca0b1932e93f2d961bf3addbb2db902198dca337d88c89e1559e066e7645"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d524ba3f1581b35c03cb42beebab4a13e6cdad7b36246bd22541fa585a56cccd"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28a1005facc94196e1fb3e82a3d442a9d9110b8434fc1ded7a24a2983c9888d8"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fdb20a30fe1175ecabed17cbf7812f7b804b8a315a25f24678bcdf120a90077f"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0f5d9ed7f254402c9e7d35d2f5972c9bbea9040e99cd2861bd77dc68263277c7"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:efd387a49825780ff861998cd959767800d54f8308936b21025326de4b5a42b9"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:f0aa37f3c979cf2546b73e8222bbfa3dc07a641585340179d768068e3455e544"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:e70e990b2137b29dc5564715de1e12701815dacc1d056308e2b17e9095372a82"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:0c8c57f84ccfc871a48a47321cfa49ae1df56cd1d965a09abe84066f6853b9c0"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6b66f92b17849b85cad91259efc341dce9c1af48e2173bf38a85c6329f1033e5"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-win32.whl", hash = "sha256:daac4765328a919a805fa5e2720f3e94767abd632ae410a9062dff5412bae65a"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-win_amd64.whl", hash = "sha256:e53efc7c7cee4c1e70661e2e112ca46a575f90ed9ae3fef200f2a25e954f4b28"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0c29de6a1a95f24b9a1aa7aefd27d2487263f00dfd55a77719b530788f75cff7"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cddf7bd982eaa998934a91f69d182aec997c6c468898efe6679af88283b498d3"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcbe676a55d7445b22c10967bceaaf0ee69407fbe0ece4d032b6eb8d4565982a"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d41c4d287cfc69060fa91cae9683eacffad989f1a10811995fa309df656ec214"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e594135de17ab3866138f496755f302b72157d115086d100c3f19370839dd3a"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf713fe9a71ef6fd5adf7a79670135081cd4431c2943864757f0fa3a65b1fafd"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a370b3e078e418187da8c3674eddb9d983ec09445c99a3a263c2011993522981"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a955b438e62efdf7e0b7b52a64dc5c3396e2634baa62471768a64bc2adb73d5c"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:7222ffd5e4de8e57e03ce2cef95a4c43c98fcb72ad86909abdfc2c17d227fc1b"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:bee093bf902e1d8fc0ac143c88902c3dfc8941f7ea1d6a8dd2bcb786d33db03d"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:dedb8adb91d11846ee08bec4c8236c8549ac721c245678282dcb06b221aab59f"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-win32.whl", hash = "sha256:db4c7bf0e07fc3b7d89ac2a5880a6a8062056801b83ff56d8464b70f65482b6c"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:5a9979887252a82fefd3d3ed2a8e3b937a7a809f65dcb1e068b090e165bbe99e"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:926ca93accd5d36ccdabd803392ddc3e03e6d4cd1cf17deff3b989ab8e9dbcf0"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eba9904b0f38a143592d9fc0e19e2df0fa2e41c3c3745554761c5f6447eedabf"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3fddb7e2c84ac87ac3a947cb4e66d143ca5863ef48e4a5ecb83bd48619e4634e"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98f862da73774290f251b9df8d11161b6cf25b599a66baf087c1ffe340e9bfd1"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c9379d65defcab82d07b2a9dfbfc2e95bc8fe0ebb1b176a3190230a3ef0e07c"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e635b87f01ebc977342e2697d05b56632f5f879a4f15955dfe8cef2448b51691"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1c95a1e2902a8b722868587c0e1184ad5c55631de5afc0eb96bc4b0d738092c0"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ef8de666d6179b009dce7bcb2ad4c4a779f113f12caf8dc77f0162c29d20490b"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:32fc0341d72e0f73f80acb0a2c94216bd704f4f0bce10aedea38f30502b271ff"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:289200a18fa698949d2b39c671c2cc7a24d44096784e76614899a7ccf2574b7b"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4a476b06fbcf359ad25d34a057b7219281286ae2477cc5ff5e3f70a246971148"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-win32.whl", hash = "sha256:aaeeb6a479c7667fbe1099af9617c83aaca22182d6cf8c53966491a0f1b7ffb7"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-win_amd64.whl", hash = "sha256:aa6af9e7d59f9c12b33ae4e9450619cf2488e2bbe9b44030905877f0b2324980"},
+ {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1cad5f45b3146325bb38d6855642f6fd609c3f7cad4dbaf75549bf3b904d3184"},
+ {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b2680962a4848b3c4f155dc2ee64505a9c57186d0d56b43123b17ca3de18f0fa"},
+ {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:36b31da18b8890a76ec181c3cf44326bf2c48e36d393ca1b72b3f484113ea344"},
+ {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f4074c5a429281bf056ddd4c5d3b740ebca4d43ffffe2ef4bf4d2d05114299da"},
+ {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c9e36a97bee9b86ef9a1cf7bb96747eb7a15c2f22bdb5b516434b00f2a599f02"},
+ {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:1b1bde144d98e446b056ef98e59c256e9294f6b74d7af6846bf5ffdafd687a7d"},
+ {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:915f3849a011c1f593ab99092f3cecfcb4d65d8feb4a64cf1bf2d22074dc0ec4"},
+ {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:fb707f3e15060adf5b7ada797624a6c6e0138e2a26baa089df64c68ee98e040f"},
+ {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:25a23ea5c7edc53e0f29bae2c44fcb5a1aa10591aae107f2a2b2583a9c5cbc64"},
+ {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:770cab594ecf99ae64c236bc9ee3439c3f46be49796e265ce0cc8bc17b10294f"},
+ {file = "charset_normalizer-3.4.2-cp37-cp37m-win32.whl", hash = "sha256:6a0289e4589e8bdfef02a80478f1dfcb14f0ab696b5a00e1f4b8a14a307a3c58"},
+ {file = "charset_normalizer-3.4.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6fc1f5b51fa4cecaa18f2bd7a003f3dd039dd615cd69a2afd6d3b19aed6775f2"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:76af085e67e56c8816c3ccf256ebd136def2ed9654525348cfa744b6802b69eb"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e45ba65510e2647721e35323d6ef54c7974959f6081b58d4ef5d87c60c84919a"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:046595208aae0120559a67693ecc65dd75d46f7bf687f159127046628178dc45"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75d10d37a47afee94919c4fab4c22b9bc2a8bf7d4f46f87363bcf0573f3ff4f5"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6333b3aa5a12c26b2a4d4e7335a28f1475e0e5e17d69d55141ee3cab736f66d1"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e8323a9b031aa0393768b87f04b4164a40037fb2a3c11ac06a03ffecd3618027"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:24498ba8ed6c2e0b56d4acbf83f2d989720a93b41d712ebd4f4979660db4417b"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:844da2b5728b5ce0e32d863af26f32b5ce61bc4273a9c720a9f3aa9df73b1455"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:65c981bdbd3f57670af8b59777cbfae75364b483fa8a9f420f08094531d54a01"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:3c21d4fca343c805a52c0c78edc01e3477f6dd1ad7c47653241cf2a206d4fc58"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:dc7039885fa1baf9be153a0626e337aa7ec8bf96b0128605fb0d77788ddc1681"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-win32.whl", hash = "sha256:8272b73e1c5603666618805fe821edba66892e2870058c94c53147602eab29c7"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-win_amd64.whl", hash = "sha256:70f7172939fdf8790425ba31915bfbe8335030f05b9913d7ae00a87d4395620a"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:005fa3432484527f9732ebd315da8da8001593e2cf46a3d817669f062c3d9ed4"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e92fca20c46e9f5e1bb485887d074918b13543b1c2a1185e69bb8d17ab6236a7"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:50bf98d5e563b83cc29471fa114366e6806bc06bc7a25fd59641e41445327836"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:721c76e84fe669be19c5791da68232ca2e05ba5185575086e384352e2c309597"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82d8fd25b7f4675d0c47cf95b594d4e7b158aca33b76aa63d07186e13c0e0ab7"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3daeac64d5b371dea99714f08ffc2c208522ec6b06fbc7866a450dd446f5c0f"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dccab8d5fa1ef9bfba0590ecf4d46df048d18ffe3eec01eeb73a42e0d9e7a8ba"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:aaf27faa992bfee0264dc1f03f4c75e9fcdda66a519db6b957a3f826e285cf12"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:eb30abc20df9ab0814b5a2524f23d75dcf83cde762c161917a2b4b7b55b1e518"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:c72fbbe68c6f32f251bdc08b8611c7b3060612236e960ef848e0a517ddbe76c5"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:982bb1e8b4ffda883b3d0a521e23abcd6fd17418f6d2c4118d257a10199c0ce3"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-win32.whl", hash = "sha256:43e0933a0eff183ee85833f341ec567c0980dae57c464d8a508e1b2ceb336471"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-win_amd64.whl", hash = "sha256:d11b54acf878eef558599658b0ffca78138c8c3655cf4f3a4a673c437e67732e"},
+ {file = "charset_normalizer-3.4.2-py3-none-any.whl", hash = "sha256:7f56930ab0abd1c45cd15be65cc741c28b1c9a34876ce8c17a2fa107810c0af0"},
+ {file = "charset_normalizer-3.4.2.tar.gz", hash = "sha256:5baececa9ecba31eff645232d59845c07aa030f0c81ee70184a90d35099a0e63"},
]
[[package]]
@@ -384,13 +384,13 @@ tqdm = ["tqdm"]
[[package]]
name = "groq"
-version = "0.23.1"
+version = "0.24.0"
description = "The official Python library for the groq API"
optional = false
python-versions = ">=3.8"
files = [
- {file = "groq-0.23.1-py3-none-any.whl", hash = "sha256:05fa38c3d0ad03c19c6185f98f6a73901c2a463e844fd067b79f7b05c8346946"},
- {file = "groq-0.23.1.tar.gz", hash = "sha256:952e34895f9bfb78ab479e495d77b32180262e5c42f531ce3a1722d6e5a04dfb"},
+ {file = "groq-0.24.0-py3-none-any.whl", hash = "sha256:0020e6b0b2b267263c9eb7c318deef13c12f399c6525734200b11d777b00088e"},
+ {file = "groq-0.24.0.tar.gz", hash = "sha256:e821559de8a77fb81d2585b3faec80ff923d6d64fd52339b33f6c94997d6f7f5"},
]
[package.dependencies]
@@ -873,13 +873,13 @@ files = [
[[package]]
name = "openai"
-version = "1.76.2"
+version = "1.77.0"
description = "The official Python library for the openai API"
optional = false
python-versions = ">=3.8"
files = [
- {file = "openai-1.76.2-py3-none-any.whl", hash = "sha256:9c1d9ad59e6e3bea7205eedc9ca66eeebae18d47b527e505a2b0d2fb1538e26e"},
- {file = "openai-1.76.2.tar.gz", hash = "sha256:f430c8b848775907405c6eff54621254c96f6444c593c097e0cc3a9f8fdda96f"},
+ {file = "openai-1.77.0-py3-none-any.whl", hash = "sha256:07706e91eb71631234996989a8ea991d5ee56f0744ef694c961e0824d4f39218"},
+ {file = "openai-1.77.0.tar.gz", hash = "sha256:897969f927f0068b8091b4b041d1f8175bcf124f7ea31bab418bf720971223bc"},
]
[package.dependencies]
diff --git a/pyproject.toml b/pyproject.toml
index 9dddf812..73f2c3d4 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -3,7 +3,7 @@ name = "humanloop"
[tool.poetry]
name = "humanloop"
-version = "0.8.36b1"
+version = "0.8.36"
description = ""
readme = "README.md"
authors = []
diff --git a/reference.md b/reference.md
index 26f361d0..a91bd12d 100644
--- a/reference.md
+++ b/reference.md
@@ -202,7 +202,12 @@ Controls how the model uses tools. The following options are supported:
-
-**prompt:** `typing.Optional[PromptKernelRequestParams]` — Details of your Prompt. A new Prompt version will be created if the provided details are new.
+**prompt:** `typing.Optional[PromptLogRequestPromptParams]`
+
+The Prompt configuration to use. Two formats are supported:
+- An object representing the details of the Prompt configuration
+- A string representing the raw contents of a .prompt file
+A new Prompt version will be created if the provided details do not match any existing version.
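For context, a minimal sketch of the two accepted formats (the path, kernel fields, and file name below are illustrative assumptions, not part of this patch):

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Format 1: an object with the Prompt configuration details.
client.prompts.log(
    path="My Project/My Prompt",
    prompt={"model": "gpt-4o", "temperature": 0.7},
    messages=[{"role": "user", "content": "Hello"}],
)

# Format 2: the raw contents of a .prompt file, passed as a string.
with open("my_prompt.prompt") as f:
    client.prompts.log(
        path="My Project/My Prompt",
        prompt=f.read(),
        messages=[{"role": "user", "content": "Hello"}],
    )
```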
@@ -752,7 +757,12 @@ Controls how the model uses tools. The following options are supported:
-
-**prompt:** `typing.Optional[PromptKernelRequestParams]` — Details of your Prompt. A new Prompt version will be created if the provided details are new.
+**prompt:** `typing.Optional[PromptsCallStreamRequestPromptParams]`
+
+The Prompt configuration to use. Two formats are supported:
+- An object representing the details of the Prompt configuration
+- A string representing the raw contents of a .prompt file
+A new Prompt version will be created if the provided details do not match any existing version.
@@ -1026,7 +1036,12 @@ Controls how the model uses tools. The following options are supported:
-
-**prompt:** `typing.Optional[PromptKernelRequestParams]` — Details of your Prompt. A new Prompt version will be created if the provided details are new.
+**prompt:** `typing.Optional[PromptsCallRequestPromptParams]`
+
+The Prompt configuration to use. Two formats are supported:
+- An object representing the details of the Prompt configuration
+- A string representing the raw contents of a .prompt file
+A new Prompt version will be created if the provided details do not match any existing version.
@@ -1269,7 +1284,7 @@ for page in response.iter_pages():
-
-**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Prompts by
+**sort_by:** `typing.Optional[FileSortBy]` — Field to sort Prompts by
@@ -3440,7 +3455,7 @@ for page in response.iter_pages():
-
-**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Tools by
+**sort_by:** `typing.Optional[FileSortBy]` — Field to sort Tools by
@@ -4742,7 +4757,7 @@ for page in response.iter_pages():
-
-**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Datasets by
+**sort_by:** `typing.Optional[FileSortBy]` — Field to sort Datasets by
@@ -6321,7 +6336,7 @@ for page in response.iter_pages():
-
-**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Evaluators by
+**sort_by:** `typing.Optional[FileSortBy]` — Field to sort Evaluators by
@@ -8080,7 +8095,7 @@ for page in response.iter_pages():
-
-**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Flows by
+**sort_by:** `typing.Optional[FileSortBy]` — Field to sort Flows by
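The `ProjectSortBy` to `FileSortBy` rename applies uniformly across the list endpoints. A sketch, assuming `FileSortBy` keeps the same literals as the old type (e.g. `"created_at"`):

```python
# Newest Prompts first; "created_at" is an assumed FileSortBy literal.
response = client.prompts.list(sort_by="created_at", order="desc", size=10)
for prompt in response:
    print(prompt.name)
```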
@@ -8858,52 +8873,7 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.agents.log(
- path="Banking/Teller Agent",
- agent={
- "provider": "anthropic",
- "endpoint": "chat",
- "model": "claude-3-7-sonnet-latest",
- "reasoning_effort": 1024,
- "template": [
- {
- "role": "system",
- "content": "You are a helpful digital assistant, helping users navigate our digital banking platform.",
- }
- ],
- "max_iterations": 3,
- "tools": [
- {
- "type": "file",
- "link": {
- "file_id": "pr_1234567890",
- "version_id": "prv_1234567890",
- },
- "on_agent_call": "continue",
- },
- {
- "type": "inline",
- "json_schema": {
- "name": "stop",
- "description": "Call this tool when you have finished your task.",
- "parameters": {
- "type": "object",
- "properties": {
- "output": {
- "type": "string",
- "description": "The final output to return to the user.",
- }
- },
- "additionalProperties": False,
- "required": ["output"],
- },
- "strict": True,
- },
- "on_agent_call": "stop",
- },
- ],
- },
-)
+client.agents.log()
```
@@ -9037,7 +9007,12 @@ Controls how the model uses tools. The following options are supported:
-
-**agent:** `typing.Optional[AgentKernelRequestParams]` — Details of your Agent. A new Agent version will be created if the provided details are new.
+**agent:** `typing.Optional[AgentLogRequestAgentParams]`
+
+The Agent configuration to use. Two formats are supported:
+- An object representing the details of the Agent configuration
+- A string representing the raw contents of a .agent file
+A new Agent version will be created if the provided details do not match any existing version.
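A minimal sketch of the new string form (the file name is a placeholder; the object form mirrors the removed example above):

```python
# Pass the raw contents of a .agent file instead of a configuration object.
with open("teller_agent.agent") as f:
    client.agents.log(
        path="Banking/Teller Agent",
        agent=f.read(),
    )
```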
@@ -9244,20 +9219,8 @@ client = Humanloop(
api_key="YOUR_API_KEY",
)
client.agents.update_log(
- id="ag_1234567890",
- log_id="log_1234567890",
- messages=[
- {"role": "user", "content": "I need to withdraw $1000"},
- {
- "role": "assistant",
- "content": "Of course! Would you like to use your savings or checking account?",
- },
- ],
- output_message={
- "role": "assistant",
- "content": "I'm sorry, I can't help with that.",
- },
- log_status="complete",
+ id="id",
+ log_id="log_id",
)
```
@@ -9362,21 +9325,18 @@ client.agents.update_log(
-
-Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
+Call an Agent.
-If the Agent requires a tool call that cannot be ran by Humanloop, execution will halt. To continue,
-pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
-
-The agent will run for the maximum number of iterations, or until it encounters a stop condition,
-according to its configuration.
+Calling an Agent invokes the model provider and then logs
+the request, responses, and metadata to Humanloop.
You can use query parameters `version_id`, or `environment`, to target
an existing version of the Agent. Otherwise the default deployed version will be chosen.
Instead of targeting an existing version explicitly, you can instead pass in
-Agent details in the request body. A new version is created if it does not match
-any existing ones. This is helpful in the case where you are storing or deriving
-your Agent details in code.
+Agent details in the request body. In this case, we will check if the details correspond
+to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+in the case where you are storing or deriving your Agent details in code.
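A sketch of the two targeting modes described above (the path, environment name, and kernel fields are placeholders):

```python
# Target the version deployed to a specific environment.
client.agents.call(
    path="Banking/Teller Agent",
    environment="production",
    messages=[{"role": "user", "content": "Hi"}],
)

# Or pass Agent details inline: a matching existing version is reused,
# otherwise a new version is created.
client.agents.call(
    path="Banking/Teller Agent",
    agent={"model": "claude-3-7-sonnet-latest", "max_iterations": 3},
    messages=[{"role": "user", "content": "Hi"}],
)
```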
@@ -9468,7 +9428,12 @@ Controls how the model uses tools. The following options are supported:
-
-**agent:** `typing.Optional[AgentKernelRequestParams]` — Details of your Agent. A new Agent version will be created if the provided details are new.
+**agent:** `typing.Optional[AgentsCallStreamRequestAgentParams]`
+
+The Agent configuration to use. Two formats are supported:
+- An object representing the details of the Agent configuration
+- A string representing the raw contents of a .agent file
+A new Agent version will be created if the provided details do not match any existing version.
@@ -9620,21 +9585,18 @@ Controls how the model uses tools. The following options are supported:
-
-Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
-
-If the Agent requires a tool call that cannot be ran by Humanloop, execution will halt. To continue,
-pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
+Call an Agent.
-The agent will run for the maximum number of iterations, or until it encounters a stop condition,
-according to its configuration.
+Calling an Agent invokes the model provider and then logs
+the request, responses, and metadata to Humanloop.
You can use query parameters `version_id`, or `environment`, to target
an existing version of the Agent. Otherwise the default deployed version will be chosen.
Instead of targeting an existing version explicitly, you can instead pass in
-Agent details in the request body. A new version is created if it does not match
-any existing ones. This is helpful in the case where you are storing or deriving
-your Agent details in code.
+Agent details in the request body. In this case, we will check if the details correspond
+to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+in the case where you are storing or deriving your Agent details in code.
@@ -9654,15 +9616,7 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.agents.call(
- path="Banking/Teller Agent",
- messages=[
- {
- "role": "user",
- "content": "I'd like to deposit $1000 to my savings account from my checking account.",
- }
- ],
-)
+client.agents.call()
```
@@ -9732,7 +9686,12 @@ Controls how the model uses tools. The following options are supported:
-
-**agent:** `typing.Optional[AgentKernelRequestParams]` — Details of your Agent. A new Agent version will be created if the provided details are new.
+**agent:** `typing.Optional[AgentsCallRequestAgentParams]`
+
+The Agent configuration to use. Two formats are supported:
+- An object representing the details of the Agent configuration
+- A string representing the raw contents of a .agent file
+A new Agent version will be created if the provided details do not match any existing version.
@@ -9872,7 +9831,7 @@ Controls how the model uses tools. The following options are supported:
-client.agents.continue_call_stream(...)
+client.agents.continue_stream(...)
-
@@ -9886,13 +9845,13 @@ Controls how the model uses tools. The following options are supported:
Continue an incomplete Agent call.
-This endpoint allows continuing an existing incomplete Agent call, by passing the tool call
-requested by the Agent. The Agent will resume processing from where it left off.
-
-The messages in the request will be appended to the original messages in the Log. You do not
-have to provide the previous conversation history.
+This endpoint allows continuing an existing incomplete Agent call, using the context
+from the previous interaction. The Agent will resume processing from where it left off.
The original log must be in an incomplete state to be continued.
+
+The messages in the request will be appended
+to the original messages in the log.
@@ -9912,7 +9871,7 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-response = client.agents.continue_call_stream(
+response = client.agents.continue_stream(
log_id="log_id",
messages=[{"role": "user"}],
)
@@ -9977,7 +9936,7 @@ for chunk in response.data:
-client.agents.continue_call(...)
+client.agents.continue_(...)
-
@@ -9991,13 +9950,13 @@ for chunk in response.data:
Continue an incomplete Agent call.
-This endpoint allows continuing an existing incomplete Agent call, by passing the tool call
-requested by the Agent. The Agent will resume processing from where it left off.
-
-The messages in the request will be appended to the original messages in the Log. You do not
-have to provide the previous conversation history.
+This endpoint allows continuing an existing incomplete Agent call, using the context
+from the previous interaction. The Agent will resume processing from where it left off.
The original log must be in an incomplete state to be continued.
+
+The messages in the request will be appended
+to the original messages in the log.
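Note that `continue_call` is now `continue_` (and `continue_call_stream` is `continue_stream`, as shown above). Since the request messages are appended to those already on the Log, only the new turn needs to be sent. A sketch with placeholder IDs, continuing with a tool result as in the removed example:

```python
client.agents.continue_(
    log_id="log_1234567890",
    messages=[
        {
            "role": "tool",
            "content": '{"type": "checking", "balance": 5200}',
            "tool_call_id": "tc_1234567890",
        }
    ],
)
```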
@@ -10017,15 +9976,9 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.agents.continue_call(
- log_id="log_1234567890",
- messages=[
- {
- "role": "tool",
- "content": '{"type": "checking", "balance": 5200}',
- "tool_call_id": "tc_1234567890",
- }
- ],
+client.agents.continue_(
+ log_id="log_id",
+ messages=[{"role": "user"}],
)
```
@@ -10118,14 +10071,7 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-response = client.agents.list(
- size=1,
-)
-for item in response:
- yield item
-# alternatively, you can paginate page-by-page
-for page in response.iter_pages():
- yield page
+client.agents.list()
```
@@ -10173,7 +10119,7 @@ for page in response.iter_pages():
-
-**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Agents by
+**sort_by:** `typing.Optional[FileSortBy]` — Field to sort Agents by
@@ -10241,42 +10187,7 @@ client = Humanloop(
api_key="YOUR_API_KEY",
)
client.agents.upsert(
- path="Banking/Teller Agent",
- provider="anthropic",
- endpoint="chat",
- model="claude-3-7-sonnet-latest",
- reasoning_effort=1024,
- template=[
- {
- "role": "system",
- "content": "You are a helpful digital assistant, helping users navigate our digital banking platform.",
- }
- ],
- max_iterations=3,
- tools=[
- {
- "type": "inline",
- "json_schema": {
- "name": "stop",
- "description": "Call this tool when you have finished your task.",
- "parameters": {
- "type": "object",
- "properties": {
- "output": {
- "type": "string",
- "description": "The final output to return to the user.",
- }
- },
- "additionalProperties": False,
- "required": ["output"],
- },
- "strict": True,
- },
- "on_agent_call": "stop",
- }
- ],
- version_name="teller-agent-v1",
- version_description="Initial version",
+ model="model",
)
```
@@ -10545,8 +10456,8 @@ client = Humanloop(
api_key="YOUR_API_KEY",
)
client.agents.delete_agent_version(
- id="ag_1234567890",
- version_id="agv_1234567890",
+ id="id",
+ version_id="version_id",
)
```
@@ -10624,10 +10535,8 @@ client = Humanloop(
api_key="YOUR_API_KEY",
)
client.agents.patch_agent_version(
- id="ag_1234567890",
- version_id="agv_1234567890",
- name="teller-agent-v2",
- description="Updated version",
+ id="id",
+ version_id="version_id",
)
```
@@ -10724,7 +10633,7 @@ client = Humanloop(
api_key="YOUR_API_KEY",
)
client.agents.get(
- id="ag_1234567890",
+ id="id",
)
```
@@ -10810,7 +10719,7 @@ client = Humanloop(
api_key="YOUR_API_KEY",
)
client.agents.delete(
- id="ag_1234567890",
+ id="id",
)
```
@@ -10880,8 +10789,7 @@ client = Humanloop(
api_key="YOUR_API_KEY",
)
client.agents.move(
- id="ag_1234567890",
- path="new directory/new name",
+ id="id",
)
```
@@ -10975,7 +10883,7 @@ client = Humanloop(
api_key="YOUR_API_KEY",
)
client.agents.list_versions(
- id="ag_1234567890",
+ id="id",
)
```
@@ -11226,7 +11134,7 @@ client = Humanloop(
api_key="YOUR_API_KEY",
)
client.agents.list_environments(
- id="ag_1234567890",
+ id="id",
)
```
@@ -11299,12 +11207,7 @@ client = Humanloop(
api_key="YOUR_API_KEY",
)
client.agents.update_monitoring(
- id="ag_1234567890",
- activate=[
- {"evaluator_version_id": "ev_1234567890"},
- {"evaluator_id": "ev_2345678901", "environment_id": "env_1234567890"},
- ],
- deactivate=[{"evaluator_version_id": "ev_0987654321"}],
+ id="id",
)
```
@@ -11980,6 +11883,14 @@ client.files.list_files()
-
+**path:** `typing.Optional[str]` — Path of the directory to filter for. Returns files in this directory and all its subdirectories.
+
+
+
+
+
+-
+
**template:** `typing.Optional[bool]` — Filter to include only template files.
@@ -12004,7 +11915,7 @@ client.files.list_files()
-
-**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort files by
+**sort_by:** `typing.Optional[FileSortBy]` — Field to sort files by
@@ -12020,6 +11931,14 @@ client.files.list_files()
-
+**include_raw_file_content:** `typing.Optional[bool]` — Whether to include the raw file content in the response. Currently only supported for Agents and Prompts.
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
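A sketch combining the new `list_files` filters (the directory path is a placeholder):

```python
# List files under a directory and include raw contents where supported
# (currently Agents and Prompts only).
files = client.files.list_files(
    path="Banking",
    include_raw_file_content=True,
)
```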
@@ -12098,6 +12017,14 @@ client.files.retrieve_by_path(
-
+**include_raw_file_content:** `typing.Optional[bool]` — Whether to include the raw file content in the response. Currently only supported for Agents and Prompts.
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
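The same flag on `retrieve_by_path`, sketched with a placeholder path:

```python
file = client.files.retrieve_by_path(
    path="Banking/Teller Agent",
    include_raw_file_content=True,
)
```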
@@ -13396,6 +13323,14 @@ for page in response.iter_pages():
-
+**version_status:** `typing.Optional[VersionStatus]` — If provided, only Logs belonging to Versions with the specified status will be returned.
+
+
+
+
+
+-
+
**id:** `typing.Optional[typing.Union[str, typing.Sequence[str]]]` — If provided, returns Logs whose IDs contain any of the specified values as substrings.
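A sketch of the new Logs filter, assuming `"committed"` is a valid `VersionStatus` literal and using a placeholder file ID:

```python
logs = client.logs.list(
    file_id="pr_1234567890",
    version_status="committed",
)
for log in logs:
    print(log.id)
```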
diff --git a/src/humanloop/__init__.py b/src/humanloop/__init__.py
index 8485d75c..46712075 100644
--- a/src/humanloop/__init__.py
+++ b/src/humanloop/__init__.py
@@ -6,10 +6,10 @@
AgentCallStreamResponse,
AgentCallStreamResponsePayload,
AgentConfigResponse,
- AgentContinueCallResponse,
- AgentContinueCallResponseToolChoice,
- AgentContinueCallStreamResponse,
- AgentContinueCallStreamResponsePayload,
+ AgentContinueResponse,
+ AgentContinueResponseToolChoice,
+ AgentContinueStreamResponse,
+ AgentContinueStreamResponsePayload,
AgentInlineTool,
AgentKernelRequest,
AgentKernelRequestReasoningEffort,
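For SDK users, the rename means top-level imports change; a before/after sketch:

```python
# 0.8.36b1 and earlier:
# from humanloop import AgentContinueCallResponse, AgentContinueCallStreamResponse

# 0.8.36:
from humanloop import AgentContinueResponse, AgentContinueStreamResponse
```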
@@ -93,6 +93,7 @@
FileId,
FilePath,
FileRequest,
+ FileSortBy,
FileType,
FilesToolType,
FlowKernelRequest,
@@ -150,7 +151,6 @@
PopulateTemplateResponseReasoningEffort,
PopulateTemplateResponseStop,
PopulateTemplateResponseTemplate,
- ProjectSortBy,
PromptCallLogResponse,
PromptCallResponse,
PromptCallResponseToolChoice,
@@ -204,6 +204,8 @@
from .errors import UnprocessableEntityError
from . import agents, datasets, directories, evaluations, evaluators, files, flows, logs, prompts, tools
from .agents import (
+ AgentLogRequestAgent,
+ AgentLogRequestAgentParams,
AgentLogRequestToolChoice,
AgentLogRequestToolChoiceParams,
AgentRequestReasoningEffort,
@@ -214,8 +216,12 @@
AgentRequestTemplateParams,
AgentRequestToolsItem,
AgentRequestToolsItemParams,
+ AgentsCallRequestAgent,
+ AgentsCallRequestAgentParams,
AgentsCallRequestToolChoice,
AgentsCallRequestToolChoiceParams,
+ AgentsCallStreamRequestAgent,
+ AgentsCallStreamRequestAgentParams,
AgentsCallStreamRequestToolChoice,
AgentsCallStreamRequestToolChoiceParams,
)
@@ -242,6 +248,8 @@
)
from .files import RetrieveByPathFilesRetrieveByPathPostResponse, RetrieveByPathFilesRetrieveByPathPostResponseParams
from .prompts import (
+ PromptLogRequestPrompt,
+ PromptLogRequestPromptParams,
PromptLogRequestToolChoice,
PromptLogRequestToolChoiceParams,
PromptLogUpdateRequestToolChoice,
@@ -252,8 +260,12 @@
PromptRequestStopParams,
PromptRequestTemplate,
PromptRequestTemplateParams,
+ PromptsCallRequestPrompt,
+ PromptsCallRequestPromptParams,
PromptsCallRequestToolChoice,
PromptsCallRequestToolChoiceParams,
+ PromptsCallStreamRequestPrompt,
+ PromptsCallStreamRequestPromptParams,
PromptsCallStreamRequestToolChoice,
PromptsCallStreamRequestToolChoiceParams,
)
@@ -263,10 +275,10 @@
AgentCallStreamResponseParams,
AgentCallStreamResponsePayloadParams,
AgentConfigResponseParams,
- AgentContinueCallResponseParams,
- AgentContinueCallResponseToolChoiceParams,
- AgentContinueCallStreamResponseParams,
- AgentContinueCallStreamResponsePayloadParams,
+ AgentContinueResponseParams,
+ AgentContinueResponseToolChoiceParams,
+ AgentContinueStreamResponseParams,
+ AgentContinueStreamResponsePayloadParams,
AgentInlineToolParams,
AgentKernelRequestParams,
AgentKernelRequestReasoningEffortParams,
@@ -435,14 +447,14 @@
"AgentCallStreamResponsePayloadParams",
"AgentConfigResponse",
"AgentConfigResponseParams",
- "AgentContinueCallResponse",
- "AgentContinueCallResponseParams",
- "AgentContinueCallResponseToolChoice",
- "AgentContinueCallResponseToolChoiceParams",
- "AgentContinueCallStreamResponse",
- "AgentContinueCallStreamResponseParams",
- "AgentContinueCallStreamResponsePayload",
- "AgentContinueCallStreamResponsePayloadParams",
+ "AgentContinueResponse",
+ "AgentContinueResponseParams",
+ "AgentContinueResponseToolChoice",
+ "AgentContinueResponseToolChoiceParams",
+ "AgentContinueStreamResponse",
+ "AgentContinueStreamResponseParams",
+ "AgentContinueStreamResponsePayload",
+ "AgentContinueStreamResponsePayloadParams",
"AgentInlineTool",
"AgentInlineToolParams",
"AgentKernelRequest",
@@ -461,6 +473,8 @@
"AgentLinkedFileResponseFile",
"AgentLinkedFileResponseFileParams",
"AgentLinkedFileResponseParams",
+ "AgentLogRequestAgent",
+ "AgentLogRequestAgentParams",
"AgentLogRequestToolChoice",
"AgentLogRequestToolChoiceParams",
"AgentLogResponse",
@@ -487,8 +501,12 @@
"AgentResponseTemplateParams",
"AgentResponseToolsItem",
"AgentResponseToolsItemParams",
+ "AgentsCallRequestAgent",
+ "AgentsCallRequestAgentParams",
"AgentsCallRequestToolChoice",
"AgentsCallRequestToolChoiceParams",
+ "AgentsCallStreamRequestAgent",
+ "AgentsCallStreamRequestAgentParams",
"AgentsCallStreamRequestToolChoice",
"AgentsCallStreamRequestToolChoiceParams",
"AnthropicRedactedThinkingContent",
@@ -622,6 +640,7 @@
"FilePathParams",
"FileRequest",
"FileRequestParams",
+ "FileSortBy",
"FileType",
"FilesToolType",
"FlowKernelRequest",
@@ -725,7 +744,6 @@
"PopulateTemplateResponseStopParams",
"PopulateTemplateResponseTemplate",
"PopulateTemplateResponseTemplateParams",
- "ProjectSortBy",
"PromptCallLogResponse",
"PromptCallLogResponseParams",
"PromptCallResponse",
@@ -742,6 +760,8 @@
"PromptKernelRequestStopParams",
"PromptKernelRequestTemplate",
"PromptKernelRequestTemplateParams",
+ "PromptLogRequestPrompt",
+ "PromptLogRequestPromptParams",
"PromptLogRequestToolChoice",
"PromptLogRequestToolChoiceParams",
"PromptLogResponse",
@@ -764,8 +784,12 @@
"PromptResponseStopParams",
"PromptResponseTemplate",
"PromptResponseTemplateParams",
+ "PromptsCallRequestPrompt",
+ "PromptsCallRequestPromptParams",
"PromptsCallRequestToolChoice",
"PromptsCallRequestToolChoiceParams",
+ "PromptsCallStreamRequestPrompt",
+ "PromptsCallStreamRequestPromptParams",
"PromptsCallStreamRequestToolChoice",
"PromptsCallStreamRequestToolChoiceParams",
"ProviderApiKeys",
diff --git a/src/humanloop/agents/__init__.py b/src/humanloop/agents/__init__.py
index 04260714..ab2a2f9e 100644
--- a/src/humanloop/agents/__init__.py
+++ b/src/humanloop/agents/__init__.py
@@ -1,25 +1,33 @@
# This file was auto-generated by Fern from our API Definition.
from .types import (
+ AgentLogRequestAgent,
AgentLogRequestToolChoice,
AgentRequestReasoningEffort,
AgentRequestStop,
AgentRequestTemplate,
AgentRequestToolsItem,
+ AgentsCallRequestAgent,
AgentsCallRequestToolChoice,
+ AgentsCallStreamRequestAgent,
AgentsCallStreamRequestToolChoice,
)
from .requests import (
+ AgentLogRequestAgentParams,
AgentLogRequestToolChoiceParams,
AgentRequestReasoningEffortParams,
AgentRequestStopParams,
AgentRequestTemplateParams,
AgentRequestToolsItemParams,
+ AgentsCallRequestAgentParams,
AgentsCallRequestToolChoiceParams,
+ AgentsCallStreamRequestAgentParams,
AgentsCallStreamRequestToolChoiceParams,
)
__all__ = [
+ "AgentLogRequestAgent",
+ "AgentLogRequestAgentParams",
"AgentLogRequestToolChoice",
"AgentLogRequestToolChoiceParams",
"AgentRequestReasoningEffort",
@@ -30,8 +38,12 @@
"AgentRequestTemplateParams",
"AgentRequestToolsItem",
"AgentRequestToolsItemParams",
+ "AgentsCallRequestAgent",
+ "AgentsCallRequestAgentParams",
"AgentsCallRequestToolChoice",
"AgentsCallRequestToolChoiceParams",
+ "AgentsCallStreamRequestAgent",
+ "AgentsCallStreamRequestAgentParams",
"AgentsCallStreamRequestToolChoice",
"AgentsCallStreamRequestToolChoiceParams",
]
diff --git a/src/humanloop/agents/client.py b/src/humanloop/agents/client.py
index 64f3de62..c87a10a6 100644
--- a/src/humanloop/agents/client.py
+++ b/src/humanloop/agents/client.py
@@ -5,29 +5,24 @@
from .raw_client import RawAgentsClient
from ..requests.chat_message import ChatMessageParams
from .requests.agent_log_request_tool_choice import AgentLogRequestToolChoiceParams
-from ..requests.agent_kernel_request import AgentKernelRequestParams
+from .requests.agent_log_request_agent import AgentLogRequestAgentParams
import datetime as dt
from ..types.log_status import LogStatus
from ..core.request_options import RequestOptions
from ..types.create_agent_log_response import CreateAgentLogResponse
-from ..types.agent_log_response import AgentLogResponse
+from ..types.log_response import LogResponse
from .requests.agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoiceParams
+from .requests.agents_call_stream_request_agent import AgentsCallStreamRequestAgentParams
from ..requests.provider_api_keys import ProviderApiKeysParams
from ..types.agent_call_stream_response import AgentCallStreamResponse
from .requests.agents_call_request_tool_choice import AgentsCallRequestToolChoiceParams
+from .requests.agents_call_request_agent import AgentsCallRequestAgentParams
from ..types.agent_call_response import AgentCallResponse
-from ..types.agent_continue_call_stream_response import AgentContinueCallStreamResponse
-from ..types.agent_continue_call_response import AgentContinueCallResponse
-from ..types.project_sort_by import ProjectSortBy
+from ..types.agent_continue_stream_response import AgentContinueStreamResponse
+from ..types.agent_continue_response import AgentContinueResponse
+from ..types.file_sort_by import FileSortBy
from ..types.sort_order import SortOrder
-from ..core.pagination import SyncPager
-from ..types.agent_response import AgentResponse
from ..types.paginated_data_agent_response import PaginatedDataAgentResponse
-from ..core.unchecked_base_model import construct_type
-from ..errors.unprocessable_entity_error import UnprocessableEntityError
-from ..types.http_validation_error import HttpValidationError
-from json.decoder import JSONDecodeError
-from ..core.api_error import ApiError
from ..types.model_endpoints import ModelEndpoints
from .requests.agent_request_template import AgentRequestTemplateParams
from ..types.template_language import TemplateLanguage
@@ -36,6 +31,7 @@
from ..requests.response_format import ResponseFormatParams
from .requests.agent_request_reasoning_effort import AgentRequestReasoningEffortParams
from .requests.agent_request_tools_item import AgentRequestToolsItemParams
+from ..types.agent_response import AgentResponse
from ..types.list_agents import ListAgents
from ..types.file_environment_response import FileEnvironmentResponse
from ..requests.evaluator_activation_deactivation_request_activate_item import (
@@ -47,7 +43,6 @@
from ..types.agent_kernel_request import AgentKernelRequest
from ..core.client_wrapper import AsyncClientWrapper
from .raw_client import AsyncRawAgentsClient
-from ..core.pagination import AsyncPager
# this is used as the default value for optional parameters
OMIT = typing.cast(typing.Any, ...)
@@ -85,7 +80,7 @@ def log(
finish_reason: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT,
- agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ agent: typing.Optional[AgentLogRequestAgentParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -164,8 +159,11 @@ def log(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- agent : typing.Optional[AgentKernelRequestParams]
- Details of your Agent. A new Agent version will be created if the provided details are new.
+ agent : typing.Optional[AgentLogRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+ A new Agent version will be created if the provided details do not match any existing version.
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -239,52 +237,7 @@ def log(
client = Humanloop(
api_key="YOUR_API_KEY",
)
- client.agents.log(
- path="Banking/Teller Agent",
- agent={
- "provider": "anthropic",
- "endpoint": "chat",
- "model": "claude-3-7-sonnet-latest",
- "reasoning_effort": 1024,
- "template": [
- {
- "role": "system",
- "content": "You are a helpful digital assistant, helping users navigate our digital banking platform.",
- }
- ],
- "max_iterations": 3,
- "tools": [
- {
- "type": "file",
- "link": {
- "file_id": "pr_1234567890",
- "version_id": "prv_1234567890",
- },
- "on_agent_call": "continue",
- },
- {
- "type": "inline",
- "json_schema": {
- "name": "stop",
- "description": "Call this tool when you have finished your task.",
- "parameters": {
- "type": "object",
- "properties": {
- "output": {
- "type": "string",
- "description": "The final output to return to the user.",
- }
- },
- "additionalProperties": False,
- "required": ["output"],
- },
- "strict": True,
- },
- "on_agent_call": "stop",
- },
- ],
- },
- )
+ client.agents.log()
"""
response = self._raw_client.log(
version_id=version_id,
@@ -337,7 +290,7 @@ def update_log(
error: typing.Optional[str] = OMIT,
log_status: typing.Optional[LogStatus] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> AgentLogResponse:
+ ) -> LogResponse:
"""
Update a Log.
@@ -374,7 +327,7 @@ def update_log(
Returns
-------
- AgentLogResponse
+ LogResponse
Successful Response
Examples
@@ -385,20 +338,8 @@ def update_log(
api_key="YOUR_API_KEY",
)
client.agents.update_log(
- id="ag_1234567890",
- log_id="log_1234567890",
- messages=[
- {"role": "user", "content": "I need to withdraw $1000"},
- {
- "role": "assistant",
- "content": "Of course! Would you like to use your savings or checking account?",
- },
- ],
- output_message={
- "role": "assistant",
- "content": "I'm sorry, I can't help with that.",
- },
- log_status="complete",
+ id="id",
+ log_id="log_id",
)
"""
response = self._raw_client.update_log(
@@ -423,7 +364,7 @@ def call_stream(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT,
- agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ agent: typing.Optional[AgentsCallStreamRequestAgentParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -442,21 +383,18 @@ def call_stream(
request_options: typing.Optional[RequestOptions] = None,
) -> typing.Iterator[AgentCallStreamResponse]:
"""
- Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
-
- If the Agent requires a tool call that cannot be ran by Humanloop, execution will halt. To continue,
- pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
+ Call an Agent.
- The agent will run for the maximum number of iterations, or until it encounters a stop condition,
- according to its configuration.
+ Calling an Agent invokes the model provider and then logs
+ the request, responses, and metadata to Humanloop.
You can use query parameters `version_id`, or `environment`, to target
an existing version of the Agent. Otherwise the default deployed version will be chosen.
Instead of targeting an existing version explicitly, you can instead pass in
- Agent details in the request body. A new version is created if it does not match
- any existing ones. This is helpful in the case where you are storing or deriving
- your Agent details in code.
+ Agent details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Agent details in code.
Parameters
----------
@@ -482,8 +420,11 @@ def call_stream(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- agent : typing.Optional[AgentKernelRequestParams]
- Details of your Agent. A new Agent version will be created if the provided details are new.
+ agent : typing.Optional[AgentsCallStreamRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+ A new Agent version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -585,7 +526,7 @@ def call(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT,
- agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ agent: typing.Optional[AgentsCallRequestAgentParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -604,21 +545,18 @@ def call(
request_options: typing.Optional[RequestOptions] = None,
) -> AgentCallResponse:
"""
- Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
-
- If the Agent requires a tool call that cannot be ran by Humanloop, execution will halt. To continue,
- pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
+ Call an Agent.
- The agent will run for the maximum number of iterations, or until it encounters a stop condition,
- according to its configuration.
+ Calling an Agent invokes the model provider and then logs
+ the request, responses, and metadata to Humanloop.
You can use query parameters `version_id`, or `environment`, to target
an existing version of the Agent. Otherwise the default deployed version will be chosen.
Instead of targeting an existing version explicitly, you can instead pass in
- Agent details in the request body. A new version is created if it does not match
- any existing ones. This is helpful in the case where you are storing or deriving
- your Agent details in code.
+ Agent details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Agent details in code.
Parameters
----------
@@ -644,8 +582,11 @@ def call(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- agent : typing.Optional[AgentKernelRequestParams]
- Details of your Agent. A new Agent version will be created if the provided details are new.
+ agent : typing.Optional[AgentsCallRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+ A new Agent version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -707,15 +648,7 @@ def call(
client = Humanloop(
api_key="YOUR_API_KEY",
)
- client.agents.call(
- path="Banking/Teller Agent",
- messages=[
- {
- "role": "user",
- "content": "I'd like to deposit $1000 to my savings account from my checking account.",
- }
- ],
- )
+ client.agents.call()
"""
response = self._raw_client.call(
version_id=version_id,
@@ -744,7 +677,7 @@ def call(
)
return response.data
- def continue_call_stream(
+ def continue_stream(
self,
*,
log_id: str,
@@ -752,18 +685,18 @@ def continue_call_stream(
provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
include_trace_children: typing.Optional[bool] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> typing.Iterator[AgentContinueCallStreamResponse]:
+ ) -> typing.Iterator[AgentContinueStreamResponse]:
"""
Continue an incomplete Agent call.
- This endpoint allows continuing an existing incomplete Agent call, by passing the tool call
- requested by the Agent. The Agent will resume processing from where it left off.
-
- The messages in the request will be appended to the original messages in the Log. You do not
- have to provide the previous conversation history.
+ This endpoint allows continuing an existing incomplete Agent call, using the context
+ from the previous interaction. The Agent will resume processing from where it left off.
The original log must be in an incomplete state to be continued.
+ The messages in the request will be appended
+ to the original messages in the log.
+
Parameters
----------
log_id : str
@@ -783,7 +716,7 @@ def continue_call_stream(
Yields
------
- typing.Iterator[AgentContinueCallStreamResponse]
+ typing.Iterator[AgentContinueStreamResponse]
Examples
@@ -793,14 +726,14 @@ def continue_call_stream(
client = Humanloop(
api_key="YOUR_API_KEY",
)
- response = client.agents.continue_call_stream(
+ response = client.agents.continue_stream(
log_id="log_id",
messages=[{"role": "user"}],
)
for chunk in response:
yield chunk
"""
- with self._raw_client.continue_call_stream(
+ with self._raw_client.continue_stream(
log_id=log_id,
messages=messages,
provider_api_keys=provider_api_keys,
@@ -809,7 +742,7 @@ def continue_call_stream(
) as r:
yield from r.data
- def continue_call(
+ def continue_(
self,
*,
log_id: str,
@@ -817,18 +750,18 @@ def continue_call(
provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
include_trace_children: typing.Optional[bool] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> AgentContinueCallResponse:
+ ) -> AgentContinueResponse:
"""
Continue an incomplete Agent call.
- This endpoint allows continuing an existing incomplete Agent call, by passing the tool call
- requested by the Agent. The Agent will resume processing from where it left off.
-
- The messages in the request will be appended to the original messages in the Log. You do not
- have to provide the previous conversation history.
+ This endpoint allows continuing an existing incomplete Agent call, using the context
+ from the previous interaction. The Agent will resume processing from where it left off.
The original log must be in an incomplete state to be continued.
+ The messages in the request will be appended
+ to the original messages in the log.
+
Parameters
----------
log_id : str
@@ -848,7 +781,7 @@ def continue_call(
Returns
-------
- AgentContinueCallResponse
+ AgentContinueResponse
Examples
@@ -858,18 +791,12 @@ def continue_call(
client = Humanloop(
api_key="YOUR_API_KEY",
)
- client.agents.continue_call(
- log_id="log_1234567890",
- messages=[
- {
- "role": "tool",
- "content": '{"type": "checking", "balance": 5200}',
- "tool_call_id": "tc_1234567890",
- }
- ],
+ client.agents.continue_(
+ log_id="log_id",
+ messages=[{"role": "user"}],
)
"""
- response = self._raw_client.continue_call(
+ response = self._raw_client.continue_(
log_id=log_id,
messages=messages,
provider_api_keys=provider_api_keys,
@@ -885,10 +812,10 @@ def list(
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
user_filter: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> SyncPager[AgentResponse]:
+ ) -> PaginatedDataAgentResponse:
"""
Get a list of all Agents.
@@ -906,7 +833,7 @@ def list(
user_filter : typing.Optional[str]
Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort Agents by
order : typing.Optional[SortOrder]
@@ -917,7 +844,7 @@ def list(
Returns
-------
- SyncPager[AgentResponse]
+ PaginatedDataAgentResponse
Successful Response
Examples
@@ -927,64 +854,18 @@ def list(
client = Humanloop(
api_key="YOUR_API_KEY",
)
- response = client.agents.list(
- size=1,
- )
- for item in response:
- yield item
- # alternatively, you can paginate page-by-page
- for page in response.iter_pages():
- yield page
- """
- page = page if page is not None else 1
- _response = self._raw_client._client_wrapper.httpx_client.request(
- "agents",
- method="GET",
- params={
- "page": page,
- "size": size,
- "name": name,
- "user_filter": user_filter,
- "sort_by": sort_by,
- "order": order,
- },
+ client.agents.list()
+ """
+ response = self._raw_client.list(
+ page=page,
+ size=size,
+ name=name,
+ user_filter=user_filter,
+ sort_by=sort_by,
+ order=order,
request_options=request_options,
)
- try:
- if 200 <= _response.status_code < 300:
- _parsed_response = typing.cast(
- PaginatedDataAgentResponse,
- construct_type(
- type_=PaginatedDataAgentResponse, # type: ignore
- object_=_response.json(),
- ),
- )
- _has_next = True
- _get_next = lambda: self.list(
- page=page + 1,
- size=size,
- name=name,
- user_filter=user_filter,
- sort_by=sort_by,
- order=order,
- request_options=request_options,
- )
- _items = _parsed_response.records
- return SyncPager(has_next=_has_next, items=_items, get_next=_get_next)
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(
- HttpValidationError,
- construct_type(
- type_=HttpValidationError, # type: ignore
- object_=_response.json(),
- ),
- )
- )
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ return response.data
def upsert(
self,
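Since `agents.list` now returns the page object directly instead of a `SyncPager`, callers iterate `.records` and page manually; a sketch:

```python
# One page of Agents; the records for the page live on `.records`.
page = client.agents.list(page=1, size=50)
for agent in page.records:
    print(agent.path)

# Fetch further pages explicitly if needed.
next_page = client.agents.list(page=2, size=50)
```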
@@ -1123,42 +1004,7 @@ def upsert(
api_key="YOUR_API_KEY",
)
client.agents.upsert(
- path="Banking/Teller Agent",
- provider="anthropic",
- endpoint="chat",
- model="claude-3-7-sonnet-latest",
- reasoning_effort=1024,
- template=[
- {
- "role": "system",
- "content": "You are a helpful digital assistant, helping users navigate our digital banking platform.",
- }
- ],
- max_iterations=3,
- tools=[
- {
- "type": "inline",
- "json_schema": {
- "name": "stop",
- "description": "Call this tool when you have finished your task.",
- "parameters": {
- "type": "object",
- "properties": {
- "output": {
- "type": "string",
- "description": "The final output to return to the user.",
- }
- },
- "additionalProperties": False,
- "required": ["output"],
- },
- "strict": True,
- },
- "on_agent_call": "stop",
- }
- ],
- version_name="teller-agent-v1",
- version_description="Initial version",
+ model="model",
)
"""
response = self._raw_client.upsert(
@@ -1220,8 +1066,8 @@ def delete_agent_version(
api_key="YOUR_API_KEY",
)
client.agents.delete_agent_version(
- id="ag_1234567890",
- version_id="agv_1234567890",
+ id="id",
+ version_id="version_id",
)
"""
response = self._raw_client.delete_agent_version(id, version_id, request_options=request_options)
@@ -1269,10 +1115,8 @@ def patch_agent_version(
api_key="YOUR_API_KEY",
)
client.agents.patch_agent_version(
- id="ag_1234567890",
- version_id="agv_1234567890",
- name="teller-agent-v2",
- description="Updated version",
+ id="id",
+ version_id="version_id",
)
"""
response = self._raw_client.patch_agent_version(
@@ -1321,7 +1165,7 @@ def get(
api_key="YOUR_API_KEY",
)
client.agents.get(
- id="ag_1234567890",
+ id="id",
)
"""
response = self._raw_client.get(
@@ -1353,7 +1197,7 @@ def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] =
api_key="YOUR_API_KEY",
)
client.agents.delete(
- id="ag_1234567890",
+ id="id",
)
"""
response = self._raw_client.delete(id, request_options=request_options)
@@ -1401,8 +1245,7 @@ def move(
api_key="YOUR_API_KEY",
)
client.agents.move(
- id="ag_1234567890",
- path="new directory/new name",
+ id="id",
)
"""
response = self._raw_client.move(
@@ -1444,7 +1287,7 @@ def list_versions(
api_key="YOUR_API_KEY",
)
client.agents.list_versions(
- id="ag_1234567890",
+ id="id",
)
"""
response = self._raw_client.list_versions(
@@ -1564,7 +1407,7 @@ def list_environments(
api_key="YOUR_API_KEY",
)
client.agents.list_environments(
- id="ag_1234567890",
+ id="id",
)
"""
response = self._raw_client.list_environments(id, request_options=request_options)
@@ -1610,12 +1453,7 @@ def update_monitoring(
api_key="YOUR_API_KEY",
)
client.agents.update_monitoring(
- id="ag_1234567890",
- activate=[
- {"evaluator_version_id": "ev_1234567890"},
- {"evaluator_id": "ev_2345678901", "environment_id": "env_1234567890"},
- ],
- deactivate=[{"evaluator_version_id": "ev_0987654321"}],
+ id="id",
)
"""
response = self._raw_client.update_monitoring(
@@ -1630,7 +1468,7 @@ def serialize(
version_id: typing.Optional[str] = None,
environment: typing.Optional[str] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> None:
+ ) -> str:
"""
Serialize an Agent to the .agent file format.
@@ -1656,7 +1494,8 @@ def serialize(
Returns
-------
- None
+ str
+ Successful Response
Examples
--------
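`serialize` now returns the serialized Agent rather than `None`, so the result can be written straight to disk; a sketch, assuming an `id` parameter as on the neighbouring methods and using a placeholder ID:

```python
raw = client.agents.serialize(id="ag_1234567890")
with open("teller_agent.agent", "w") as f:
    f.write(raw)
```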
@@ -1740,7 +1579,7 @@ async def log(
finish_reason: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT,
- agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ agent: typing.Optional[AgentLogRequestAgentParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -1819,8 +1658,11 @@ async def log(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- agent : typing.Optional[AgentKernelRequestParams]
- Details of your Agent. A new Agent version will be created if the provided details are new.
+ agent : typing.Optional[AgentLogRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+ A new Agent version will be created if the provided details do not match any existing version.
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -1899,52 +1741,7 @@ async def log(
async def main() -> None:
- await client.agents.log(
- path="Banking/Teller Agent",
- agent={
- "provider": "anthropic",
- "endpoint": "chat",
- "model": "claude-3-7-sonnet-latest",
- "reasoning_effort": 1024,
- "template": [
- {
- "role": "system",
- "content": "You are a helpful digital assistant, helping users navigate our digital banking platform.",
- }
- ],
- "max_iterations": 3,
- "tools": [
- {
- "type": "file",
- "link": {
- "file_id": "pr_1234567890",
- "version_id": "prv_1234567890",
- },
- "on_agent_call": "continue",
- },
- {
- "type": "inline",
- "json_schema": {
- "name": "stop",
- "description": "Call this tool when you have finished your task.",
- "parameters": {
- "type": "object",
- "properties": {
- "output": {
- "type": "string",
- "description": "The final output to return to the user.",
- }
- },
- "additionalProperties": False,
- "required": ["output"],
- },
- "strict": True,
- },
- "on_agent_call": "stop",
- },
- ],
- },
- )
+ await client.agents.log()
asyncio.run(main())
@@ -2000,7 +1797,7 @@ async def update_log(
error: typing.Optional[str] = OMIT,
log_status: typing.Optional[LogStatus] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> AgentLogResponse:
+ ) -> LogResponse:
"""
Update a Log.
@@ -2037,7 +1834,7 @@ async def update_log(
Returns
-------
- AgentLogResponse
+ LogResponse
Successful Response
Examples
@@ -2053,20 +1850,8 @@ async def update_log(
async def main() -> None:
await client.agents.update_log(
- id="ag_1234567890",
- log_id="log_1234567890",
- messages=[
- {"role": "user", "content": "I need to withdraw $1000"},
- {
- "role": "assistant",
- "content": "Of course! Would you like to use your savings or checking account?",
- },
- ],
- output_message={
- "role": "assistant",
- "content": "I'm sorry, I can't help with that.",
- },
- log_status="complete",
+ id="id",
+ log_id="log_id",
)
@@ -2094,7 +1879,7 @@ async def call_stream(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT,
- agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ agent: typing.Optional[AgentsCallStreamRequestAgentParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2113,21 +1898,18 @@ async def call_stream(
request_options: typing.Optional[RequestOptions] = None,
) -> typing.AsyncIterator[AgentCallStreamResponse]:
"""
- Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
+ Call an Agent.
- If the Agent requires a tool call that cannot be run by Humanloop, execution will halt. To continue,
- pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
-
- The agent will run for the maximum number of iterations, or until it encounters a stop condition,
- according to its configuration.
+ Calling an Agent invokes the model provider before logging
+ the request, responses, and metadata to Humanloop.
You can use query parameters `version_id` or `environment` to target
an existing version of the Agent. Otherwise the default deployed version will be chosen.
Instead of targeting an existing version explicitly, you can pass in
- Agent details in the request body. A new version is created if it does not match
- any existing ones. This is helpful in the case where you are storing or deriving
- your Agent details in code.
+ Agent details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Agent details in code.
Parameters
----------
@@ -2153,8 +1935,11 @@ async def call_stream(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- agent : typing.Optional[AgentKernelRequestParams]
- Details of your Agent. A new Agent version will be created if the provided details are new.
+ agent : typing.Optional[AgentsCallStreamRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+ A new Agent version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
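[Editor's note] Since `call_stream` is an async generator yielding `AgentCallStreamResponse` chunks, a minimal consumption sketch (standard `AsyncHumanloop` client assumed; the path and message are placeholders):

import asyncio
from humanloop import AsyncHumanloop

client = AsyncHumanloop(api_key="YOUR_API_KEY")

async def main() -> None:
    # Iterate the async generator directly; each chunk is an
    # AgentCallStreamResponse emitted while the Agent runs.
    async for chunk in client.agents.call_stream(
        path="Banking/Teller Agent",
        messages=[{"role": "user", "content": "What is my balance?"}],
    ):
        print(chunk)

asyncio.run(main())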
@@ -2265,7 +2050,7 @@ async def call(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT,
- agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ agent: typing.Optional[AgentsCallRequestAgentParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2284,21 +2069,18 @@ async def call(
request_options: typing.Optional[RequestOptions] = None,
) -> AgentCallResponse:
"""
- Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
-
- If the Agent requires a tool call that cannot be run by Humanloop, execution will halt. To continue,
- pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
+ Call an Agent.
- The agent will run for the maximum number of iterations, or until it encounters a stop condition,
- according to its configuration.
+ Calling an Agent invokes the model provider before logging
+ the request, responses, and metadata to Humanloop.
You can use query parameters `version_id` or `environment` to target
an existing version of the Agent. Otherwise the default deployed version will be chosen.
Instead of targeting an existing version explicitly, you can pass in
- Agent details in the request body. A new version is created if it does not match
- any existing ones. This is helpful in the case where you are storing or deriving
- your Agent details in code.
+ Agent details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Agent details in code.
Parameters
----------
@@ -2324,8 +2106,11 @@ async def call(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- agent : typing.Optional[AgentKernelRequestParams]
- Details of your Agent. A new Agent version will be created if the provided details are new.
+ agent : typing.Optional[AgentsCallRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+ A new Agent version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -2392,15 +2177,7 @@ async def call(
async def main() -> None:
- await client.agents.call(
- path="Banking/Teller Agent",
- messages=[
- {
- "role": "user",
- "content": "I'd like to deposit $1000 to my savings account from my checking account.",
- }
- ],
- )
+ await client.agents.call()
asyncio.run(main())
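[Editor's note] The trimmed example no longer demonstrates the dict arm of the new union. A sketch passing inline Agent details; the field values reuse the removed example, and a new Agent version is created if the details match no existing one:

import asyncio
from humanloop import AsyncHumanloop

client = AsyncHumanloop(api_key="YOUR_API_KEY")

async def main() -> None:
    # Dict form of AgentsCallRequestAgentParams: inline Agent configuration.
    response = await client.agents.call(
        path="Banking/Teller Agent",
        agent={
            "provider": "anthropic",
            "model": "claude-3-7-sonnet-latest",
            "max_iterations": 3,
        },
        messages=[{"role": "user", "content": "I'd like to deposit $1000."}],
    )
    print(response)

asyncio.run(main())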
@@ -2432,7 +2209,7 @@ async def main() -> None:
)
return response.data
- async def continue_call_stream(
+ async def continue_stream(
self,
*,
log_id: str,
@@ -2440,18 +2217,18 @@ async def continue_call_stream(
provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
include_trace_children: typing.Optional[bool] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> typing.AsyncIterator[AgentContinueCallStreamResponse]:
+ ) -> typing.AsyncIterator[AgentContinueStreamResponse]:
"""
Continue an incomplete Agent call.
- This endpoint allows continuing an existing incomplete Agent call, by passing the tool call
- requested by the Agent. The Agent will resume processing from where it left off.
-
- The messages in the request will be appended to the original messages in the Log. You do not
- have to provide the previous conversation history.
+ This endpoint allows continuing an existing incomplete Agent call, using the context
+ from the previous interaction. The Agent will resume processing from where it left off.
The original log must be in an incomplete state to be continued.
+ The messages in the request will be appended to the original messages in the log.
+
Parameters
----------
log_id : str
@@ -2471,7 +2248,7 @@ async def continue_call_stream(
Yields
------
- typing.AsyncIterator[AgentContinueCallStreamResponse]
+ typing.AsyncIterator[AgentContinueStreamResponse]
Examples
@@ -2486,7 +2263,7 @@ async def continue_call_stream(
async def main() -> None:
- response = await client.agents.continue_call_stream(
+ response = await client.agents.continue_stream(
log_id="log_id",
messages=[{"role": "user"}],
)
@@ -2496,7 +2273,7 @@ async def main() -> None:
asyncio.run(main())
"""
- async with self._raw_client.continue_call_stream(
+ async with self._raw_client.continue_stream(
log_id=log_id,
messages=messages,
provider_api_keys=provider_api_keys,
@@ -2506,7 +2283,7 @@ async def main() -> None:
async for data in r.data:
yield data
- async def continue_call(
+ async def continue_(
self,
*,
log_id: str,
@@ -2514,18 +2291,18 @@ async def continue_call(
provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
include_trace_children: typing.Optional[bool] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> AgentContinueCallResponse:
+ ) -> AgentContinueResponse:
"""
Continue an incomplete Agent call.
- This endpoint allows continuing an existing incomplete Agent call, by passing the tool call
- requested by the Agent. The Agent will resume processing from where it left off.
-
- The messages in the request will be appended to the original messages in the Log. You do not
- have to provide the previous conversation history.
+ This endpoint allows continuing an existing incomplete Agent call, using the context
+ from the previous interaction. The Agent will resume processing from where it left off.
The original log must be in an incomplete state to be continued.
+ The messages in the request will be appended to the original messages in the log.
+
Parameters
----------
log_id : str
@@ -2545,7 +2322,7 @@ async def continue_call(
Returns
-------
- AgentContinueCallResponse
+ AgentContinueResponse
Examples
@@ -2560,21 +2337,15 @@ async def continue_call(
async def main() -> None:
- await client.agents.continue_call(
- log_id="log_1234567890",
- messages=[
- {
- "role": "tool",
- "content": '{"type": "checking", "balance": 5200}',
- "tool_call_id": "tc_1234567890",
- }
- ],
+ await client.agents.continue_(
+ log_id="log_id",
+ messages=[{"role": "user"}],
)
asyncio.run(main())
"""
- response = await self._raw_client.continue_call(
+ response = await self._raw_client.continue_(
log_id=log_id,
messages=messages,
provider_api_keys=provider_api_keys,
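[Editor's note] A fuller sketch of the renamed `continue_` method, reusing the tool-result message from the removed example; the IDs are placeholders:

import asyncio
from humanloop import AsyncHumanloop

client = AsyncHumanloop(api_key="YOUR_API_KEY")

async def main() -> None:
    # Resume an incomplete Agent call. The tool message is appended to the
    # Log's original messages, so earlier history does not need re-sending.
    response = await client.agents.continue_(
        log_id="log_1234567890",
        messages=[
            {
                "role": "tool",
                "content": '{"type": "checking", "balance": 5200}',
                "tool_call_id": "tc_1234567890",
            }
        ],
    )
    print(response)

asyncio.run(main())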
@@ -2590,10 +2361,10 @@ async def list(
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
user_filter: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> AsyncPager[AgentResponse]:
+ ) -> PaginatedDataAgentResponse:
"""
Get a list of all Agents.
@@ -2611,7 +2382,7 @@ async def list(
user_filter : typing.Optional[str]
Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort Agents by
order : typing.Optional[SortOrder]
@@ -2622,7 +2393,7 @@ async def list(
Returns
-------
- AsyncPager[AgentResponse]
+ PaginatedDataAgentResponse
Successful Response
Examples
@@ -2637,67 +2408,21 @@ async def list(
async def main() -> None:
- response = await client.agents.list(
- size=1,
- )
- async for item in response:
- yield item
- # alternatively, you can paginate page-by-page
- async for page in response.iter_pages():
- yield page
+ await client.agents.list()
asyncio.run(main())
"""
- page = page if page is not None else 1
- _response = await self._raw_client._client_wrapper.httpx_client.request(
- "agents",
- method="GET",
- params={
- "page": page,
- "size": size,
- "name": name,
- "user_filter": user_filter,
- "sort_by": sort_by,
- "order": order,
- },
+ response = await self._raw_client.list(
+ page=page,
+ size=size,
+ name=name,
+ user_filter=user_filter,
+ sort_by=sort_by,
+ order=order,
request_options=request_options,
)
- try:
- if 200 <= _response.status_code < 300:
- _parsed_response = typing.cast(
- PaginatedDataAgentResponse,
- construct_type(
- type_=PaginatedDataAgentResponse, # type: ignore
- object_=_response.json(),
- ),
- )
- _has_next = True
- _get_next = lambda: self.list(
- page=page + 1,
- size=size,
- name=name,
- user_filter=user_filter,
- sort_by=sort_by,
- order=order,
- request_options=request_options,
- )
- _items = _parsed_response.records
- return AsyncPager(has_next=_has_next, items=_items, get_next=_get_next)
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(
- HttpValidationError,
- construct_type(
- type_=HttpValidationError, # type: ignore
- object_=_response.json(),
- ),
- )
- )
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ return response.data
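[Editor's note] Since `list` now returns a single `PaginatedDataAgentResponse` page rather than an `AsyncPager`, callers advance pages themselves. A sketch of manual pagination; the empty-`records` stop condition is an assumption about the page shape:

import asyncio
from humanloop import AsyncHumanloop

client = AsyncHumanloop(api_key="YOUR_API_KEY")

async def main() -> None:
    page_number = 1
    while True:
        page = await client.agents.list(page=page_number, size=50)
        if not page.records:  # stop once a page comes back empty (assumed)
            break
        for agent in page.records:
            print(agent.id)
        page_number += 1

asyncio.run(main())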
async def upsert(
self,
@@ -2841,42 +2566,7 @@ async def upsert(
async def main() -> None:
await client.agents.upsert(
- path="Banking/Teller Agent",
- provider="anthropic",
- endpoint="chat",
- model="claude-3-7-sonnet-latest",
- reasoning_effort=1024,
- template=[
- {
- "role": "system",
- "content": "You are a helpful digital assistant, helping users navigate our digital banking platform.",
- }
- ],
- max_iterations=3,
- tools=[
- {
- "type": "inline",
- "json_schema": {
- "name": "stop",
- "description": "Call this tool when you have finished your task.",
- "parameters": {
- "type": "object",
- "properties": {
- "output": {
- "type": "string",
- "description": "The final output to return to the user.",
- }
- },
- "additionalProperties": False,
- "required": ["output"],
- },
- "strict": True,
- },
- "on_agent_call": "stop",
- }
- ],
- version_name="teller-agent-v1",
- version_description="Initial version",
+ model="model",
)
@@ -2946,8 +2636,8 @@ async def delete_agent_version(
async def main() -> None:
await client.agents.delete_agent_version(
- id="ag_1234567890",
- version_id="agv_1234567890",
+ id="id",
+ version_id="version_id",
)
@@ -3003,10 +2693,8 @@ async def patch_agent_version(
async def main() -> None:
await client.agents.patch_agent_version(
- id="ag_1234567890",
- version_id="agv_1234567890",
- name="teller-agent-v2",
- description="Updated version",
+ id="id",
+ version_id="version_id",
)
@@ -3063,7 +2751,7 @@ async def get(
async def main() -> None:
await client.agents.get(
- id="ag_1234567890",
+ id="id",
)
@@ -3103,7 +2791,7 @@ async def delete(self, id: str, *, request_options: typing.Optional[RequestOptio
async def main() -> None:
await client.agents.delete(
- id="ag_1234567890",
+ id="id",
)
@@ -3159,8 +2847,7 @@ async def move(
async def main() -> None:
await client.agents.move(
- id="ag_1234567890",
- path="new directory/new name",
+ id="id",
)
@@ -3210,7 +2897,7 @@ async def list_versions(
async def main() -> None:
await client.agents.list_versions(
- id="ag_1234567890",
+ id="id",
)
@@ -3354,7 +3041,7 @@ async def list_environments(
async def main() -> None:
await client.agents.list_environments(
- id="ag_1234567890",
+ id="id",
)
@@ -3408,15 +3095,7 @@ async def update_monitoring(
async def main() -> None:
await client.agents.update_monitoring(
- id="ag_1234567890",
- activate=[
- {"evaluator_version_id": "ev_1234567890"},
- {
- "evaluator_id": "ev_2345678901",
- "environment_id": "env_1234567890",
- },
- ],
- deactivate=[{"evaluator_version_id": "ev_0987654321"}],
+ id="id",
)
@@ -3434,7 +3113,7 @@ async def serialize(
version_id: typing.Optional[str] = None,
environment: typing.Optional[str] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> None:
+ ) -> str:
"""
Serialize an Agent to the .agent file format.
@@ -3460,7 +3139,8 @@ async def serialize(
Returns
-------
- None
+ str
+ Successful Response
Examples
--------
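[Editor's note] With `serialize` now returning the `.agent` text instead of `None`, a round-trip sketch (the Agent ID and output path are placeholders):

import asyncio
from humanloop import AsyncHumanloop

client = AsyncHumanloop(api_key="YOUR_API_KEY")

async def main() -> None:
    # serialize() returns the Agent in .agent file format as a string.
    agent_text = await client.agents.serialize(id="ag_1234567890")
    with open("teller.agent", "w") as f:  # hypothetical output path
        f.write(agent_text)

asyncio.run(main())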
diff --git a/src/humanloop/agents/raw_client.py b/src/humanloop/agents/raw_client.py
index b13491a6..6e3e1718 100644
--- a/src/humanloop/agents/raw_client.py
+++ b/src/humanloop/agents/raw_client.py
@@ -4,7 +4,7 @@
from ..core.client_wrapper import SyncClientWrapper
from ..requests.chat_message import ChatMessageParams
from .requests.agent_log_request_tool_choice import AgentLogRequestToolChoiceParams
-from ..requests.agent_kernel_request import AgentKernelRequestParams
+from .requests.agent_log_request_agent import AgentLogRequestAgentParams
import datetime as dt
from ..types.log_status import LogStatus
from ..core.request_options import RequestOptions
@@ -16,17 +16,22 @@
from ..types.http_validation_error import HttpValidationError
from json.decoder import JSONDecodeError
from ..core.api_error import ApiError
-from ..types.agent_log_response import AgentLogResponse
+from ..types.log_response import LogResponse
from ..core.jsonable_encoder import jsonable_encoder
from .requests.agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoiceParams
+from .requests.agents_call_stream_request_agent import AgentsCallStreamRequestAgentParams
from ..requests.provider_api_keys import ProviderApiKeysParams
from ..types.agent_call_stream_response import AgentCallStreamResponse
import httpx_sse
import contextlib
from .requests.agents_call_request_tool_choice import AgentsCallRequestToolChoiceParams
+from .requests.agents_call_request_agent import AgentsCallRequestAgentParams
from ..types.agent_call_response import AgentCallResponse
-from ..types.agent_continue_call_stream_response import AgentContinueCallStreamResponse
-from ..types.agent_continue_call_response import AgentContinueCallResponse
+from ..types.agent_continue_stream_response import AgentContinueStreamResponse
+from ..types.agent_continue_response import AgentContinueResponse
+from ..types.file_sort_by import FileSortBy
+from ..types.sort_order import SortOrder
+from ..types.paginated_data_agent_response import PaginatedDataAgentResponse
from ..types.model_endpoints import ModelEndpoints
from .requests.agent_request_template import AgentRequestTemplateParams
from ..types.template_language import TemplateLanguage
@@ -73,7 +78,7 @@ def log(
finish_reason: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT,
- agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ agent: typing.Optional[AgentLogRequestAgentParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -152,8 +157,11 @@ def log(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- agent : typing.Optional[AgentKernelRequestParams]
- Details of your Agent. A new Agent version will be created if the provided details are new.
+ agent : typing.Optional[AgentLogRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+ A new Agent version will be created if the provided details do not match any existing version.
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -247,7 +255,7 @@ def log(
object_=tool_choice, annotation=AgentLogRequestToolChoiceParams, direction="write"
),
"agent": convert_and_respect_annotation_metadata(
- object_=agent, annotation=AgentKernelRequestParams, direction="write"
+ object_=agent, annotation=AgentLogRequestAgentParams, direction="write"
),
"start_time": start_time,
"end_time": end_time,
@@ -312,7 +320,7 @@ def update_log(
error: typing.Optional[str] = OMIT,
log_status: typing.Optional[LogStatus] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> HttpResponse[AgentLogResponse]:
+ ) -> HttpResponse[LogResponse]:
"""
Update a Log.
@@ -349,7 +357,7 @@ def update_log(
Returns
-------
- HttpResponse[AgentLogResponse]
+ HttpResponse[LogResponse]
Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
@@ -376,9 +384,9 @@ def update_log(
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
- AgentLogResponse,
+ LogResponse,
construct_type(
- type_=AgentLogResponse, # type: ignore
+ type_=LogResponse, # type: ignore
object_=_response.json(),
),
)
@@ -408,7 +416,7 @@ def call_stream(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT,
- agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ agent: typing.Optional[AgentsCallStreamRequestAgentParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -427,21 +435,18 @@ def call_stream(
request_options: typing.Optional[RequestOptions] = None,
) -> typing.Iterator[HttpResponse[typing.Iterator[AgentCallStreamResponse]]]:
"""
- Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
+ Call an Agent.
- If the Agent requires a tool call that cannot be run by Humanloop, execution will halt. To continue,
- pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
-
- The agent will run for the maximum number of iterations, or until it encounters a stop condition,
- according to its configuration.
+ Calling an Agent invokes the model provider before logging
+ the request, responses, and metadata to Humanloop.
You can use query parameters `version_id` or `environment` to target
an existing version of the Agent. Otherwise the default deployed version will be chosen.
Instead of targeting an existing version explicitly, you can pass in
- Agent details in the request body. A new version is created if it does not match
- any existing ones. This is helpful in the case where you are storing or deriving
- your Agent details in code.
+ Agent details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Agent details in code.
Parameters
----------
@@ -467,8 +472,11 @@ def call_stream(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- agent : typing.Optional[AgentKernelRequestParams]
- Details of your Agent. A new Agent version will be created if the provided details are new.
+ agent : typing.Optional[AgentsCallStreamRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+ A new Agent version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -540,7 +548,7 @@ def call_stream(
object_=tool_choice, annotation=AgentsCallStreamRequestToolChoiceParams, direction="write"
),
"agent": convert_and_respect_annotation_metadata(
- object_=agent, annotation=AgentKernelRequestParams, direction="write"
+ object_=agent, annotation=AgentsCallStreamRequestAgentParams, direction="write"
),
"inputs": inputs,
"source": source,
@@ -611,7 +619,7 @@ def call(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT,
- agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ agent: typing.Optional[AgentsCallRequestAgentParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -630,21 +638,18 @@ def call(
request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[AgentCallResponse]:
"""
- Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
-
- If the Agent requires a tool call that cannot be run by Humanloop, execution will halt. To continue,
- pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
+ Call an Agent.
- The agent will run for the maximum number of iterations, or until it encounters a stop condition,
- according to its configuration.
+ Calling an Agent invokes the model provider before logging
+ the request, responses, and metadata to Humanloop.
You can use query parameters `version_id` or `environment` to target
an existing version of the Agent. Otherwise the default deployed version will be chosen.
Instead of targeting an existing version explicitly, you can pass in
- Agent details in the request body. A new version is created if it does not match
- any existing ones. This is helpful in the case where you are storing or deriving
- your Agent details in code.
+ Agent details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Agent details in code.
Parameters
----------
@@ -670,8 +675,11 @@ def call(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- agent : typing.Optional[AgentKernelRequestParams]
- Details of your Agent. A new Agent version will be created if the provided details are new.
+ agent : typing.Optional[AgentsCallRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+ A new Agent version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -743,7 +751,7 @@ def call(
object_=tool_choice, annotation=AgentsCallRequestToolChoiceParams, direction="write"
),
"agent": convert_and_respect_annotation_metadata(
- object_=agent, annotation=AgentKernelRequestParams, direction="write"
+ object_=agent, annotation=AgentsCallRequestAgentParams, direction="write"
),
"inputs": inputs,
"source": source,
@@ -796,7 +804,7 @@ def call(
raise ApiError(status_code=_response.status_code, body=_response_json)
@contextlib.contextmanager
- def continue_call_stream(
+ def continue_stream(
self,
*,
log_id: str,
@@ -804,18 +812,18 @@ def continue_call_stream(
provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
include_trace_children: typing.Optional[bool] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> typing.Iterator[HttpResponse[typing.Iterator[AgentContinueCallStreamResponse]]]:
+ ) -> typing.Iterator[HttpResponse[typing.Iterator[AgentContinueStreamResponse]]]:
"""
Continue an incomplete Agent call.
- This endpoint allows continuing an existing incomplete Agent call, by passing the tool call
- requested by the Agent. The Agent will resume processing from where it left off.
-
- The messages in the request will be appended to the original messages in the Log. You do not
- have to provide the previous conversation history.
+ This endpoint allows continuing an existing incomplete Agent call, using the context
+ from the previous interaction. The Agent will resume processing from where it left off.
The original log must be in an incomplete state to be continued.
+ The messages in the request will be appended to the original messages in the log.
+
Parameters
----------
log_id : str
@@ -835,7 +843,7 @@ def continue_call_stream(
Yields
------
- typing.Iterator[HttpResponse[typing.Iterator[AgentContinueCallStreamResponse]]]
+ typing.Iterator[HttpResponse[typing.Iterator[AgentContinueStreamResponse]]]
"""
with self._client_wrapper.httpx_client.stream(
@@ -859,7 +867,7 @@ def continue_call_stream(
omit=OMIT,
) as _response:
- def stream() -> HttpResponse[typing.Iterator[AgentContinueCallStreamResponse]]:
+ def stream() -> HttpResponse[typing.Iterator[AgentContinueStreamResponse]]:
try:
if 200 <= _response.status_code < 300:
@@ -893,7 +901,7 @@ def _iter():
yield stream()
- def continue_call(
+ def continue_(
self,
*,
log_id: str,
@@ -901,18 +909,18 @@ def continue_call(
provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
include_trace_children: typing.Optional[bool] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> HttpResponse[AgentContinueCallResponse]:
+ ) -> HttpResponse[AgentContinueResponse]:
"""
Continue an incomplete Agent call.
- This endpoint allows continuing an existing incomplete Agent call, by passing the tool call
- requested by the Agent. The Agent will resume processing from where it left off.
-
- The messages in the request will be appended to the original messages in the Log. You do not
- have to provide the previous conversation history.
+ This endpoint allows continuing an existing incomplete Agent call, using the context
+ from the previous interaction. The Agent will resume processing from where it left off.
The original log must be in an incomplete state to be continued.
+ The messages in the request will be appended to the original messages in the log.
+
Parameters
----------
log_id : str
@@ -932,7 +940,7 @@ def continue_call(
Returns
-------
- HttpResponse[AgentContinueCallResponse]
+ HttpResponse[AgentContinueResponse]
"""
_response = self._client_wrapper.httpx_client.request(
@@ -958,9 +966,89 @@ def continue_call(
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
- AgentContinueCallResponse,
+ AgentContinueResponse,
+ construct_type(
+ type_=AgentContinueResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def list(
+ self,
+ *,
+ page: typing.Optional[int] = None,
+ size: typing.Optional[int] = None,
+ name: typing.Optional[str] = None,
+ user_filter: typing.Optional[str] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
+ order: typing.Optional[SortOrder] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[PaginatedDataAgentResponse]:
+ """
+ Get a list of all Agents.
+
+ Parameters
+ ----------
+ page : typing.Optional[int]
+ Page number for pagination.
+
+ size : typing.Optional[int]
+ Page size for pagination. Number of Agents to fetch.
+
+ name : typing.Optional[str]
+ Case-insensitive filter for Agent name.
+
+ user_filter : typing.Optional[str]
+ Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users.
+
+ sort_by : typing.Optional[FileSortBy]
+ Field to sort Agents by
+
+ order : typing.Optional[SortOrder]
+ Direction to sort by.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[PaginatedDataAgentResponse]
+ Successful Response
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "agents",
+ method="GET",
+ params={
+ "page": page,
+ "size": size,
+ "name": name,
+ "user_filter": user_filter,
+ "sort_by": sort_by,
+ "order": order,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ PaginatedDataAgentResponse,
construct_type(
- type_=AgentContinueCallResponse, # type: ignore
+ type_=PaginatedDataAgentResponse, # type: ignore
object_=_response.json(),
),
)
@@ -1770,7 +1858,7 @@ def serialize(
version_id: typing.Optional[str] = None,
environment: typing.Optional[str] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> HttpResponse[None]:
+ ) -> HttpResponse[str]:
"""
Serialize an Agent to the .agent file format.
@@ -1796,7 +1884,8 @@ def serialize(
Returns
-------
- HttpResponse[None]
+ HttpResponse[str]
+ Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
f"agents/{jsonable_encoder(id)}/serialize",
@@ -1809,7 +1898,7 @@ def serialize(
)
try:
if 200 <= _response.status_code < 300:
- return HttpResponse(response=_response, data=None)
+ return HttpResponse(response=_response, data=_response.text)
if _response.status_code == 422:
raise UnprocessableEntityError(
typing.cast(
@@ -1905,7 +1994,7 @@ async def log(
finish_reason: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT,
- agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ agent: typing.Optional[AgentLogRequestAgentParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -1984,8 +2073,11 @@ async def log(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- agent : typing.Optional[AgentKernelRequestParams]
- Details of your Agent. A new Agent version will be created if the provided details are new.
+ agent : typing.Optional[AgentLogRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+ A new Agent version will be created if the provided details do not match any existing version.
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -2079,7 +2171,7 @@ async def log(
object_=tool_choice, annotation=AgentLogRequestToolChoiceParams, direction="write"
),
"agent": convert_and_respect_annotation_metadata(
- object_=agent, annotation=AgentKernelRequestParams, direction="write"
+ object_=agent, annotation=AgentLogRequestAgentParams, direction="write"
),
"start_time": start_time,
"end_time": end_time,
@@ -2144,7 +2236,7 @@ async def update_log(
error: typing.Optional[str] = OMIT,
log_status: typing.Optional[LogStatus] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> AsyncHttpResponse[AgentLogResponse]:
+ ) -> AsyncHttpResponse[LogResponse]:
"""
Update a Log.
@@ -2181,7 +2273,7 @@ async def update_log(
Returns
-------
- AsyncHttpResponse[AgentLogResponse]
+ AsyncHttpResponse[LogResponse]
Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
@@ -2208,9 +2300,9 @@ async def update_log(
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
- AgentLogResponse,
+ LogResponse,
construct_type(
- type_=AgentLogResponse, # type: ignore
+ type_=LogResponse, # type: ignore
object_=_response.json(),
),
)
@@ -2240,7 +2332,7 @@ async def call_stream(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT,
- agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ agent: typing.Optional[AgentsCallStreamRequestAgentParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2259,21 +2351,18 @@ async def call_stream(
request_options: typing.Optional[RequestOptions] = None,
) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AgentCallStreamResponse]]]:
"""
- Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
+ Call an Agent.
- If the Agent requires a tool call that cannot be run by Humanloop, execution will halt. To continue,
- pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
-
- The agent will run for the maximum number of iterations, or until it encounters a stop condition,
- according to its configuration.
+ Calling an Agent invokes the model provider before logging
+ the request, responses, and metadata to Humanloop.
You can use query parameters `version_id` or `environment` to target
an existing version of the Agent. Otherwise the default deployed version will be chosen.
Instead of targeting an existing version explicitly, you can pass in
- Agent details in the request body. A new version is created if it does not match
- any existing ones. This is helpful in the case where you are storing or deriving
- your Agent details in code.
+ Agent details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Agent details in code.
Parameters
----------
@@ -2299,8 +2388,11 @@ async def call_stream(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- agent : typing.Optional[AgentKernelRequestParams]
- Details of your Agent. A new Agent version will be created if the provided details are new.
+ agent : typing.Optional[AgentsCallStreamRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+ A new Agent version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -2372,7 +2464,7 @@ async def call_stream(
object_=tool_choice, annotation=AgentsCallStreamRequestToolChoiceParams, direction="write"
),
"agent": convert_and_respect_annotation_metadata(
- object_=agent, annotation=AgentKernelRequestParams, direction="write"
+ object_=agent, annotation=AgentsCallStreamRequestAgentParams, direction="write"
),
"inputs": inputs,
"source": source,
@@ -2443,7 +2535,7 @@ async def call(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT,
- agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ agent: typing.Optional[AgentsCallRequestAgentParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2462,21 +2554,18 @@ async def call(
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[AgentCallResponse]:
"""
- Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
-
- If the Agent requires a tool call that cannot be run by Humanloop, execution will halt. To continue,
- pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
+ Call an Agent.
- The agent will run for the maximum number of iterations, or until it encounters a stop condition,
- according to its configuration.
+ Calling an Agent invokes the model provider before logging
+ the request, responses, and metadata to Humanloop.
You can use query parameters `version_id` or `environment` to target
an existing version of the Agent. Otherwise the default deployed version will be chosen.
Instead of targeting an existing version explicitly, you can pass in
- Agent details in the request body. A new version is created if it does not match
- any existing ones. This is helpful in the case where you are storing or deriving
- your Agent details in code.
+ Agent details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Agent. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Agent details in code.
Parameters
----------
@@ -2502,8 +2591,11 @@ async def call(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- agent : typing.Optional[AgentKernelRequestParams]
- Details of your Agent. A new Agent version will be created if the provided details are new.
+ agent : typing.Optional[AgentsCallRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+ A new Agent version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -2575,7 +2667,7 @@ async def call(
object_=tool_choice, annotation=AgentsCallRequestToolChoiceParams, direction="write"
),
"agent": convert_and_respect_annotation_metadata(
- object_=agent, annotation=AgentKernelRequestParams, direction="write"
+ object_=agent, annotation=AgentsCallRequestAgentParams, direction="write"
),
"inputs": inputs,
"source": source,
@@ -2628,7 +2720,7 @@ async def call(
raise ApiError(status_code=_response.status_code, body=_response_json)
@contextlib.asynccontextmanager
- async def continue_call_stream(
+ async def continue_stream(
self,
*,
log_id: str,
@@ -2636,18 +2728,18 @@ async def continue_call_stream(
provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
include_trace_children: typing.Optional[bool] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AgentContinueCallStreamResponse]]]:
+ ) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AgentContinueStreamResponse]]]:
"""
Continue an incomplete Agent call.
- This endpoint allows continuing an existing incomplete Agent call, by passing the tool call
- requested by the Agent. The Agent will resume processing from where it left off.
-
- The messages in the request will be appended to the original messages in the Log. You do not
- have to provide the previous conversation history.
+ This endpoint allows continuing an existing incomplete Agent call, using the context
+ from the previous interaction. The Agent will resume processing from where it left off.
The original log must be in an incomplete state to be continued.
+ The messages in the request will be appended to the original messages in the log.
+
Parameters
----------
log_id : str
@@ -2667,7 +2759,7 @@ async def continue_call_stream(
Yields
------
- typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AgentContinueCallStreamResponse]]]
+ typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AgentContinueStreamResponse]]]
"""
async with self._client_wrapper.httpx_client.stream(
@@ -2691,7 +2783,7 @@ async def continue_call_stream(
omit=OMIT,
) as _response:
- async def stream() -> AsyncHttpResponse[typing.AsyncIterator[AgentContinueCallStreamResponse]]:
+ async def stream() -> AsyncHttpResponse[typing.AsyncIterator[AgentContinueStreamResponse]]:
try:
if 200 <= _response.status_code < 300:
@@ -2725,7 +2817,7 @@ async def _iter():
yield await stream()
- async def continue_call(
+ async def continue_(
self,
*,
log_id: str,
@@ -2733,18 +2825,18 @@ async def continue_call(
provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT,
include_trace_children: typing.Optional[bool] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
- ) -> AsyncHttpResponse[AgentContinueCallResponse]:
+ ) -> AsyncHttpResponse[AgentContinueResponse]:
"""
Continue an incomplete Agent call.
- This endpoint allows continuing an existing incomplete Agent call, by passing the tool call
- requested by the Agent. The Agent will resume processing from where it left off.
-
- The messages in the request will be appended to the original messages in the Log. You do not
- have to provide the previous conversation history.
+ This endpoint allows continuing an existing incomplete Agent call, using the context
+ from the previous interaction. The Agent will resume processing from where it left off.
The original log must be in an incomplete state to be continued.
+ The messages in the request will be appended to the original messages in the log.
+
Parameters
----------
log_id : str
@@ -2764,7 +2856,7 @@ async def continue_call(
Returns
-------
- AsyncHttpResponse[AgentContinueCallResponse]
+ AsyncHttpResponse[AgentContinueResponse]
"""
_response = await self._client_wrapper.httpx_client.request(
@@ -2790,9 +2882,89 @@ async def continue_call(
try:
if 200 <= _response.status_code < 300:
_data = typing.cast(
- AgentContinueCallResponse,
+ AgentContinueResponse,
+ construct_type(
+ type_=AgentContinueResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def list(
+ self,
+ *,
+ page: typing.Optional[int] = None,
+ size: typing.Optional[int] = None,
+ name: typing.Optional[str] = None,
+ user_filter: typing.Optional[str] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
+ order: typing.Optional[SortOrder] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[PaginatedDataAgentResponse]:
+ """
+ Get a list of all Agents.
+
+ Parameters
+ ----------
+ page : typing.Optional[int]
+ Page number for pagination.
+
+ size : typing.Optional[int]
+ Page size for pagination. Number of Agents to fetch.
+
+ name : typing.Optional[str]
+ Case-insensitive filter for Agent name.
+
+ user_filter : typing.Optional[str]
+ Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users.
+
+ sort_by : typing.Optional[FileSortBy]
+ Field to sort Agents by
+
+ order : typing.Optional[SortOrder]
+ Direction to sort by.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[PaginatedDataAgentResponse]
+ Successful Response
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "agents",
+ method="GET",
+ params={
+ "page": page,
+ "size": size,
+ "name": name,
+ "user_filter": user_filter,
+ "sort_by": sort_by,
+ "order": order,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ PaginatedDataAgentResponse,
construct_type(
- type_=AgentContinueCallResponse, # type: ignore
+ type_=PaginatedDataAgentResponse, # type: ignore
object_=_response.json(),
),
)
@@ -3604,7 +3776,7 @@ async def serialize(
version_id: typing.Optional[str] = None,
environment: typing.Optional[str] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> AsyncHttpResponse[None]:
+ ) -> AsyncHttpResponse[str]:
"""
Serialize an Agent to the .agent file format.
@@ -3630,7 +3802,8 @@ async def serialize(
Returns
-------
- AsyncHttpResponse[None]
+ AsyncHttpResponse[str]
+ Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
f"agents/{jsonable_encoder(id)}/serialize",
@@ -3643,7 +3816,7 @@ async def serialize(
)
try:
if 200 <= _response.status_code < 300:
- return AsyncHttpResponse(response=_response, data=None)
+ return AsyncHttpResponse(response=_response, data=_response.text)
if _response.status_code == 422:
raise UnprocessableEntityError(
typing.cast(
diff --git a/src/humanloop/agents/requests/__init__.py b/src/humanloop/agents/requests/__init__.py
index 78a8f9ec..06ce37ed 100644
--- a/src/humanloop/agents/requests/__init__.py
+++ b/src/humanloop/agents/requests/__init__.py
@@ -1,19 +1,25 @@
# This file was auto-generated by Fern from our API Definition.
+from .agent_log_request_agent import AgentLogRequestAgentParams
from .agent_log_request_tool_choice import AgentLogRequestToolChoiceParams
from .agent_request_reasoning_effort import AgentRequestReasoningEffortParams
from .agent_request_stop import AgentRequestStopParams
from .agent_request_template import AgentRequestTemplateParams
from .agent_request_tools_item import AgentRequestToolsItemParams
+from .agents_call_request_agent import AgentsCallRequestAgentParams
from .agents_call_request_tool_choice import AgentsCallRequestToolChoiceParams
+from .agents_call_stream_request_agent import AgentsCallStreamRequestAgentParams
from .agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoiceParams
__all__ = [
+ "AgentLogRequestAgentParams",
"AgentLogRequestToolChoiceParams",
"AgentRequestReasoningEffortParams",
"AgentRequestStopParams",
"AgentRequestTemplateParams",
"AgentRequestToolsItemParams",
+ "AgentsCallRequestAgentParams",
"AgentsCallRequestToolChoiceParams",
+ "AgentsCallStreamRequestAgentParams",
"AgentsCallStreamRequestToolChoiceParams",
]
diff --git a/src/humanloop/agents/requests/agent_log_request_agent.py b/src/humanloop/agents/requests/agent_log_request_agent.py
new file mode 100644
index 00000000..1c6a7987
--- /dev/null
+++ b/src/humanloop/agents/requests/agent_log_request_agent.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.agent_kernel_request import AgentKernelRequestParams
+
+AgentLogRequestAgentParams = typing.Union[AgentKernelRequestParams, str]
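[Editor's note] These new aliases make the object-or-string duality explicit in the type system. An illustrative annotation check; the dict keys come from `AgentKernelRequestParams`, and the raw-string contents shown are hypothetical:

from humanloop.agents.requests import AgentLogRequestAgentParams

# Dict arm of the union: inline Agent details.
as_details: AgentLogRequestAgentParams = {
    "provider": "anthropic",
    "model": "claude-3-7-sonnet-latest",
}
# String arm: the raw contents of a .agent file (contents hypothetical).
as_raw_file: AgentLogRequestAgentParams = "model: claude-3-7-sonnet-latest"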
diff --git a/src/humanloop/agents/requests/agents_call_request_agent.py b/src/humanloop/agents/requests/agents_call_request_agent.py
new file mode 100644
index 00000000..5c92d02b
--- /dev/null
+++ b/src/humanloop/agents/requests/agents_call_request_agent.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.agent_kernel_request import AgentKernelRequestParams
+
+AgentsCallRequestAgentParams = typing.Union[AgentKernelRequestParams, str]
diff --git a/src/humanloop/agents/requests/agents_call_stream_request_agent.py b/src/humanloop/agents/requests/agents_call_stream_request_agent.py
new file mode 100644
index 00000000..e9018a18
--- /dev/null
+++ b/src/humanloop/agents/requests/agents_call_stream_request_agent.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.agent_kernel_request import AgentKernelRequestParams
+
+AgentsCallStreamRequestAgentParams = typing.Union[AgentKernelRequestParams, str]
diff --git a/src/humanloop/agents/types/__init__.py b/src/humanloop/agents/types/__init__.py
index 73d98669..9c8a955c 100644
--- a/src/humanloop/agents/types/__init__.py
+++ b/src/humanloop/agents/types/__init__.py
@@ -1,19 +1,25 @@
# This file was auto-generated by Fern from our API Definition.
+from .agent_log_request_agent import AgentLogRequestAgent
from .agent_log_request_tool_choice import AgentLogRequestToolChoice
from .agent_request_reasoning_effort import AgentRequestReasoningEffort
from .agent_request_stop import AgentRequestStop
from .agent_request_template import AgentRequestTemplate
from .agent_request_tools_item import AgentRequestToolsItem
+from .agents_call_request_agent import AgentsCallRequestAgent
from .agents_call_request_tool_choice import AgentsCallRequestToolChoice
+from .agents_call_stream_request_agent import AgentsCallStreamRequestAgent
from .agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoice
__all__ = [
+ "AgentLogRequestAgent",
"AgentLogRequestToolChoice",
"AgentRequestReasoningEffort",
"AgentRequestStop",
"AgentRequestTemplate",
"AgentRequestToolsItem",
+ "AgentsCallRequestAgent",
"AgentsCallRequestToolChoice",
+ "AgentsCallStreamRequestAgent",
"AgentsCallStreamRequestToolChoice",
]
diff --git a/src/humanloop/agents/types/agent_log_request_agent.py b/src/humanloop/agents/types/agent_log_request_agent.py
new file mode 100644
index 00000000..011a2b9d
--- /dev/null
+++ b/src/humanloop/agents/types/agent_log_request_agent.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.agent_kernel_request import AgentKernelRequest
+
+AgentLogRequestAgent = typing.Union[AgentKernelRequest, str]
diff --git a/src/humanloop/agents/types/agents_call_request_agent.py b/src/humanloop/agents/types/agents_call_request_agent.py
new file mode 100644
index 00000000..5f663ad3
--- /dev/null
+++ b/src/humanloop/agents/types/agents_call_request_agent.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.agent_kernel_request import AgentKernelRequest
+
+AgentsCallRequestAgent = typing.Union[AgentKernelRequest, str]
diff --git a/src/humanloop/agents/types/agents_call_stream_request_agent.py b/src/humanloop/agents/types/agents_call_stream_request_agent.py
new file mode 100644
index 00000000..4b2654e9
--- /dev/null
+++ b/src/humanloop/agents/types/agents_call_stream_request_agent.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.agent_kernel_request import AgentKernelRequest
+
+AgentsCallStreamRequestAgent = typing.Union[AgentKernelRequest, str]
diff --git a/src/humanloop/core/client_wrapper.py b/src/humanloop/core/client_wrapper.py
index 71036800..94cf9db0 100644
--- a/src/humanloop/core/client_wrapper.py
+++ b/src/humanloop/core/client_wrapper.py
@@ -14,10 +14,10 @@ def __init__(self, *, api_key: str, base_url: str, timeout: typing.Optional[floa
def get_headers(self) -> typing.Dict[str, str]:
headers: typing.Dict[str, str] = {
- "User-Agent": "humanloop/0.8.36b1",
+ "User-Agent": "humanloop/0.8.36",
"X-Fern-Language": "Python",
"X-Fern-SDK-Name": "humanloop",
- "X-Fern-SDK-Version": "0.8.36b1",
+ "X-Fern-SDK-Version": "0.8.36",
}
headers["X-API-KEY"] = self.api_key
return headers
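[Editor's note] A quick way to confirm the bumped version reaches the wire; accessing `_client_wrapper` is an internal detail used here only for illustration (the same private access appears elsewhere in this patch):

from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

headers = client._client_wrapper.get_headers()
assert headers["User-Agent"] == "humanloop/0.8.36"
assert headers["X-Fern-SDK-Version"] == "0.8.36"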
diff --git a/src/humanloop/datasets/client.py b/src/humanloop/datasets/client.py
index 0e70c39c..8ab493a3 100644
--- a/src/humanloop/datasets/client.py
+++ b/src/humanloop/datasets/client.py
@@ -3,7 +3,7 @@
import typing
from ..core.client_wrapper import SyncClientWrapper
from .raw_client import RawDatasetsClient
-from ..types.project_sort_by import ProjectSortBy
+from ..types.file_sort_by import FileSortBy
from ..types.sort_order import SortOrder
from ..core.request_options import RequestOptions
from ..core.pagination import SyncPager
@@ -55,7 +55,7 @@ def list(
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
user_filter: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> SyncPager[DatasetResponse]:
@@ -76,7 +76,7 @@ def list(
user_filter : typing.Optional[str]
Case-insensitive filter for users in the Dataset. This filter matches against both email address and name of users.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort Datasets by
order : typing.Optional[SortOrder]
@@ -857,7 +857,7 @@ async def list(
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
user_filter: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncPager[DatasetResponse]:
@@ -878,7 +878,7 @@ async def list(
user_filter : typing.Optional[str]
Case-insensitive filter for users in the Dataset. This filter matches against both email address and name of users.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort Datasets by
order : typing.Optional[SortOrder]
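[Editor's note] A sketch of the renamed sort enum in use, assuming the sync `Humanloop` client and that `FileSortBy` keeps members such as "updated_at" (the member names are an assumption, not confirmed by this patch):

from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# datasets.list still returns a SyncPager, so it can be iterated directly.
for dataset in client.datasets.list(size=10, sort_by="updated_at", order="desc"):
    print(dataset.id)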
diff --git a/src/humanloop/evaluators/client.py b/src/humanloop/evaluators/client.py
index 88ec32c8..7fb13a71 100644
--- a/src/humanloop/evaluators/client.py
+++ b/src/humanloop/evaluators/client.py
@@ -10,7 +10,7 @@
from .requests.create_evaluator_log_request_spec import CreateEvaluatorLogRequestSpecParams
from ..core.request_options import RequestOptions
from ..types.create_evaluator_log_response import CreateEvaluatorLogResponse
-from ..types.project_sort_by import ProjectSortBy
+from ..types.file_sort_by import FileSortBy
from ..types.sort_order import SortOrder
from ..core.pagination import SyncPager
from ..types.evaluator_response import EvaluatorResponse
@@ -234,7 +234,7 @@ def list(
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
user_filter: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> SyncPager[EvaluatorResponse]:
@@ -255,7 +255,7 @@ def list(
user_filter : typing.Optional[str]
Case-insensitive filter for users in the Evaluator. This filter matches against both email address and name of users.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort Evaluators by
order : typing.Optional[SortOrder]
@@ -1032,7 +1032,7 @@ async def list(
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
user_filter: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncPager[EvaluatorResponse]:
@@ -1053,7 +1053,7 @@ async def list(
user_filter : typing.Optional[str]
Case-insensitive filter for users in the Evaluator. This filter matches against both email address and name of users.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort Evaluators by
order : typing.Optional[SortOrder]
diff --git a/src/humanloop/files/client.py b/src/humanloop/files/client.py
index 693b46cb..b4fd4274 100644
--- a/src/humanloop/files/client.py
+++ b/src/humanloop/files/client.py
@@ -4,7 +4,7 @@
from ..core.client_wrapper import SyncClientWrapper
from .raw_client import RawFilesClient
from ..types.file_type import FileType
-from ..types.project_sort_by import ProjectSortBy
+from ..types.file_sort_by import FileSortBy
from ..types.sort_order import SortOrder
from ..core.request_options import RequestOptions
from ..types.paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response import (
@@ -39,11 +39,13 @@ def list_files(
page: typing.Optional[int] = None,
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
+ path: typing.Optional[str] = None,
template: typing.Optional[bool] = None,
type: typing.Optional[typing.Union[FileType, typing.Sequence[FileType]]] = None,
environment: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
+ include_raw_file_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse:
"""
@@ -60,6 +62,9 @@ def list_files(
name : typing.Optional[str]
Case-insensitive filter for file name.
+ path : typing.Optional[str]
+ Path of the directory to filter for. Returns files in this directory and all its subdirectories.
+
template : typing.Optional[bool]
Filter to include only template files.
@@ -69,12 +74,15 @@ def list_files(
environment : typing.Optional[str]
Case-sensitive filter for files with a deployment in the specified environment. Requires the environment name.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort files by
order : typing.Optional[SortOrder]
Direction to sort by.
+ include_raw_file_content : typing.Optional[bool]
+ Whether to include the raw file content in the response. Currently only supported for Agents and Prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -96,11 +104,13 @@ def list_files(
page=page,
size=size,
name=name,
+ path=path,
template=template,
type=type,
environment=environment,
sort_by=sort_by,
order=order,
+ include_raw_file_content=include_raw_file_content,
request_options=request_options,
)
return response.data
@@ -110,6 +120,7 @@ def retrieve_by_path(
*,
path: str,
environment: typing.Optional[str] = None,
+ include_raw_file_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> RetrieveByPathFilesRetrieveByPathPostResponse:
"""
@@ -123,6 +134,9 @@ def retrieve_by_path(
environment : typing.Optional[str]
Name of the Environment to retrieve a deployed Version from.
+ include_raw_file_content : typing.Optional[bool]
+ Whether to include the raw file content in the response. Currently only supported for Agents and Prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -143,7 +157,10 @@ def retrieve_by_path(
)
"""
response = self._raw_client.retrieve_by_path(
- path=path, environment=environment, request_options=request_options
+ path=path,
+ environment=environment,
+ include_raw_file_content=include_raw_file_content,
+ request_options=request_options,
)
return response.data
@@ -169,11 +186,13 @@ async def list_files(
page: typing.Optional[int] = None,
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
+ path: typing.Optional[str] = None,
template: typing.Optional[bool] = None,
type: typing.Optional[typing.Union[FileType, typing.Sequence[FileType]]] = None,
environment: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
+ include_raw_file_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse:
"""
@@ -190,6 +209,9 @@ async def list_files(
name : typing.Optional[str]
Case-insensitive filter for file name.
+ path : typing.Optional[str]
+ Path of the directory to filter for. Returns files in this directory and all its subdirectories.
+
template : typing.Optional[bool]
Filter to include only template files.
@@ -199,12 +221,15 @@ async def list_files(
environment : typing.Optional[str]
Case-sensitive filter for files with a deployment in the specified environment. Requires the environment name.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort files by
order : typing.Optional[SortOrder]
Direction to sort by.
+ include_raw_file_content : typing.Optional[bool]
+ Whether to include the raw file content in the response. Currently only supported for Agents and Prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -234,11 +259,13 @@ async def main() -> None:
page=page,
size=size,
name=name,
+ path=path,
template=template,
type=type,
environment=environment,
sort_by=sort_by,
order=order,
+ include_raw_file_content=include_raw_file_content,
request_options=request_options,
)
return response.data
@@ -248,6 +275,7 @@ async def retrieve_by_path(
*,
path: str,
environment: typing.Optional[str] = None,
+ include_raw_file_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> RetrieveByPathFilesRetrieveByPathPostResponse:
"""
@@ -261,6 +289,9 @@ async def retrieve_by_path(
environment : typing.Optional[str]
Name of the Environment to retrieve a deployed Version from.
+ include_raw_file_content : typing.Optional[bool]
+ Whether to include the raw file content in the response. Currently only supported for Agents and Prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -289,6 +320,9 @@ async def main() -> None:
asyncio.run(main())
"""
response = await self._raw_client.retrieve_by_path(
- path=path, environment=environment, request_options=request_options
+ path=path,
+ environment=environment,
+ include_raw_file_content=include_raw_file_content,
+ request_options=request_options,
)
return response.data
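
Both `list_files` and `retrieve_by_path` gain two optional parameters: `path`, a recursive directory filter, and `include_raw_file_content`, which is only honored for Agents and Prompts. A usage sketch reusing the `client` from the sketch above; the directory and file paths are hypothetical:

```python
# Restrict the listing to one directory subtree and request raw contents.
page = client.files.list_files(
    path="marketing/email",         # hypothetical directory; subdirectories match too
    type="prompt",
    include_raw_file_content=True,  # only supported for Agents and Prompts
)

# retrieve_by_path accepts the same flag.
file = client.files.retrieve_by_path(
    path="marketing/email/subject-line",  # hypothetical file path
    environment="production",
    include_raw_file_content=True,
)
```
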
diff --git a/src/humanloop/files/raw_client.py b/src/humanloop/files/raw_client.py
index 01b48e03..02f371ac 100644
--- a/src/humanloop/files/raw_client.py
+++ b/src/humanloop/files/raw_client.py
@@ -3,7 +3,7 @@
import typing
from ..core.client_wrapper import SyncClientWrapper
from ..types.file_type import FileType
-from ..types.project_sort_by import ProjectSortBy
+from ..types.file_sort_by import FileSortBy
from ..types.sort_order import SortOrder
from ..core.request_options import RequestOptions
from ..core.http_response import HttpResponse
@@ -33,11 +33,13 @@ def list_files(
page: typing.Optional[int] = None,
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
+ path: typing.Optional[str] = None,
template: typing.Optional[bool] = None,
type: typing.Optional[typing.Union[FileType, typing.Sequence[FileType]]] = None,
environment: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
+ include_raw_file_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[
PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse
@@ -56,6 +58,9 @@ def list_files(
name : typing.Optional[str]
Case-insensitive filter for file name.
+ path : typing.Optional[str]
+ Path of the directory to filter for. Returns files in this directory and all its subdirectories.
+
template : typing.Optional[bool]
Filter to include only template files.
@@ -65,12 +70,15 @@ def list_files(
environment : typing.Optional[str]
Case-sensitive filter for files with a deployment in the specified environment. Requires the environment name.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort files by
order : typing.Optional[SortOrder]
Direction to sort by.
+ include_raw_file_content : typing.Optional[bool]
+ Whether to include the raw file content in the response. Currently only supported for Agents and Prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -86,11 +94,13 @@ def list_files(
"page": page,
"size": size,
"name": name,
+ "path": path,
"template": template,
"type": type,
"environment": environment,
"sort_by": sort_by,
"order": order,
+ "include_raw_file_content": include_raw_file_content,
},
request_options=request_options,
)
@@ -124,6 +134,7 @@ def retrieve_by_path(
*,
path: str,
environment: typing.Optional[str] = None,
+ include_raw_file_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[RetrieveByPathFilesRetrieveByPathPostResponse]:
"""
@@ -137,6 +148,9 @@ def retrieve_by_path(
environment : typing.Optional[str]
Name of the Environment to retrieve a deployed Version from.
+ include_raw_file_content : typing.Optional[bool]
+ Whether to include the raw file content in the response. Currently only supported for Agents and Prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -150,6 +164,7 @@ def retrieve_by_path(
method="POST",
params={
"environment": environment,
+ "include_raw_file_content": include_raw_file_content,
},
json={
"path": path,
@@ -196,11 +211,13 @@ async def list_files(
page: typing.Optional[int] = None,
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
+ path: typing.Optional[str] = None,
template: typing.Optional[bool] = None,
type: typing.Optional[typing.Union[FileType, typing.Sequence[FileType]]] = None,
environment: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
+ include_raw_file_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[
PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse
@@ -219,6 +236,9 @@ async def list_files(
name : typing.Optional[str]
Case-insensitive filter for file name.
+ path : typing.Optional[str]
+ Path of the directory to filter for. Returns files in this directory and all its subdirectories.
+
template : typing.Optional[bool]
Filter to include only template files.
@@ -228,12 +248,15 @@ async def list_files(
environment : typing.Optional[str]
Case-sensitive filter for files with a deployment in the specified environment. Requires the environment name.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort files by
order : typing.Optional[SortOrder]
Direction to sort by.
+ include_raw_file_content : typing.Optional[bool]
+ Whether to include the raw file content in the response. Currently only supported for Agents and Prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -249,11 +272,13 @@ async def list_files(
"page": page,
"size": size,
"name": name,
+ "path": path,
"template": template,
"type": type,
"environment": environment,
"sort_by": sort_by,
"order": order,
+ "include_raw_file_content": include_raw_file_content,
},
request_options=request_options,
)
@@ -287,6 +312,7 @@ async def retrieve_by_path(
*,
path: str,
environment: typing.Optional[str] = None,
+ include_raw_file_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[RetrieveByPathFilesRetrieveByPathPostResponse]:
"""
@@ -300,6 +326,9 @@ async def retrieve_by_path(
environment : typing.Optional[str]
Name of the Environment to retrieve a deployed Version from.
+ include_raw_file_content : typing.Optional[bool]
+ Whether to include the raw file content in the response. Currently only supported for Agents and Prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -313,6 +342,7 @@ async def retrieve_by_path(
method="POST",
params={
"environment": environment,
+ "include_raw_file_content": include_raw_file_content,
},
json={
"path": path,
diff --git a/src/humanloop/flows/client.py b/src/humanloop/flows/client.py
index bcb9491c..9ba82a6a 100644
--- a/src/humanloop/flows/client.py
+++ b/src/humanloop/flows/client.py
@@ -11,7 +11,7 @@
from ..types.create_flow_log_response import CreateFlowLogResponse
from ..types.flow_log_response import FlowLogResponse
from ..types.flow_response import FlowResponse
-from ..types.project_sort_by import ProjectSortBy
+from ..types.file_sort_by import FileSortBy
from ..types.sort_order import SortOrder
from ..core.pagination import SyncPager
from ..types.paginated_data_flow_response import PaginatedDataFlowResponse
@@ -469,7 +469,7 @@ def list(
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
user_filter: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> SyncPager[FlowResponse]:
@@ -490,7 +490,7 @@ def list(
user_filter : typing.Optional[str]
Case-insensitive filter for users in the Flow. This filter matches against both email address and name of users.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort Flows by
order : typing.Optional[SortOrder]
@@ -1418,7 +1418,7 @@ async def list(
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
user_filter: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncPager[FlowResponse]:
@@ -1439,7 +1439,7 @@ async def list(
user_filter : typing.Optional[str]
Case-insensitive filter for users in the Flow. This filter matches against both email address and name of users.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort Flows by
order : typing.Optional[SortOrder]
diff --git a/src/humanloop/logs/client.py b/src/humanloop/logs/client.py
index 8733ed37..b16d1f6b 100644
--- a/src/humanloop/logs/client.py
+++ b/src/humanloop/logs/client.py
@@ -42,6 +42,7 @@ def list(
page: typing.Optional[int] = None,
size: typing.Optional[int] = None,
version_id: typing.Optional[str] = None,
+ version_status: typing.Optional[VersionStatus] = None,
id: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None,
search: typing.Optional[str] = None,
metadata_search: typing.Optional[str] = None,
@@ -70,6 +71,9 @@ def list(
version_id : typing.Optional[str]
If provided, only Logs belonging to the specified Version will be returned.
+ version_status : typing.Optional[VersionStatus]
+ If provided, only Logs belonging to Versions with the specified status will be returned.
+
id : typing.Optional[typing.Union[str, typing.Sequence[str]]]
If provided, returns Logs whose IDs contain any of the specified values as substrings.
@@ -131,6 +135,7 @@ def list(
"page": page,
"size": size,
"version_id": version_id,
+ "version_status": version_status,
"id": id,
"search": search,
"metadata_search": metadata_search,
@@ -158,6 +163,7 @@ def list(
page=page + 1,
size=size,
version_id=version_id,
+ version_status=version_status,
id=id,
search=search,
metadata_search=metadata_search,
@@ -275,6 +281,7 @@ async def list(
page: typing.Optional[int] = None,
size: typing.Optional[int] = None,
version_id: typing.Optional[str] = None,
+ version_status: typing.Optional[VersionStatus] = None,
id: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None,
search: typing.Optional[str] = None,
metadata_search: typing.Optional[str] = None,
@@ -303,6 +310,9 @@ async def list(
version_id : typing.Optional[str]
If provided, only Logs belonging to the specified Version will be returned.
+ version_status : typing.Optional[VersionStatus]
+ If provided, only Logs belonging to Versions with the specified status will be returned.
+
id : typing.Optional[typing.Union[str, typing.Sequence[str]]]
If provided, returns Logs whose IDs contain any of the specified values as substrings.
@@ -372,6 +382,7 @@ async def main() -> None:
"page": page,
"size": size,
"version_id": version_id,
+ "version_status": version_status,
"id": id,
"search": search,
"metadata_search": metadata_search,
@@ -399,6 +410,7 @@ async def main() -> None:
page=page + 1,
size=size,
version_id=version_id,
+ version_status=version_status,
id=id,
search=search,
metadata_search=metadata_search,
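
`logs.list` picks up a `version_status` filter alongside the existing `version_id`. A sketch, assuming the endpoint's existing required `file_id` argument and that `VersionStatus` admits the literal "committed":

```python
# Return only Logs whose Version has the requested status.
logs = client.logs.list(
    file_id="pr_1234567890",     # hypothetical File ID
    size=50,
    version_status="committed",  # assumed VersionStatus literal
)
for log in logs:
    print(log.id)
```
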
diff --git a/src/humanloop/prompts/__init__.py b/src/humanloop/prompts/__init__.py
index ae141d57..557dcc5c 100644
--- a/src/humanloop/prompts/__init__.py
+++ b/src/humanloop/prompts/__init__.py
@@ -1,25 +1,33 @@
# This file was auto-generated by Fern from our API Definition.
from .types import (
+ PromptLogRequestPrompt,
PromptLogRequestToolChoice,
PromptLogUpdateRequestToolChoice,
PromptRequestReasoningEffort,
PromptRequestStop,
PromptRequestTemplate,
+ PromptsCallRequestPrompt,
PromptsCallRequestToolChoice,
+ PromptsCallStreamRequestPrompt,
PromptsCallStreamRequestToolChoice,
)
from .requests import (
+ PromptLogRequestPromptParams,
PromptLogRequestToolChoiceParams,
PromptLogUpdateRequestToolChoiceParams,
PromptRequestReasoningEffortParams,
PromptRequestStopParams,
PromptRequestTemplateParams,
+ PromptsCallRequestPromptParams,
PromptsCallRequestToolChoiceParams,
+ PromptsCallStreamRequestPromptParams,
PromptsCallStreamRequestToolChoiceParams,
)
__all__ = [
+ "PromptLogRequestPrompt",
+ "PromptLogRequestPromptParams",
"PromptLogRequestToolChoice",
"PromptLogRequestToolChoiceParams",
"PromptLogUpdateRequestToolChoice",
@@ -30,8 +38,12 @@
"PromptRequestStopParams",
"PromptRequestTemplate",
"PromptRequestTemplateParams",
+ "PromptsCallRequestPrompt",
+ "PromptsCallRequestPromptParams",
"PromptsCallRequestToolChoice",
"PromptsCallRequestToolChoiceParams",
+ "PromptsCallStreamRequestPrompt",
+ "PromptsCallStreamRequestPromptParams",
"PromptsCallStreamRequestToolChoice",
"PromptsCallStreamRequestToolChoiceParams",
]
diff --git a/src/humanloop/prompts/client.py b/src/humanloop/prompts/client.py
index d5de327b..a81e8411 100644
--- a/src/humanloop/prompts/client.py
+++ b/src/humanloop/prompts/client.py
@@ -5,7 +5,7 @@
from .raw_client import RawPromptsClient
from ..requests.chat_message import ChatMessageParams
from .requests.prompt_log_request_tool_choice import PromptLogRequestToolChoiceParams
-from ..requests.prompt_kernel_request import PromptKernelRequestParams
+from .requests.prompt_log_request_prompt import PromptLogRequestPromptParams
import datetime as dt
from ..types.log_status import LogStatus
from ..core.request_options import RequestOptions
@@ -13,11 +13,13 @@
from .requests.prompt_log_update_request_tool_choice import PromptLogUpdateRequestToolChoiceParams
from ..types.log_response import LogResponse
from .requests.prompts_call_stream_request_tool_choice import PromptsCallStreamRequestToolChoiceParams
+from .requests.prompts_call_stream_request_prompt import PromptsCallStreamRequestPromptParams
from ..requests.provider_api_keys import ProviderApiKeysParams
from ..types.prompt_call_stream_response import PromptCallStreamResponse
from .requests.prompts_call_request_tool_choice import PromptsCallRequestToolChoiceParams
+from .requests.prompts_call_request_prompt import PromptsCallRequestPromptParams
from ..types.prompt_call_response import PromptCallResponse
-from ..types.project_sort_by import ProjectSortBy
+from ..types.file_sort_by import FileSortBy
from ..types.sort_order import SortOrder
from ..core.pagination import SyncPager
from ..types.prompt_response import PromptResponse
@@ -85,7 +87,7 @@ def log(
finish_reason: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptLogRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptLogRequestPromptParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -166,8 +168,11 @@ def log(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': <TOOL_NAME>}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptLogRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -480,7 +485,7 @@ def call_stream(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallStreamRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallStreamRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -538,8 +543,11 @@ def call_stream(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': <TOOL_NAME>}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -649,7 +657,7 @@ def call(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -707,8 +715,11 @@ def call(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': <TOOL_NAME>}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -843,7 +854,7 @@ def list(
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
user_filter: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> SyncPager[PromptResponse]:
@@ -864,7 +875,7 @@ def list(
user_filter : typing.Optional[str]
Case-insensitive filter for users in the Prompt. This filter matches against both email address and name of users.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort Prompts by
order : typing.Optional[SortOrder]
@@ -1607,7 +1618,7 @@ def serialize(
version_id: typing.Optional[str] = None,
environment: typing.Optional[str] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> None:
+ ) -> str:
"""
Serialize a Prompt to the .prompt file format.
@@ -1633,7 +1644,8 @@ def serialize(
Returns
-------
- None
+ str
+ Successful Response
Examples
--------
@@ -1719,7 +1731,7 @@ async def log(
finish_reason: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptLogRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptLogRequestPromptParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -1800,8 +1812,11 @@ async def log(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': <TOOL_NAME>}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptLogRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -2131,7 +2146,7 @@ async def call_stream(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallStreamRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallStreamRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2189,8 +2204,11 @@ async def call_stream(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -2309,7 +2327,7 @@ async def call(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2367,8 +2385,11 @@ async def call(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': <TOOL_NAME>}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -2511,7 +2532,7 @@ async def list(
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
user_filter: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncPager[PromptResponse]:
@@ -2532,7 +2553,7 @@ async def list(
user_filter : typing.Optional[str]
Case-insensitive filter for users in the Prompt. This filter matches against both email address and name of users.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort Prompts by
order : typing.Optional[SortOrder]
@@ -3379,7 +3400,7 @@ async def serialize(
version_id: typing.Optional[str] = None,
environment: typing.Optional[str] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> None:
+ ) -> str:
"""
Serialize a Prompt to the .prompt file format.
@@ -3405,7 +3426,8 @@ async def serialize(
Returns
-------
- None
+ str
+ Successful Response
Examples
--------
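
Two changes to the Prompts client compose: the `prompt` argument of `log`, `call`, and `call_stream` widens to a union that also accepts a raw `.prompt` file string, and `serialize` now returns that string instead of `None`. A sketch with hypothetical IDs and paths:

```python
# serialize() now returns the .prompt file contents as a str.
raw_prompt: str = client.prompts.serialize(id="pr_1234567890")  # hypothetical ID

# The object form still works...
client.prompts.call(
    path="demo/assistant",  # hypothetical Prompt path
    prompt={"model": "gpt-4o", "temperature": 0.7},
    messages=[{"role": "user", "content": "Hello"}],
)

# ...and the new string form feeds the serialized file straight back in.
client.prompts.call(
    path="demo/assistant",
    prompt=raw_prompt,
    messages=[{"role": "user", "content": "Hello"}],
)
```
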
diff --git a/src/humanloop/prompts/raw_client.py b/src/humanloop/prompts/raw_client.py
index 2b907d91..d7346742 100644
--- a/src/humanloop/prompts/raw_client.py
+++ b/src/humanloop/prompts/raw_client.py
@@ -4,7 +4,7 @@
from ..core.client_wrapper import SyncClientWrapper
from ..requests.chat_message import ChatMessageParams
from .requests.prompt_log_request_tool_choice import PromptLogRequestToolChoiceParams
-from ..requests.prompt_kernel_request import PromptKernelRequestParams
+from .requests.prompt_log_request_prompt import PromptLogRequestPromptParams
import datetime as dt
from ..types.log_status import LogStatus
from ..core.request_options import RequestOptions
@@ -20,11 +20,13 @@
from ..types.log_response import LogResponse
from ..core.jsonable_encoder import jsonable_encoder
from .requests.prompts_call_stream_request_tool_choice import PromptsCallStreamRequestToolChoiceParams
+from .requests.prompts_call_stream_request_prompt import PromptsCallStreamRequestPromptParams
from ..requests.provider_api_keys import ProviderApiKeysParams
from ..types.prompt_call_stream_response import PromptCallStreamResponse
import httpx_sse
import contextlib
from .requests.prompts_call_request_tool_choice import PromptsCallRequestToolChoiceParams
+from .requests.prompts_call_request_prompt import PromptsCallRequestPromptParams
from ..types.prompt_call_response import PromptCallResponse
from ..types.model_endpoints import ModelEndpoints
from .requests.prompt_request_template import PromptRequestTemplateParams
@@ -73,7 +75,7 @@ def log(
finish_reason: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptLogRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptLogRequestPromptParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -154,8 +156,11 @@ def log(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': <TOOL_NAME>}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptLogRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -249,7 +254,7 @@ def log(
object_=tool_choice, annotation=PromptLogRequestToolChoiceParams, direction="write"
),
"prompt": convert_and_respect_annotation_metadata(
- object_=prompt, annotation=PromptKernelRequestParams, direction="write"
+ object_=prompt, annotation=PromptLogRequestPromptParams, direction="write"
),
"start_time": start_time,
"end_time": end_time,
@@ -496,7 +501,7 @@ def call_stream(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallStreamRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallStreamRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -554,8 +559,11 @@ def call_stream(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': <TOOL_NAME>}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -633,7 +641,7 @@ def call_stream(
object_=tool_choice, annotation=PromptsCallStreamRequestToolChoiceParams, direction="write"
),
"prompt": convert_and_respect_annotation_metadata(
- object_=prompt, annotation=PromptKernelRequestParams, direction="write"
+ object_=prompt, annotation=PromptsCallStreamRequestPromptParams, direction="write"
),
"inputs": inputs,
"source": source,
@@ -706,7 +714,7 @@ def call(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -764,8 +772,11 @@ def call(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': <TOOL_NAME>}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -843,7 +854,7 @@ def call(
object_=tool_choice, annotation=PromptsCallRequestToolChoiceParams, direction="write"
),
"prompt": convert_and_respect_annotation_metadata(
- object_=prompt, annotation=PromptKernelRequestParams, direction="write"
+ object_=prompt, annotation=PromptsCallRequestPromptParams, direction="write"
),
"inputs": inputs,
"source": source,
@@ -1754,7 +1765,7 @@ def serialize(
version_id: typing.Optional[str] = None,
environment: typing.Optional[str] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> HttpResponse[None]:
+ ) -> HttpResponse[str]:
"""
Serialize a Prompt to the .prompt file format.
@@ -1780,7 +1791,8 @@ def serialize(
Returns
-------
- HttpResponse[None]
+ HttpResponse[str]
+ Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
f"prompts/{jsonable_encoder(id)}/serialize",
@@ -1793,7 +1805,7 @@ def serialize(
)
try:
if 200 <= _response.status_code < 300:
- return HttpResponse(response=_response, data=None)
+ return HttpResponse(response=_response, data=_response.text)
if _response.status_code == 422:
raise UnprocessableEntityError(
typing.cast(
@@ -1889,7 +1901,7 @@ async def log(
finish_reason: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptLogRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptLogRequestPromptParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -1970,8 +1982,11 @@ async def log(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': <TOOL_NAME>}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptLogRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -2065,7 +2080,7 @@ async def log(
object_=tool_choice, annotation=PromptLogRequestToolChoiceParams, direction="write"
),
"prompt": convert_and_respect_annotation_metadata(
- object_=prompt, annotation=PromptKernelRequestParams, direction="write"
+ object_=prompt, annotation=PromptLogRequestPromptParams, direction="write"
),
"start_time": start_time,
"end_time": end_time,
@@ -2312,7 +2327,7 @@ async def call_stream(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallStreamRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallStreamRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2370,8 +2385,11 @@ async def call_stream(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': <TOOL_NAME>}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -2449,7 +2467,7 @@ async def call_stream(
object_=tool_choice, annotation=PromptsCallStreamRequestToolChoiceParams, direction="write"
),
"prompt": convert_and_respect_annotation_metadata(
- object_=prompt, annotation=PromptKernelRequestParams, direction="write"
+ object_=prompt, annotation=PromptsCallStreamRequestPromptParams, direction="write"
),
"inputs": inputs,
"source": source,
@@ -2522,7 +2540,7 @@ async def call(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2580,8 +2598,11 @@ async def call(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': <TOOL_NAME>}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -2659,7 +2680,7 @@ async def call(
object_=tool_choice, annotation=PromptsCallRequestToolChoiceParams, direction="write"
),
"prompt": convert_and_respect_annotation_metadata(
- object_=prompt, annotation=PromptKernelRequestParams, direction="write"
+ object_=prompt, annotation=PromptsCallRequestPromptParams, direction="write"
),
"inputs": inputs,
"source": source,
@@ -3572,7 +3593,7 @@ async def serialize(
version_id: typing.Optional[str] = None,
environment: typing.Optional[str] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> AsyncHttpResponse[None]:
+ ) -> AsyncHttpResponse[str]:
"""
Serialize a Prompt to the .prompt file format.
@@ -3598,7 +3619,8 @@ async def serialize(
Returns
-------
- AsyncHttpResponse[None]
+ AsyncHttpResponse[str]
+ Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
f"prompts/{jsonable_encoder(id)}/serialize",
@@ -3611,7 +3633,7 @@ async def serialize(
)
try:
if 200 <= _response.status_code < 300:
- return AsyncHttpResponse(response=_response, data=None)
+ return AsyncHttpResponse(response=_response, data=_response.text)
if _response.status_code == 422:
raise UnprocessableEntityError(
typing.cast(
diff --git a/src/humanloop/prompts/requests/__init__.py b/src/humanloop/prompts/requests/__init__.py
index 3971e252..ae1cfb6a 100644
--- a/src/humanloop/prompts/requests/__init__.py
+++ b/src/humanloop/prompts/requests/__init__.py
@@ -1,19 +1,25 @@
# This file was auto-generated by Fern from our API Definition.
+from .prompt_log_request_prompt import PromptLogRequestPromptParams
from .prompt_log_request_tool_choice import PromptLogRequestToolChoiceParams
from .prompt_log_update_request_tool_choice import PromptLogUpdateRequestToolChoiceParams
from .prompt_request_reasoning_effort import PromptRequestReasoningEffortParams
from .prompt_request_stop import PromptRequestStopParams
from .prompt_request_template import PromptRequestTemplateParams
+from .prompts_call_request_prompt import PromptsCallRequestPromptParams
from .prompts_call_request_tool_choice import PromptsCallRequestToolChoiceParams
+from .prompts_call_stream_request_prompt import PromptsCallStreamRequestPromptParams
from .prompts_call_stream_request_tool_choice import PromptsCallStreamRequestToolChoiceParams
__all__ = [
+ "PromptLogRequestPromptParams",
"PromptLogRequestToolChoiceParams",
"PromptLogUpdateRequestToolChoiceParams",
"PromptRequestReasoningEffortParams",
"PromptRequestStopParams",
"PromptRequestTemplateParams",
+ "PromptsCallRequestPromptParams",
"PromptsCallRequestToolChoiceParams",
+ "PromptsCallStreamRequestPromptParams",
"PromptsCallStreamRequestToolChoiceParams",
]
diff --git a/src/humanloop/prompts/requests/prompt_log_request_prompt.py b/src/humanloop/prompts/requests/prompt_log_request_prompt.py
new file mode 100644
index 00000000..8473bb42
--- /dev/null
+++ b/src/humanloop/prompts/requests/prompt_log_request_prompt.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.prompt_kernel_request import PromptKernelRequestParams
+
+PromptLogRequestPromptParams = typing.Union[PromptKernelRequestParams, str]
diff --git a/src/humanloop/prompts/requests/prompts_call_request_prompt.py b/src/humanloop/prompts/requests/prompts_call_request_prompt.py
new file mode 100644
index 00000000..7a236235
--- /dev/null
+++ b/src/humanloop/prompts/requests/prompts_call_request_prompt.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.prompt_kernel_request import PromptKernelRequestParams
+
+PromptsCallRequestPromptParams = typing.Union[PromptKernelRequestParams, str]
diff --git a/src/humanloop/prompts/requests/prompts_call_stream_request_prompt.py b/src/humanloop/prompts/requests/prompts_call_stream_request_prompt.py
new file mode 100644
index 00000000..9524425b
--- /dev/null
+++ b/src/humanloop/prompts/requests/prompts_call_stream_request_prompt.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.prompt_kernel_request import PromptKernelRequestParams
+
+PromptsCallStreamRequestPromptParams = typing.Union[PromptKernelRequestParams, str]
diff --git a/src/humanloop/prompts/types/__init__.py b/src/humanloop/prompts/types/__init__.py
index 1b849e7d..40326bce 100644
--- a/src/humanloop/prompts/types/__init__.py
+++ b/src/humanloop/prompts/types/__init__.py
@@ -1,19 +1,25 @@
# This file was auto-generated by Fern from our API Definition.
+from .prompt_log_request_prompt import PromptLogRequestPrompt
from .prompt_log_request_tool_choice import PromptLogRequestToolChoice
from .prompt_log_update_request_tool_choice import PromptLogUpdateRequestToolChoice
from .prompt_request_reasoning_effort import PromptRequestReasoningEffort
from .prompt_request_stop import PromptRequestStop
from .prompt_request_template import PromptRequestTemplate
+from .prompts_call_request_prompt import PromptsCallRequestPrompt
from .prompts_call_request_tool_choice import PromptsCallRequestToolChoice
+from .prompts_call_stream_request_prompt import PromptsCallStreamRequestPrompt
from .prompts_call_stream_request_tool_choice import PromptsCallStreamRequestToolChoice
__all__ = [
+ "PromptLogRequestPrompt",
"PromptLogRequestToolChoice",
"PromptLogUpdateRequestToolChoice",
"PromptRequestReasoningEffort",
"PromptRequestStop",
"PromptRequestTemplate",
+ "PromptsCallRequestPrompt",
"PromptsCallRequestToolChoice",
+ "PromptsCallStreamRequestPrompt",
"PromptsCallStreamRequestToolChoice",
]
diff --git a/src/humanloop/prompts/types/prompt_log_request_prompt.py b/src/humanloop/prompts/types/prompt_log_request_prompt.py
new file mode 100644
index 00000000..4a0791dc
--- /dev/null
+++ b/src/humanloop/prompts/types/prompt_log_request_prompt.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.prompt_kernel_request import PromptKernelRequest
+
+PromptLogRequestPrompt = typing.Union[PromptKernelRequest, str]
diff --git a/src/humanloop/prompts/types/prompts_call_request_prompt.py b/src/humanloop/prompts/types/prompts_call_request_prompt.py
new file mode 100644
index 00000000..78a9f5a1
--- /dev/null
+++ b/src/humanloop/prompts/types/prompts_call_request_prompt.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.prompt_kernel_request import PromptKernelRequest
+
+PromptsCallRequestPrompt = typing.Union[PromptKernelRequest, str]
diff --git a/src/humanloop/prompts/types/prompts_call_stream_request_prompt.py b/src/humanloop/prompts/types/prompts_call_stream_request_prompt.py
new file mode 100644
index 00000000..71376823
--- /dev/null
+++ b/src/humanloop/prompts/types/prompts_call_stream_request_prompt.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.prompt_kernel_request import PromptKernelRequest
+
+PromptsCallStreamRequestPrompt = typing.Union[PromptKernelRequest, str]
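
Each new module is a thin `typing.Union` alias pairing the existing kernel-request shape with `str`. Callers can annotate against the alias and satisfy it either way; a sketch (the `.prompt` file is hypothetical):

```python
from pathlib import Path

from humanloop.prompts.requests import PromptsCallRequestPromptParams


def load_prompt(use_raw_file: bool) -> PromptsCallRequestPromptParams:
    if use_raw_file:
        # str branch of the union: raw .prompt file contents.
        return Path("assistant.prompt").read_text()  # hypothetical file
    # dict branch of the union: PromptKernelRequestParams.
    return {"model": "gpt-4o"}
```
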
diff --git a/src/humanloop/requests/__init__.py b/src/humanloop/requests/__init__.py
index fb1580df..ba9f74af 100644
--- a/src/humanloop/requests/__init__.py
+++ b/src/humanloop/requests/__init__.py
@@ -5,10 +5,10 @@
from .agent_call_stream_response import AgentCallStreamResponseParams
from .agent_call_stream_response_payload import AgentCallStreamResponsePayloadParams
from .agent_config_response import AgentConfigResponseParams
-from .agent_continue_call_response import AgentContinueCallResponseParams
-from .agent_continue_call_response_tool_choice import AgentContinueCallResponseToolChoiceParams
-from .agent_continue_call_stream_response import AgentContinueCallStreamResponseParams
-from .agent_continue_call_stream_response_payload import AgentContinueCallStreamResponsePayloadParams
+from .agent_continue_response import AgentContinueResponseParams
+from .agent_continue_response_tool_choice import AgentContinueResponseToolChoiceParams
+from .agent_continue_stream_response import AgentContinueStreamResponseParams
+from .agent_continue_stream_response_payload import AgentContinueStreamResponsePayloadParams
from .agent_inline_tool import AgentInlineToolParams
from .agent_kernel_request import AgentKernelRequestParams
from .agent_kernel_request_reasoning_effort import AgentKernelRequestReasoningEffortParams
@@ -178,10 +178,10 @@
"AgentCallStreamResponseParams",
"AgentCallStreamResponsePayloadParams",
"AgentConfigResponseParams",
- "AgentContinueCallResponseParams",
- "AgentContinueCallResponseToolChoiceParams",
- "AgentContinueCallStreamResponseParams",
- "AgentContinueCallStreamResponsePayloadParams",
+ "AgentContinueResponseParams",
+ "AgentContinueResponseToolChoiceParams",
+ "AgentContinueStreamResponseParams",
+ "AgentContinueStreamResponsePayloadParams",
"AgentInlineToolParams",
"AgentKernelRequestParams",
"AgentKernelRequestReasoningEffortParams",
diff --git a/src/humanloop/requests/agent_continue_call_response.py b/src/humanloop/requests/agent_continue_call_response.py
deleted file mode 100644
index 90938dea..00000000
--- a/src/humanloop/requests/agent_continue_call_response.py
+++ /dev/null
@@ -1,202 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing_extensions
-import typing_extensions
-from .chat_message import ChatMessageParams
-import typing
-from .agent_continue_call_response_tool_choice import AgentContinueCallResponseToolChoiceParams
-from .agent_response import AgentResponseParams
-import datetime as dt
-from ..types.log_status import LogStatus
-from .evaluator_log_response import EvaluatorLogResponseParams
-from .log_response import LogResponseParams
-
-
-class AgentContinueCallResponseParams(typing_extensions.TypedDict):
- """
- Response model for continuing an Agent call.
- """
-
- output_message: typing_extensions.NotRequired[ChatMessageParams]
- """
- The message returned by the provider.
- """
-
- prompt_tokens: typing_extensions.NotRequired[int]
- """
- Number of tokens in the prompt used to generate the output.
- """
-
- reasoning_tokens: typing_extensions.NotRequired[int]
- """
- Number of reasoning tokens used to generate the output.
- """
-
- output_tokens: typing_extensions.NotRequired[int]
- """
- Number of tokens in the output generated by the model.
- """
-
- prompt_cost: typing_extensions.NotRequired[float]
- """
- Cost in dollars associated to the tokens in the prompt.
- """
-
- output_cost: typing_extensions.NotRequired[float]
- """
- Cost in dollars associated to the tokens in the output.
- """
-
- finish_reason: typing_extensions.NotRequired[str]
- """
- Reason the generation finished.
- """
-
- messages: typing_extensions.NotRequired[typing.Sequence[ChatMessageParams]]
- """
- The messages passed to the provider chat endpoint.
- """
-
- tool_choice: typing_extensions.NotRequired[AgentContinueCallResponseToolChoiceParams]
- """
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {'name': <TOOL_NAME>}}` forces the model to use the named function.
- """
-
- agent: AgentResponseParams
- """
- Agent that generated the Log.
- """
-
- start_time: typing_extensions.NotRequired[dt.datetime]
- """
- When the logged event started.
- """
-
- end_time: typing_extensions.NotRequired[dt.datetime]
- """
- When the logged event ended.
- """
-
- output: typing_extensions.NotRequired[str]
- """
- Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
- """
-
- created_at: typing_extensions.NotRequired[dt.datetime]
- """
- User defined timestamp for when the log was created.
- """
-
- error: typing_extensions.NotRequired[str]
- """
- Error message if the log is an error.
- """
-
- provider_latency: typing_extensions.NotRequired[float]
- """
- Duration of the logged event in seconds.
- """
-
- stdout: typing_extensions.NotRequired[str]
- """
- Captured log and debug statements.
- """
-
- provider_request: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
- """
- Raw request sent to provider.
- """
-
- provider_response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
- """
- Raw response received from the provider.
- """
-
- inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
- """
- The inputs passed to the prompt template.
- """
-
- source: typing_extensions.NotRequired[str]
- """
- Identifies where the model was called from.
- """
-
- metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
- """
- Any additional metadata to record.
- """
-
- log_status: typing_extensions.NotRequired[LogStatus]
- """
- Status of the Agent Log. If `incomplete`, the Agent turn was suspended due to a tool call and can be continued by calling /agents/continue with responses to the Agent's last message (which should contain tool calls). See the `previous_agent_message` field for easy access to the Agent's last message.
- """
-
- source_datapoint_id: typing_extensions.NotRequired[str]
- """
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
- """
-
- trace_parent_id: typing_extensions.NotRequired[str]
- """
- The ID of the parent Log to nest this Log under in a Trace.
- """
-
- batches: typing_extensions.NotRequired[typing.Sequence[str]]
- """
- Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations
- """
-
- user: typing_extensions.NotRequired[str]
- """
- End-user ID related to the Log.
- """
-
- environment: typing_extensions.NotRequired[str]
- """
- The name of the Environment the Log is associated with.
- """
-
- save: typing_extensions.NotRequired[bool]
- """
- Whether the request/response payloads will be stored on Humanloop.
- """
-
- log_id: typing_extensions.NotRequired[str]
- """
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- """
-
- id: str
- """
- Unique identifier for the Log.
- """
-
- evaluator_logs: typing.Sequence[EvaluatorLogResponseParams]
- """
- List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
- """
-
- trace_flow_id: typing_extensions.NotRequired[str]
- """
- Identifier for the Flow that the Trace belongs to.
- """
-
- trace_id: typing_extensions.NotRequired[str]
- """
- Identifier for the Trace that the Log belongs to.
- """
-
- trace_children: typing_extensions.NotRequired[typing.Sequence[LogResponseParams]]
- """
- Logs nested under this Log in the Trace.
- """
-
- previous_agent_message: typing_extensions.NotRequired[ChatMessageParams]
- """
- The Agent's last message, which should contain tool calls. Only populated if the Log is incomplete due to a suspended Agent turn with tool calls. This is useful for continuing the Agent call by calling /agents/continue.
- """
diff --git a/src/humanloop/requests/agent_continue_call_response_tool_choice.py b/src/humanloop/requests/agent_continue_call_response_tool_choice.py
deleted file mode 100644
index 4722dd2e..00000000
--- a/src/humanloop/requests/agent_continue_call_response_tool_choice.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-from .tool_choice import ToolChoiceParams
-
-AgentContinueCallResponseToolChoiceParams = typing.Union[
- typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams
-]
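The union itself survives the rename (the new `agent_continue_response_tool_choice.py` appears in the file list above); for reference, a sketch of the values such a union admits, with a hypothetical function name:

```python
# Values accepted by a tool_choice union of this shape.
# "get_weather" is a hypothetical function name used for illustration.
tool_choice = "auto"      # model decides whether to call tools (default when tools are provided)
tool_choice = "none"      # model answers without tools (default when no tools are provided)
tool_choice = "required"  # model must call at least one tool
tool_choice = {
    "type": "function",
    "function": {"name": "get_weather"},  # force this named function
}
```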
diff --git a/src/humanloop/requests/agent_continue_call_stream_response.py b/src/humanloop/requests/agent_continue_call_stream_response.py
deleted file mode 100644
index 3eb2b498..00000000
--- a/src/humanloop/requests/agent_continue_call_stream_response.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing_extensions
-import typing_extensions
-from .agent_continue_call_stream_response_payload import AgentContinueCallStreamResponsePayloadParams
-from ..types.event_type import EventType
-import datetime as dt
-
-
-class AgentContinueCallStreamResponseParams(typing_extensions.TypedDict):
- """
- Response model for continuing an Agent call in streaming mode.
- """
-
- log_id: str
- message: str
- payload: typing_extensions.NotRequired[AgentContinueCallStreamResponsePayloadParams]
- type: EventType
- created_at: dt.datetime
diff --git a/src/humanloop/requests/agent_continue_call_stream_response_payload.py b/src/humanloop/requests/agent_continue_call_stream_response_payload.py
deleted file mode 100644
index 87e1562b..00000000
--- a/src/humanloop/requests/agent_continue_call_stream_response_payload.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-from .log_stream_response import LogStreamResponseParams
-from .log_response import LogResponseParams
-from .tool_call import ToolCallParams
-
-AgentContinueCallStreamResponsePayloadParams = typing.Union[LogStreamResponseParams, LogResponseParams, ToolCallParams]
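Since the streamed payload remains a union of log chunks, final logs, and tool calls, consumers typically branch on the event's `type` field. A hedged sketch (the exact `EventType` string values are assumptions, not confirmed by this diff):

```python
# Hedged sketch of consuming AgentContinue* stream events whose
# payloads form a LogStreamResponse / LogResponse / ToolCall union.
for event in stream:
    if event.type == "tool_call":        # assumed EventType value
        handle_tool_call(event.payload)  # payload: ToolCall
    elif event.type == "log":            # assumed EventType value
        record_log(event.payload)        # payload: LogResponse / LogStreamResponse
```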
diff --git a/src/humanloop/tools/client.py b/src/humanloop/tools/client.py
index ea6b14a2..f58fc3d8 100644
--- a/src/humanloop/tools/client.py
+++ b/src/humanloop/tools/client.py
@@ -10,7 +10,7 @@
from ..types.tool_call_response import ToolCallResponse
from ..types.create_tool_log_response import CreateToolLogResponse
from ..types.log_response import LogResponse
-from ..types.project_sort_by import ProjectSortBy
+from ..types.file_sort_by import FileSortBy
from ..types.sort_order import SortOrder
from ..core.pagination import SyncPager
from ..types.tool_response import ToolResponse
@@ -479,7 +479,7 @@ def list(
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
user_filter: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> SyncPager[ToolResponse]:
@@ -500,7 +500,7 @@ def list(
user_filter : typing.Optional[str]
Case-insensitive filter for users in the Tool. This filter matches against both email address and name of users.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort Tools by
order : typing.Optional[SortOrder]
@@ -1666,7 +1666,7 @@ async def list(
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
user_filter: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncPager[ToolResponse]:
@@ -1687,7 +1687,7 @@ async def list(
user_filter : typing.Optional[str]
Case-insensitive filter for users in the Tool. This filter matches against both email address and name of users.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort Tools by
order : typing.Optional[SortOrder]
diff --git a/src/humanloop/types/__init__.py b/src/humanloop/types/__init__.py
index 7c1d30f5..7814f611 100644
--- a/src/humanloop/types/__init__.py
+++ b/src/humanloop/types/__init__.py
@@ -5,10 +5,10 @@
from .agent_call_stream_response import AgentCallStreamResponse
from .agent_call_stream_response_payload import AgentCallStreamResponsePayload
from .agent_config_response import AgentConfigResponse
-from .agent_continue_call_response import AgentContinueCallResponse
-from .agent_continue_call_response_tool_choice import AgentContinueCallResponseToolChoice
-from .agent_continue_call_stream_response import AgentContinueCallStreamResponse
-from .agent_continue_call_stream_response_payload import AgentContinueCallStreamResponsePayload
+from .agent_continue_response import AgentContinueResponse
+from .agent_continue_response_tool_choice import AgentContinueResponseToolChoice
+from .agent_continue_stream_response import AgentContinueStreamResponse
+from .agent_continue_stream_response_payload import AgentContinueStreamResponsePayload
from .agent_inline_tool import AgentInlineTool
from .agent_kernel_request import AgentKernelRequest
from .agent_kernel_request_reasoning_effort import AgentKernelRequestReasoningEffort
@@ -94,6 +94,7 @@
from .file_id import FileId
from .file_path import FilePath
from .file_request import FileRequest
+from .file_sort_by import FileSortBy
from .file_type import FileType
from .files_tool_type import FilesToolType
from .flow_kernel_request import FlowKernelRequest
@@ -155,7 +156,6 @@
from .populate_template_response_reasoning_effort import PopulateTemplateResponseReasoningEffort
from .populate_template_response_stop import PopulateTemplateResponseStop
from .populate_template_response_template import PopulateTemplateResponseTemplate
-from .project_sort_by import ProjectSortBy
from .prompt_call_log_response import PromptCallLogResponse
from .prompt_call_response import PromptCallResponse
from .prompt_call_response_tool_choice import PromptCallResponseToolChoice
@@ -212,10 +212,10 @@
"AgentCallStreamResponse",
"AgentCallStreamResponsePayload",
"AgentConfigResponse",
- "AgentContinueCallResponse",
- "AgentContinueCallResponseToolChoice",
- "AgentContinueCallStreamResponse",
- "AgentContinueCallStreamResponsePayload",
+ "AgentContinueResponse",
+ "AgentContinueResponseToolChoice",
+ "AgentContinueStreamResponse",
+ "AgentContinueStreamResponsePayload",
"AgentInlineTool",
"AgentKernelRequest",
"AgentKernelRequestReasoningEffort",
@@ -299,6 +299,7 @@
"FileId",
"FilePath",
"FileRequest",
+ "FileSortBy",
"FileType",
"FilesToolType",
"FlowKernelRequest",
@@ -356,7 +357,6 @@
"PopulateTemplateResponseReasoningEffort",
"PopulateTemplateResponseStop",
"PopulateTemplateResponseTemplate",
- "ProjectSortBy",
"PromptCallLogResponse",
"PromptCallResponse",
"PromptCallResponseToolChoice",
diff --git a/src/humanloop/types/agent_continue_call_response.py b/src/humanloop/types/agent_continue_call_response.py
deleted file mode 100644
index c98af953..00000000
--- a/src/humanloop/types/agent_continue_call_response.py
+++ /dev/null
@@ -1,224 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .agent_linked_file_response import AgentLinkedFileResponse
-from .agent_response import AgentResponse
-from .evaluator_response import EvaluatorResponse
-from .flow_response import FlowResponse
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse
-from .prompt_response import PromptResponse
-from .tool_response import ToolResponse
-from .version_deployment_response import VersionDeploymentResponse
-from .version_id_response import VersionIdResponse
-from .agent_log_response import AgentLogResponse
-from .evaluator_log_response import EvaluatorLogResponse
-from .flow_log_response import FlowLogResponse
-from .prompt_log_response import PromptLogResponse
-from .tool_log_response import ToolLogResponse
-import typing
-from .chat_message import ChatMessage
-import pydantic
-from .agent_continue_call_response_tool_choice import AgentContinueCallResponseToolChoice
-import datetime as dt
-from .log_status import LogStatus
-from .log_response import LogResponse
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-
-
-class AgentContinueCallResponse(UncheckedBaseModel):
- """
- Response model for continuing an Agent call.
- """
-
- output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
- """
- The message returned by the provider.
- """
-
- prompt_tokens: typing.Optional[int] = pydantic.Field(default=None)
- """
- Number of tokens in the prompt used to generate the output.
- """
-
- reasoning_tokens: typing.Optional[int] = pydantic.Field(default=None)
- """
- Number of reasoning tokens used to generate the output.
- """
-
- output_tokens: typing.Optional[int] = pydantic.Field(default=None)
- """
- Number of tokens in the output generated by the model.
- """
-
- prompt_cost: typing.Optional[float] = pydantic.Field(default=None)
- """
- Cost in dollars associated with the tokens in the prompt.
- """
-
- output_cost: typing.Optional[float] = pydantic.Field(default=None)
- """
- Cost in dollars associated with the tokens in the output.
- """
-
- finish_reason: typing.Optional[str] = pydantic.Field(default=None)
- """
- Reason the generation finished.
- """
-
- messages: typing.Optional[typing.List[ChatMessage]] = pydantic.Field(default=None)
- """
- The messages passed to the provider chat endpoint.
- """
-
- tool_choice: typing.Optional[AgentContinueCallResponseToolChoice] = pydantic.Field(default=None)
- """
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- """
-
- agent: AgentResponse = pydantic.Field()
- """
- Agent that generated the Log.
- """
-
- start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
- """
- When the logged event started.
- """
-
- end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
- """
- When the logged event ended.
- """
-
- output: typing.Optional[str] = pydantic.Field(default=None)
- """
- Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
- """
-
- created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
- """
- User defined timestamp for when the log was created.
- """
-
- error: typing.Optional[str] = pydantic.Field(default=None)
- """
- Error message if the log is an error.
- """
-
- provider_latency: typing.Optional[float] = pydantic.Field(default=None)
- """
- Duration of the logged event in seconds.
- """
-
- stdout: typing.Optional[str] = pydantic.Field(default=None)
- """
- Captured log and debug statements.
- """
-
- provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
- """
- Raw request sent to provider.
- """
-
- provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
- """
- Raw response received from the provider.
- """
-
- inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
- """
- The inputs passed to the prompt template.
- """
-
- source: typing.Optional[str] = pydantic.Field(default=None)
- """
- Identifies where the model was called from.
- """
-
- metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
- """
- Any additional metadata to record.
- """
-
- log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
- """
- Status of the Agent Log. If `incomplete`, the Agent turn was suspended due to a tool call and can be continued by calling /agents/continue with responses to the Agent's last message (which should contain tool calls). See the `previous_agent_message` field for easy access to the Agent's last message.
- """
-
- source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None)
- """
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
- """
-
- trace_parent_id: typing.Optional[str] = pydantic.Field(default=None)
- """
- The ID of the parent Log to nest this Log under in a Trace.
- """
-
- batches: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
- """
- Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations.
- """
-
- user: typing.Optional[str] = pydantic.Field(default=None)
- """
- End-user ID related to the Log.
- """
-
- environment: typing.Optional[str] = pydantic.Field(default=None)
- """
- The name of the Environment the Log is associated with.
- """
-
- save: typing.Optional[bool] = pydantic.Field(default=None)
- """
- Whether the request/response payloads will be stored on Humanloop.
- """
-
- log_id: typing.Optional[str] = pydantic.Field(default=None)
- """
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- """
-
- id: str = pydantic.Field()
- """
- Unique identifier for the Log.
- """
-
- evaluator_logs: typing.List[EvaluatorLogResponse] = pydantic.Field()
- """
- List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
- """
-
- trace_flow_id: typing.Optional[str] = pydantic.Field(default=None)
- """
- Identifier for the Flow that the Trace belongs to.
- """
-
- trace_id: typing.Optional[str] = pydantic.Field(default=None)
- """
- Identifier for the Trace that the Log belongs to.
- """
-
- trace_children: typing.Optional[typing.List[LogResponse]] = pydantic.Field(default=None)
- """
- Logs nested under this Log in the Trace.
- """
-
- previous_agent_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
- """
- The Agent's last message, which should contain tool calls. Only populated if the Log is incomplete due to a suspended Agent turn with tool calls. This is useful for continuing the Agent call by calling /agents/continue.
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/agent_continue_call_response_tool_choice.py b/src/humanloop/types/agent_continue_call_response_tool_choice.py
deleted file mode 100644
index 5b90e98d..00000000
--- a/src/humanloop/types/agent_continue_call_response_tool_choice.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-from .tool_choice import ToolChoice
-
-AgentContinueCallResponseToolChoice = typing.Union[
- typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice
-]
diff --git a/src/humanloop/types/agent_continue_call_stream_response.py b/src/humanloop/types/agent_continue_call_stream_response.py
deleted file mode 100644
index cdd34dce..00000000
--- a/src/humanloop/types/agent_continue_call_stream_response.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .agent_linked_file_response import AgentLinkedFileResponse
-from .agent_log_response import AgentLogResponse
-from .agent_response import AgentResponse
-from .evaluator_log_response import EvaluatorLogResponse
-from .evaluator_response import EvaluatorResponse
-from .flow_log_response import FlowLogResponse
-from .flow_response import FlowResponse
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse
-from .prompt_log_response import PromptLogResponse
-from .prompt_response import PromptResponse
-from .tool_log_response import ToolLogResponse
-from .tool_response import ToolResponse
-from .version_deployment_response import VersionDeploymentResponse
-from .version_id_response import VersionIdResponse
-import typing
-from .agent_continue_call_stream_response_payload import AgentContinueCallStreamResponsePayload
-from .event_type import EventType
-import datetime as dt
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-import pydantic
-
-
-class AgentContinueCallStreamResponse(UncheckedBaseModel):
- """
- Response model for continuing an Agent call in streaming mode.
- """
-
- log_id: str
- message: str
- payload: typing.Optional[AgentContinueCallStreamResponsePayload] = None
- type: EventType
- created_at: dt.datetime
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/agent_continue_call_stream_response_payload.py b/src/humanloop/types/agent_continue_call_stream_response_payload.py
deleted file mode 100644
index 8e23829b..00000000
--- a/src/humanloop/types/agent_continue_call_stream_response_payload.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-from .log_stream_response import LogStreamResponse
-from .log_response import LogResponse
-from .tool_call import ToolCall
-
-AgentContinueCallStreamResponsePayload = typing.Union[LogStreamResponse, LogResponse, ToolCall]
diff --git a/src/humanloop/types/file_sort_by.py b/src/humanloop/types/file_sort_by.py
new file mode 100644
index 00000000..b3135c3b
--- /dev/null
+++ b/src/humanloop/types/file_sort_by.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+FileSortBy = typing.Union[typing.Literal["created_at", "updated_at", "name"], typing.Any]
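Because `FileSortBy` (like `ProjectSortBy` before it) is a plain string union, callers can pass a literal directly. A usage sketch against the updated `tools.list` signature (the API key is a placeholder; iterating the pager, the `.path` attribute, and `SortOrder` accepting `"asc"`/`"desc"` are assumptions):

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")  # placeholder key

# sort_by now takes a FileSortBy literal: "created_at", "updated_at" or "name".
for tool in client.tools.list(size=10, sort_by="updated_at", order="desc"):
    print(tool.path)
```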
diff --git a/src/humanloop/types/project_sort_by.py b/src/humanloop/types/project_sort_by.py
deleted file mode 100644
index b8265b56..00000000
--- a/src/humanloop/types/project_sort_by.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-ProjectSortBy = typing.Union[typing.Literal["created_at", "updated_at", "name"], typing.Any]
diff --git a/src/humanloop/types/version_id_response_version.py b/src/humanloop/types/version_id_response_version.py
index 1b74199f..b1cbd45d 100644
--- a/src/humanloop/types/version_id_response_version.py
+++ b/src/humanloop/types/version_id_response_version.py
@@ -3,6 +3,7 @@
from __future__ import annotations
import typing
from .dataset_response import DatasetResponse
+import typing
if typing.TYPE_CHECKING:
from .prompt_response import PromptResponse
@@ -11,10 +12,5 @@
from .flow_response import FlowResponse
from .agent_response import AgentResponse
VersionIdResponseVersion = typing.Union[
- "PromptResponse",
- "ToolResponse",
- DatasetResponse,
- "EvaluatorResponse",
- "FlowResponse",
- "AgentResponse",
+ "PromptResponse", "ToolResponse", DatasetResponse, "EvaluatorResponse", "FlowResponse", "AgentResponse"
]
diff --git a/src/humanloop/types/version_reference_response.py b/src/humanloop/types/version_reference_response.py
index a6a7783c..399361c8 100644
--- a/src/humanloop/types/version_reference_response.py
+++ b/src/humanloop/types/version_reference_response.py
@@ -2,6 +2,7 @@
from __future__ import annotations
import typing
+import typing
if typing.TYPE_CHECKING:
from .version_deployment_response import VersionDeploymentResponse
From 0414fe6781d25794718284817e26edec9931a54a Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Tue, 6 May 2025 13:12:28 +0100
Subject: [PATCH 34/39] docs: capitalize Prompt and Agent in doc strings
---
src/humanloop/client.py | 10 +++++-----
src/humanloop/sync/sync_client.py | 12 ++++++------
2 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/src/humanloop/client.py b/src/humanloop/client.py
index fdfae13f..996b75ad 100644
--- a/src/humanloop/client.py
+++ b/src/humanloop/client.py
@@ -391,18 +391,18 @@ def pull(self,
environment: str | None = None,
path: str | None = None
) -> List[str]:
- """Pull prompt and agent files from Humanloop to local filesystem.
+ """Pull Prompt and Agent files from Humanloop to local filesystem.
This method will:
- 1. Fetch prompt and agent files from your Humanloop workspace
+ 1. Fetch Prompt and Agent files from your Humanloop workspace
2. Save them to the local filesystem using the client's files_directory (set during initialization)
3. Maintain the same directory structure as in Humanloop
4. Add appropriate file extensions (.prompt or .agent)
The path parameter can be used in two ways:
- If it points to a specific file (e.g. "path/to/file.prompt" or "path/to/file.agent"), only that file will be pulled
- - If it points to a directory (e.g. "path/to/directory"), all prompt and agent files in that directory will be pulled
- - If no path is provided, all prompt and agent files will be pulled
+ - If it points to a directory (e.g. "path/to/directory"), all Prompt and Agent files in that directory will be pulled
+ - If no path is provided, all Prompt and Agent files will be pulled
The operation will overwrite existing files with the latest version from Humanloop
but will not delete local files that don't exist in the remote workspace.
@@ -422,7 +422,7 @@ def pull(self,
:param environment: The environment to pull the files from.
:param path: Optional path to either a specific file (e.g. "path/to/file.prompt") or a directory (e.g. "path/to/directory").
- If not provided, all prompt and agent files will be pulled.
+ If not provided, all Prompt and Agent files will be pulled.
:return: List of successfully processed file paths.
"""
return self._sync_client.pull(
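A short usage sketch of the `pull` method documented above (API key is a placeholder; paths are taken from the docstring's own examples):

```python
from humanloop import Humanloop

hl = Humanloop(api_key="YOUR_API_KEY")  # placeholder key

# Pull a single file from a given environment...
hl.pull(path="path/to/file.prompt", environment="production")

# ...or every Prompt/Agent file under a directory.
pulled = hl.pull(path="path/to/directory")
print(pulled)  # list of successfully processed file paths
```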
diff --git a/src/humanloop/sync/sync_client.py b/src/humanloop/sync/sync_client.py
index d890d1fd..ce82ebcb 100644
--- a/src/humanloop/sync/sync_client.py
+++ b/src/humanloop/sync/sync_client.py
@@ -62,7 +62,7 @@ def _get_file_content_impl(self, path: str, file_type: FileType) -> str:
Args:
path: The normalized path to the file (without extension)
- file_type: The type of file (prompt or agent)
+ file_type: The type of file (Prompt or Agent)
Returns:
The file content
@@ -95,7 +95,7 @@ def get_file_content(self, path: str, file_type: FileType) -> str:
Args:
path: The normalized path to the file (without extension)
- file_type: The type of file (prompt or agent)
+ file_type: The type of file (Prompt or Agent)
Returns:
The file content
@@ -154,8 +154,8 @@ def _save_serialized_file(self, serialized_content: str, file_path: str, file_ty
Args:
serialized_content: The content to save
- file_path: The path where to save the file
- file_type: The type of file (prompt or agent)
+ file_path: The path to save the file to
+ file_type: The type of file (Prompt or Agent)
Raises:
Exception: If there is an error saving the file
@@ -207,7 +207,7 @@ def _pull_directory(self,
directory: str | None = None,
environment: str | None = None,
) -> List[str]:
- """Sync prompt and agent files from Humanloop to local filesystem.
+ """Sync Prompt and Agent files from Humanloop to local filesystem.
If `path` is provided, only the files under that path will be pulled.
If `environment` is provided, the files will be pulled from that environment.
@@ -241,7 +241,7 @@ def _pull_directory(self,
# Process each file
for file in response.records:
- # Skip if not a prompt or agent
+ # Skip if not a Prompt or Agent
if file.type not in ["prompt", "agent"]:
logger.warning(f"Skipping unsupported file type: {file.type}")
continue
From 550fb44135ef42f510773d3922858b4d04369ad0 Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Tue, 6 May 2025 14:57:59 +0100
Subject: [PATCH 35/39] Improve overloading and warnings; content -> raw file
content
---
src/humanloop/overload.py | 49 ++++++++++++++++++++++---------
src/humanloop/sync/sync_client.py | 30 +++++++++----------
2 files changed, 50 insertions(+), 29 deletions(-)
diff --git a/src/humanloop/overload.py b/src/humanloop/overload.py
index 43403b8c..bd409236 100644
--- a/src/humanloop/overload.py
+++ b/src/humanloop/overload.py
@@ -2,7 +2,7 @@
import logging
import types
import warnings
-from typing import TypeVar, Union, Literal, Optional
+from typing import TypeVar, Union
from pathlib import Path
from humanloop.context import (
get_decorator_context,
@@ -143,10 +143,25 @@ def overload_with_local_files(
) -> Union[PromptsClient, AgentsClient]:
"""Overload call and log methods to handle local files when use_local_files is True.
- When use_local_files is True:
- - If only path is specified (no version_id or environment), attempts to use local file
- - If local file is not found or cannot be read, raises an error
- - If version_id and/or environment is specified, uses remote version with a warning
+ When use_local_files is True, the following prioritization strategy is used:
+ 1. Direct Parameters: If {file_type} parameters are provided directly (as a PromptKernelRequestParams or AgentKernelRequestParams object),
+ these take precedence and the local file is ignored.
+ 2. Version/Environment: If version_id or environment is specified, the remote version is used instead
+ of the local file.
+ 3. Local File: If neither of the above are specified, attempts to use the local file at the given path.
+
+ For example, with a prompt client:
+ - If prompt={model: "gpt-4", ...} is provided, uses those parameters directly
+ - If version_id="123" is provided, uses that remote version
+ - Otherwise, tries to load from the local file at the given path
+
+ Args:
+ client: The client to overload (PromptsClient or AgentsClient)
+ sync_client: The sync client used for file operations
+ use_local_files: Whether to enable local file handling
+
+ Returns:
+ The client with overloaded methods
Raises:
HumanloopRuntimeError: If use_local_files is True and local file cannot be accessed
@@ -156,28 +171,34 @@ def overload_with_local_files(
file_type = _get_file_type_from_client(client)
def _overload(self, function_name: str, **kwargs) -> PromptCallResponse:
- if "id" and "path" in kwargs:
+ if "id" in kwargs and "path" in kwargs:
raise HumanloopRuntimeError(f"Can only specify one of `id` or `path` when {function_name}ing a {file_type}")
# Handle local files if enabled
if use_local_files and "path" in kwargs:
# Check if version_id or environment is specified
has_version_info = "version_id" in kwargs or "environment" in kwargs
+ normalized_path = sync_client._normalize_path(kwargs["path"])
if has_version_info:
- warnings.warn(
- f"Ignoring local file for {kwargs['path']} as version_id or environment was specified. "
- "Using remote version instead.",
- UserWarning
+ logger.warning(
+ f"Ignoring local file for `{normalized_path}` as version_id or environment was specified. "
+ "Using remote version instead."
)
else:
# Only use local file if no version info is specified
- normalized_path = sync_client._normalize_path(kwargs["path"])
try:
- file_content = sync_client.get_file_content(normalized_path, file_type)
- kwargs[file_type] = file_content
+                    # If file_type is already specified in kwargs, it means the user directly provided a PromptKernelRequestParams or AgentKernelRequestParams object
+ if file_type in kwargs and not isinstance(kwargs[file_type], str):
+ logger.warning(
+ f"Ignoring local file for `{normalized_path}` as {file_type} parameters were directly provided. "
+ "Using provided parameters instead."
+ )
+ else:
+ file_content = sync_client.get_file_content(normalized_path, file_type)
+ kwargs[file_type] = file_content
except (HumanloopRuntimeError) as e:
# Re-raise with more context
- raise HumanloopRuntimeError(f"Failed to use local file for {kwargs['path']}: {str(e)}")
+ raise HumanloopRuntimeError(f"Failed to use local file for `{normalized_path}`: {str(e)}")
try:
if function_name == "call":
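The three branches of the prioritization strategy, sketched as calls (the `use_local_files` constructor flag and the inline `prompt` dict shape follow the docstring above; the path and version ID are illustrative placeholders):

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY", use_local_files=True)

# 1. Direct parameters win; any local .prompt file at the path is ignored.
client.prompts.call(path="qa/answerer", prompt={"model": "gpt-4"})

# 2. version_id (or environment) forces the remote version, with a warning.
client.prompts.call(path="qa/answerer", version_id="prv_123")

# 3. Otherwise the raw local file content is loaded and sent as `prompt`.
client.prompts.call(path="qa/answerer")
```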
diff --git a/src/humanloop/sync/sync_client.py b/src/humanloop/sync/sync_client.py
index ce82ebcb..c609c188 100644
--- a/src/humanloop/sync/sync_client.py
+++ b/src/humanloop/sync/sync_client.py
@@ -65,7 +65,7 @@ def _get_file_content_impl(self, path: str, file_type: FileType) -> str:
file_type: The type of file (Prompt or Agent)
Returns:
- The file content
+ The raw file content
Raises:
HumanloopRuntimeError: If the file doesn't exist or can't be read
@@ -79,7 +79,7 @@ def _get_file_content_impl(self, path: str, file_type: FileType) -> str:
raise HumanloopRuntimeError(f"Local file not found: {local_path}")
try:
- # Read the file content
+ # Read the raw file content
with open(local_path) as f:
file_content = f.read()
logger.debug(f"Using local file content from {local_path}")
@@ -88,7 +88,7 @@ def _get_file_content_impl(self, path: str, file_type: FileType) -> str:
raise HumanloopRuntimeError(f"Error reading local file {local_path}: {str(e)}")
def get_file_content(self, path: str, file_type: FileType) -> str:
- """Get the content of a file from cache or filesystem.
+ """Get the raw file content of a file from cache or filesystem.
This method uses an LRU cache to store file contents. When the cache is full,
the least recently accessed files are automatically removed to make space.
@@ -98,7 +98,7 @@ def get_file_content(self, path: str, file_type: FileType) -> str:
file_type: The type of file (Prompt or Agent)
Returns:
- The file content
+ The raw file content
Raises:
HumanloopRuntimeError: If the file doesn't exist or can't be read
@@ -153,7 +153,7 @@ def _save_serialized_file(self, serialized_content: str, file_path: str, file_ty
"""Save serialized file to local filesystem.
Args:
- serialized_content: The content to save
+ serialized_content: The raw file content to save
file_path: The path to save the file to
file_type: The type of file (Prompt or Agent)
@@ -169,7 +169,7 @@ def _save_serialized_file(self, serialized_content: str, file_path: str, file_ty
# Add file type extension
new_path = full_path.parent / f"{full_path.stem}.{file_type}"
- # Write content to file
+ # Write raw file content to file
with open(new_path, "w") as f:
f.write(serialized_content)
@@ -195,16 +195,16 @@ def _pull_file(self, path: str, environment: str | None = None) -> None:
file = self.client.files.retrieve_by_path(
path=path,
environment=environment,
- include_content=True
+ include_raw_file_content=True
)
if file.type not in ["prompt", "agent"]:
raise ValueError(f"Unsupported file type: {file.type}")
- self._save_serialized_file(file.content, file.path, file.type)
+ self._save_serialized_file(file.raw_file_content, file.path, file.type)
def _pull_directory(self,
- directory: str | None = None,
+ path: str | None = None,
environment: str | None = None,
) -> List[str]:
"""Sync Prompt and Agent files from Humanloop to local filesystem.
@@ -231,9 +231,9 @@ def _pull_directory(self,
response = self.client.files.list_files(
type=["prompt", "agent"],
page=page,
- include_content=True,
+ include_raw_file_content=True,
environment=environment,
- directory=directory
+ path=path
)
if len(response.records) == 0:
@@ -246,13 +246,13 @@ def _pull_directory(self,
logger.warning(f"Skipping unsupported file type: {file.type}")
continue
- # Skip if no content
- if not getattr(file, "content", None):
+ # Skip if no raw file content
+ if not getattr(file, "raw_file_content", None):
logger.warning(f"No content found for {file.type} {getattr(file, 'id', '')}")
continue
try:
- self._save_serialized_file(file.content, file.path, file.type)
+ self._save_serialized_file(file.raw_file_content, file.path, file.type)
successful_files.append(file.path)
except Exception as e:
failed_files.append(file.path)
@@ -275,7 +275,7 @@ def pull(self, path: str | None = None, environment: str | None = None) -> List[
"""Pull files from Humanloop to local filesystem.
If the path ends with .prompt or .agent, pulls that specific file.
- Otherwise, pulls all files under the specified directory path.
+ Otherwise, pulls all files under the specified path.
If no path is provided, pulls all files from the root.
Args:
From 010d82e11b9e2001be7e134a3bd20e3cb5426612 Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Wed, 7 May 2025 13:07:19 +0100
Subject: [PATCH 36/39] Improve error message formatting in sync client
---
src/humanloop/sync/sync_client.py | 26 +++++++++++++++++++++++++-
1 file changed, 25 insertions(+), 1 deletion(-)
diff --git a/src/humanloop/sync/sync_client.py b/src/humanloop/sync/sync_client.py
index c609c188..aefcad67 100644
--- a/src/humanloop/sync/sync_client.py
+++ b/src/humanloop/sync/sync_client.py
@@ -6,6 +6,7 @@
from .metadata_handler import MetadataHandler
import time
from humanloop.error import HumanloopRuntimeError
+import json
if TYPE_CHECKING:
from humanloop.base_client import BaseHumanloop
@@ -22,6 +23,28 @@
# Default cache size for file content caching
DEFAULT_CACHE_SIZE = 100
+def format_api_error(error: Exception) -> str:
+ """Format API error messages to be more user-friendly."""
+ error_msg = str(error)
+ if "status_code" not in error_msg or "body" not in error_msg:
+ return error_msg
+
+ try:
+ # Extract the body part and parse as JSON
+ body_str = error_msg.split("body: ")[1]
+        # Best-effort: convert the Python dict repr to JSON by swapping quote
+        # characters; strings containing apostrophes will fail to parse and
+        # fall through to the except below, which returns the raw message.
+ body_str = body_str.replace("'", '"')
+ body = json.loads(body_str)
+
+ # Get the detail from the body
+ detail = body.get("detail", {})
+
+ # Prefer description, fall back to msg
+ return detail.get("description") or detail.get("msg") or error_msg
+ except Exception as e:
+ logger.debug(f"Failed to parse error message: {str(e)}")
+ return error_msg
+
class SyncClient:
"""Client for managing synchronization between local filesystem and Humanloop.
@@ -260,7 +283,8 @@ def _pull_directory(self,
page += 1
except Exception as e:
- raise HumanloopRuntimeError(f"Failed to fetch page {page}: {str(e)}")
+ formatted_error = format_api_error(e)
+ raise HumanloopRuntimeError(f"Failed to pull files: {formatted_error}")
# Log summary only if we have results
if successful_files or failed_files:
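To make the parsing path concrete, a fabricated error string shaped like `"status_code: ..., body: {...}"` round-trips as follows (the exact repr of the SDK's API errors is an assumption):

```python
from humanloop.sync.sync_client import format_api_error

# Fabricated error text containing both "status_code" and "body" markers.
raw = "status_code: 404, body: {'detail': {'description': 'File not found'}}"
print(format_api_error(Exception(raw)))  # -> File not found

# Messages without both markers are returned unchanged.
print(format_api_error(Exception("connection reset")))  # -> connection reset
```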
From e0a9d53e133a49536e00f122f88ecb074c76482e Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Wed, 7 May 2025 15:14:20 +0100
Subject: [PATCH 37/39] Improve logging in sync client and CLI
---
src/humanloop/cli/__main__.py | 57 +++++++++---
src/humanloop/cli/progress.py | 120 +++++++++++++++++++++++++
src/humanloop/sync/metadata_handler.py | 3 +-
src/humanloop/sync/sync_client.py | 95 ++++++++------------
4 files changed, 202 insertions(+), 73 deletions(-)
create mode 100644 src/humanloop/cli/progress.py
diff --git a/src/humanloop/cli/__main__.py b/src/humanloop/cli/__main__.py
index e31793f1..5f5aaa8a 100644
--- a/src/humanloop/cli/__main__.py
+++ b/src/humanloop/cli/__main__.py
@@ -9,6 +9,7 @@
from humanloop import Humanloop
from humanloop.sync.sync_client import SyncClient
from datetime import datetime
+from humanloop.cli.progress import progress_context
# Set up logging
logger = logging.getLogger(__name__)
@@ -25,6 +26,8 @@
INFO_COLOR = "blue"
WARNING_COLOR = "yellow"
+MAX_FILES_TO_DISPLAY = 10
+
def get_client(api_key: Optional[str] = None, env_file: Optional[str] = None, base_url: Optional[str] = None) -> Humanloop:
"""Get a Humanloop client instance."""
if not api_key:
@@ -65,7 +68,7 @@ def common_options(f: Callable) -> Callable:
)
@click.option(
"--base-dir",
- help="Base directory for synced files",
+ help="Base directory for pulled files",
default="humanloop",
type=click.Path(),
)
@@ -116,9 +119,23 @@ def cli():
help="Environment to pull from (e.g. 'production', 'staging')",
default=None,
)
+@click.option(
+ "--verbose",
+ "-v",
+ is_flag=True,
+ help="Show detailed progress information",
+)
@handle_sync_errors
@common_options
-def pull(path: Optional[str], environment: Optional[str], api_key: Optional[str], env_file: Optional[str], base_dir: str, base_url: Optional[str]):
+def pull(
+ path: Optional[str],
+ environment: Optional[str],
+ api_key: Optional[str],
+ env_file: Optional[str],
+ base_dir: str,
+ base_url: Optional[str],
+ verbose: bool
+):
"""Pull prompt and agent files from Humanloop to your local filesystem.
\b
@@ -143,14 +160,18 @@ def pull(path: Optional[str], environment: Optional[str], api_key: Optional[str]
Currently only supports syncing prompt and agent files. Other file types will be skipped."""
client = get_client(api_key, env_file, base_url)
- sync_client = SyncClient(client, base_dir=base_dir)
+ sync_client = SyncClient(client, base_dir=base_dir, log_level=logging.DEBUG if verbose else logging.WARNING)
click.echo(click.style("Pulling files from Humanloop...", fg=INFO_COLOR))
-
click.echo(click.style(f"Path: {path or '(root)'}", fg=INFO_COLOR))
click.echo(click.style(f"Environment: {environment or '(default)'}", fg=INFO_COLOR))
-
- successful_files = sync_client.pull(path, environment)
+
+ if verbose:
+ # Don't use the spinner in verbose mode as the spinner and sync client logging compete
+ successful_files = sync_client.pull(path, environment)
+ else:
+ with progress_context("Pulling files..."):
+ successful_files = sync_client.pull(path, environment)
# Get metadata about the operation
metadata = sync_client.metadata.get_last_operation()
@@ -158,14 +179,24 @@ def pull(path: Optional[str], environment: Optional[str], api_key: Optional[str]
# Determine if the operation was successful based on failed_files
is_successful = not metadata.get('failed_files') and not metadata.get('error')
duration_color = SUCCESS_COLOR if is_successful else ERROR_COLOR
- click.echo(click.style(f"\nSync completed in {metadata['duration_ms']}ms", fg=duration_color))
+ click.echo(click.style(f"Pull completed in {metadata['duration_ms']}ms", fg=duration_color))
if metadata['successful_files']:
- click.echo(click.style(f"\nSuccessfully synced {len(metadata['successful_files'])} files:", fg=SUCCESS_COLOR))
- for file in metadata['successful_files']:
- click.echo(click.style(f" ✓ {file}", fg=SUCCESS_COLOR))
+ click.echo(click.style(f"\nSuccessfully pulled {len(metadata['successful_files'])} files:", fg=SUCCESS_COLOR))
+
+ if verbose:
+ for file in metadata['successful_files']:
+ click.echo(click.style(f" ✓ {file}", fg=SUCCESS_COLOR))
+ else:
+ files_to_display = metadata['successful_files'][:MAX_FILES_TO_DISPLAY]
+ for file in files_to_display:
+ click.echo(click.style(f" ✓ {file}", fg=SUCCESS_COLOR))
+
+ if len(metadata['successful_files']) > MAX_FILES_TO_DISPLAY:
+ remaining = len(metadata['successful_files']) - MAX_FILES_TO_DISPLAY
+ click.echo(click.style(f" ...and {remaining} more", fg=SUCCESS_COLOR))
if metadata['failed_files']:
- click.echo(click.style(f"\nFailed to sync {len(metadata['failed_files'])} files:", fg=ERROR_COLOR))
+ click.echo(click.style(f"\nFailed to pull {len(metadata['failed_files'])} files:", fg=ERROR_COLOR))
for file in metadata['failed_files']:
click.echo(click.style(f" ✗ {file}", fg=ERROR_COLOR))
if metadata.get('error'):
@@ -214,9 +245,9 @@ def history(api_key: Optional[str], env_file: Optional[str], base_dir: str, base
click.echo(f"Environment: {op['environment']}")
click.echo(f"Duration: {op['duration_ms']}ms")
if op['successful_files']:
- click.echo(click.style(f"Successfully synced {len(op['successful_files'])} file{'' if len(op['successful_files']) == 1 else 's'}", fg=SUCCESS_COLOR))
+ click.echo(click.style(f"Successfully {op['operation_type']}ed {len(op['successful_files'])} file{'' if len(op['successful_files']) == 1 else 's'}", fg=SUCCESS_COLOR))
if op['failed_files']:
- click.echo(click.style(f"Failed to sync {len(op['failed_files'])} file{'' if len(op['failed_files']) == 1 else 's'}", fg=ERROR_COLOR))
+            click.echo(click.style(f"Failed to {op['operation_type']} {len(op['failed_files'])} file{'' if len(op['failed_files']) == 1 else 's'}", fg=ERROR_COLOR))
if op['error']:
click.echo(click.style(f"Error: {op['error']}", fg=ERROR_COLOR))
click.echo(click.style("----------------------", fg=INFO_COLOR))
diff --git a/src/humanloop/cli/progress.py b/src/humanloop/cli/progress.py
new file mode 100644
index 00000000..67ef4506
--- /dev/null
+++ b/src/humanloop/cli/progress.py
@@ -0,0 +1,120 @@
+import sys
+import time
+from typing import Optional, Callable, Any
+from threading import Thread, Event
+from contextlib import contextmanager
+
+class Spinner:
+ """A simple terminal spinner for indicating progress."""
+
+ def __init__(
+ self,
+ message: str = "Loading...",
+ delay: float = 0.1,
+ spinner_chars: str = "⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏"
+ ):
+ self.message = message
+ self.delay = delay
+ self.spinner_chars = spinner_chars
+ self.stop_event = Event()
+ self.spinner_thread: Optional[Thread] = None
+
+ def _spin(self):
+ """The actual spinner animation."""
+ i = 0
+ while not self.stop_event.is_set():
+ sys.stdout.write(f"\r{self.spinner_chars[i]} {self.message}")
+ sys.stdout.flush()
+ i = (i + 1) % len(self.spinner_chars)
+ time.sleep(self.delay)
+
+ def start(self):
+ """Start the spinner animation."""
+ self.stop_event.clear()
+ self.spinner_thread = Thread(target=self._spin)
+ self.spinner_thread.daemon = True
+ self.spinner_thread.start()
+
+ def stop(self, final_message: Optional[str] = None):
+ """Stop the spinner and optionally display a final message."""
+ if self.spinner_thread is None:
+ return
+
+ self.stop_event.set()
+ self.spinner_thread.join()
+
+ # Clear the spinner line
+ sys.stdout.write("\r" + " " * (len(self.message) + 2) + "\r")
+
+ if final_message:
+ print(final_message)
+ sys.stdout.flush()
+
+ def update_message(self, message: str):
+ """Update the spinner message."""
+ self.message = message
+
+class ProgressTracker:
+ """A simple progress tracker that shows percentage completion."""
+
+ def __init__(
+ self,
+ total: int,
+ message: str = "Progress",
+ width: int = 40
+ ):
+ self.total = total
+ self.current = 0
+ self.message = message
+ self.width = width
+ self.start_time = time.time()
+
+ def update(self, increment: int = 1):
+ """Update the progress."""
+ self.current += increment
+ self._display()
+
+ def _display(self):
+ """Display the current progress."""
+        # Guard against division by zero when total is 0.
+        fraction = self.current / self.total if self.total else 1.0
+        percentage = fraction * 100
+        filled = int(self.width * fraction)
+ bar = "█" * filled + "░" * (self.width - filled)
+
+ elapsed = time.time() - self.start_time
+ if self.current > 0:
+ rate = elapsed / self.current
+ eta = rate * (self.total - self.current)
+ time_str = f"ETA: {eta:.1f}s"
+ else:
+ time_str = "Calculating..."
+
+ sys.stdout.write(f"\r{self.message}: [{bar}] {percentage:.1f}% {time_str}")
+ sys.stdout.flush()
+
+ def finish(self, final_message: Optional[str] = None):
+ """Complete the progress bar and optionally show a final message."""
+ self._display()
+ print() # New line
+ if final_message:
+ print(final_message)
+
+@contextmanager
+def progress_context(message: str = "Loading...", success_message: str | None = None, error_message: str | None = None):
+ """Context manager for showing a spinner during an operation."""
+ spinner = Spinner(message)
+ spinner.start()
+ try:
+ yield spinner
+ spinner.stop(success_message)
+    except Exception:
+ spinner.stop(error_message)
+ raise
+
+def with_progress(message: str = "Loading..."):
+ """Decorator to add a spinner to a function."""
+ def decorator(func: Callable):
+ def wrapper(*args, **kwargs):
+ with progress_context(message) as spinner:
+ return func(*args, **kwargs)
+ return wrapper
+ return decorator
\ No newline at end of file
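Usage sketch for the new helpers (only `progress_context` is wired into the CLI above; `ProgressTracker` is defined but unused so far):

```python
import time

from humanloop.cli.progress import ProgressTracker, progress_context

# Spinner around a blocking operation, as the pull command uses it.
with progress_context("Pulling files...", success_message="Done"):
    time.sleep(1)  # stand-in for the network round trip

# The tracker, for operations with a known number of steps.
tracker = ProgressTracker(total=5, message="Processing")
for _ in range(5):
    time.sleep(0.2)
    tracker.update()
tracker.finish("All files processed")
```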
diff --git a/src/humanloop/sync/metadata_handler.py b/src/humanloop/sync/metadata_handler.py
index 9155df45..4a4776e4 100644
--- a/src/humanloop/sync/metadata_handler.py
+++ b/src/humanloop/sync/metadata_handler.py
@@ -61,7 +61,7 @@ def log_operation(
successful_files: Optional[List[str]] = None,
failed_files: Optional[List[str]] = None,
error: Optional[str] = None,
- start_time: Optional[float] = None
+ duration_ms: Optional[float] = None
) -> None:
"""Log a sync operation.
@@ -75,7 +75,6 @@ def log_operation(
start_time: Optional timestamp when the operation started (from time.time())
"""
current_time = datetime.now().isoformat()
- duration_ms = int((time.time() - (start_time or time.time())) * 1000) if start_time else 0
operation_data = {
"timestamp": current_time,
diff --git a/src/humanloop/sync/sync_client.py b/src/humanloop/sync/sync_client.py
index aefcad67..6b16cde0 100644
--- a/src/humanloop/sync/sync_client.py
+++ b/src/humanloop/sync/sync_client.py
@@ -61,7 +61,8 @@ def __init__(
self,
client: "BaseHumanloop",
base_dir: str = "humanloop",
- cache_size: int = DEFAULT_CACHE_SIZE
+ cache_size: int = DEFAULT_CACHE_SIZE,
+ log_level: int = logging.WARNING
):
"""
Parameters
@@ -69,10 +70,15 @@ def __init__(
client: Humanloop client instance
base_dir: Base directory for synced files (default: "humanloop")
cache_size: Maximum number of files to cache (default: DEFAULT_CACHE_SIZE)
+ log_level: Log level for logging (default: WARNING)
"""
self.client = client
self.base_dir = Path(base_dir)
self._cache_size = cache_size
+
+        # logging.Logger.setLevel mutates the module-level logger in place,
+        # so no `global` statement is needed here.
+        logger.setLevel(log_level)
+
# Create a new cached version of get_file_content with the specified cache size
self.get_file_content = lru_cache(maxsize=cache_size)(self._get_file_content_impl)
# Initialize metadata handler
@@ -162,27 +168,11 @@ def _normalize_path(self, path: str) -> str:
return path
def is_file(self, path: str) -> bool:
- """Check if the path is a file by checking for .prompt or .agent extension.
-
- Args:
- path: The path to check
-
- Returns:
- True if the path ends with .prompt or .agent, False otherwise
- """
+ """Check if the path is a file by checking for .prompt or .agent extension."""
return path.endswith('.prompt') or path.endswith('.agent')
def _save_serialized_file(self, serialized_content: str, file_path: str, file_type: FileType) -> None:
- """Save serialized file to local filesystem.
-
- Args:
- serialized_content: The raw file content to save
- file_path: The path to save the file to
- file_type: The type of file (Prompt or Agent)
-
- Raises:
- Exception: If there is an error saving the file
- """
+ """Save serialized file to local filesystem."""
try:
# Create full path including base_dir prefix
full_path = self.base_dir / file_path
@@ -198,23 +188,12 @@ def _save_serialized_file(self, serialized_content: str, file_path: str, file_ty
# Clear the cache for this file to ensure we get fresh content next time
self.clear_cache()
-
- logger.info(f"Syncing {file_type} {file_path}")
except Exception as e:
logger.error(f"Failed to sync {file_type} {file_path}: {str(e)}")
raise
def _pull_file(self, path: str, environment: str | None = None) -> None:
- """Pull a specific file from Humanloop to local filesystem.
-
- Args:
- path: The path of the file without the extension (e.g. "path/to/file")
- environment: The environment to pull the file from
-
- Raises:
- ValueError: If the file type is not supported
- Exception: If there is an error pulling the file
- """
+ """Pull a specific file from Humanloop to local filesystem."""
file = self.client.files.retrieve_by_path(
path=path,
environment=environment,
@@ -230,27 +209,16 @@ def _pull_directory(self,
path: str | None = None,
environment: str | None = None,
) -> List[str]:
- """Sync Prompt and Agent files from Humanloop to local filesystem.
-
- If `path` is provided, only the files under that path will be pulled.
- If `environment` is provided, the files will be pulled from that environment.
-
- Args:
- path: The path of the directory to pull from (e.g. "path/to/directory")
- environment: The environment to pull the files from
-
- Returns:
- List of successfully processed file paths
-
- Raises:
- Exception: If there is an error fetching files from Humanloop
- """
+ """Sync Prompt and Agent files from Humanloop to local filesystem."""
successful_files = []
failed_files = []
page = 1
+ logger.debug(f"Fetching files from directory: {path or '(root)'} in environment: {environment or '(default)'}")
+
while True:
try:
+ logger.debug(f"Requesting page {page} of files")
response = self.client.files.list_files(
type=["prompt", "agent"],
page=page,
@@ -260,8 +228,11 @@ def _pull_directory(self,
)
if len(response.records) == 0:
+ logger.debug("No more files found")
break
+ logger.debug(f"Found {len(response.records)} files from page {page}")
+
# Process each file
for file in response.records:
# Skip if not a Prompt or Agent
@@ -275,6 +246,7 @@ def _pull_directory(self,
continue
try:
+ logger.debug(f"Saving {file.type} {file.path}")
self._save_serialized_file(file.raw_file_content, file.path, file.type)
successful_files.append(file.path)
except Exception as e:
@@ -286,12 +258,10 @@ def _pull_directory(self,
formatted_error = format_api_error(e)
raise HumanloopRuntimeError(f"Failed to pull files: {formatted_error}")
- # Log summary only if we have results
- if successful_files or failed_files:
- if successful_files:
- logger.info(f"\nSynced {len(successful_files)} files")
- if failed_files:
- logger.error(f"Failed to sync {len(failed_files)} files")
+ if successful_files:
+ logger.info(f"Successfully pulled {len(successful_files)} files")
+ if failed_files:
+ logger.warning(f"Failed to pull {len(failed_files)} files")
return successful_files
@@ -310,39 +280,48 @@ def pull(self, path: str | None = None, environment: str | None = None) -> List[
List of successfully processed file paths
"""
start_time = time.time()
+ normalized_path = self._normalize_path(path) if path else None
+
+ logger.info(f"Starting pull operation: path={normalized_path or '(root)'}, environment={environment or '(default)'}")
try:
if path is None:
# Pull all files from the root
+ logger.debug("Pulling all files from root")
successful_files = self._pull_directory(None, environment)
failed_files = [] # Failed files are already logged in _pull_directory
else:
- normalized_path = self._normalize_path(path)
if self.is_file(path.strip()):
+ logger.debug(f"Pulling specific file: {normalized_path}")
self._pull_file(normalized_path, environment)
successful_files = [path]
failed_files = []
else:
+ logger.debug(f"Pulling directory: {normalized_path}")
successful_files = self._pull_directory(normalized_path, environment)
failed_files = [] # Failed files are already logged in _pull_directory
-
+
+ duration_ms = int((time.time() - start_time) * 1000)
+ logger.info(f"Pull completed in {duration_ms}ms: {len(successful_files)} files succeeded")
+
# Log the successful operation
self.metadata.log_operation(
operation_type="pull",
- path=path or "", # Use empty string if path is None
+ path=normalized_path or "", # Use empty string if path is None
environment=environment,
successful_files=successful_files,
failed_files=failed_files,
- start_time=start_time
+ duration_ms=duration_ms
)
return successful_files
except Exception as e:
+ duration_ms = int((time.time() - start_time) * 1000)
# Log the failed operation
self.metadata.log_operation(
operation_type="pull",
- path=path or "", # Use empty string if path is None
+ path=normalized_path or "", # Use empty string if path is None
environment=environment,
error=str(e),
- start_time=start_time
+ duration_ms=duration_ms
)
raise
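One detail worth noting in `SyncClient.__init__` above: wrapping the bound method with `lru_cache` at construction time gives each instance its own cache sized by `cache_size`, whereas a class-level `@lru_cache` would share a single cache (keyed on `self`) across all instances. A standalone sketch of the pattern:

```python
from functools import lru_cache


class FileCache:
    """Standalone sketch of SyncClient's per-instance LRU pattern."""

    def __init__(self, cache_size: int = 100):
        # Each instance gets its own cache, sized at construction.
        self.get = lru_cache(maxsize=cache_size)(self._get_impl)

    def _get_impl(self, path: str) -> str:
        with open(path) as f:
            return f.read()

    def clear_cache(self) -> None:
        # Mirrors SyncClient.clear_cache: drop all cached contents.
        self.get.cache_clear()
```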
From e0a4ae933cc14008fc2beaba99367a2fe78ba037 Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Wed, 7 May 2025 15:22:06 +0100
Subject: [PATCH 38/39] Add static type hints to MetadataHandler
---
src/humanloop/cli/__main__.py | 2 +-
src/humanloop/sync/metadata_handler.py | 34 +++++++++++++++++++-------
2 files changed, 26 insertions(+), 10 deletions(-)
diff --git a/src/humanloop/cli/__main__.py b/src/humanloop/cli/__main__.py
index 5f5aaa8a..ae5d1b43 100644
--- a/src/humanloop/cli/__main__.py
+++ b/src/humanloop/cli/__main__.py
@@ -123,7 +123,7 @@ def cli():
"--verbose",
"-v",
is_flag=True,
- help="Show detailed progress information",
+ help="Show detailed information about the operation",
)
@handle_sync_errors
@common_options
diff --git a/src/humanloop/sync/metadata_handler.py b/src/humanloop/sync/metadata_handler.py
index 4a4776e4..3a9af2a6 100644
--- a/src/humanloop/sync/metadata_handler.py
+++ b/src/humanloop/sync/metadata_handler.py
@@ -2,11 +2,27 @@
import time
from datetime import datetime
from pathlib import Path
-from typing import Dict, List, Optional
+from typing import Dict, List, Optional, TypedDict, NotRequired
import logging
logger = logging.getLogger(__name__)
+class OperationData(TypedDict):
+ """Type definition for operation data structure."""
+ timestamp: str
+ operation_type: str
+ path: str
+ environment: NotRequired[Optional[str]]
+ successful_files: List[str]
+ failed_files: List[str]
+ error: NotRequired[Optional[str]]
+ duration_ms: NotRequired[Optional[float]]
+
+class Metadata(TypedDict):
+ """Type definition for the metadata structure."""
+ last_operation: Optional[OperationData]
+ history: List[OperationData]
+
class MetadataHandler:
"""Handles metadata storage and retrieval for sync operations.
@@ -14,7 +30,7 @@ class MetadataHandler:
and maintains a record of the most recent operation with detailed information.
"""
- def __init__(self, base_dir: Path, max_history: int = 5):
+ def __init__(self, base_dir: Path, max_history: int = 5) -> None:
"""Initialize the metadata handler.
Args:
@@ -29,13 +45,13 @@ def __init__(self, base_dir: Path, max_history: int = 5):
def _ensure_metadata_file(self) -> None:
"""Ensure the metadata file exists with proper structure."""
if not self.metadata_file.exists():
- initial_data = {
+ initial_data: Metadata = {
"last_operation": None,
"history": []
}
self._write_metadata(initial_data)
- def _read_metadata(self) -> Dict:
+ def _read_metadata(self) -> Metadata:
"""Read the current metadata from file."""
try:
with open(self.metadata_file, 'r') as f:
@@ -44,7 +60,7 @@ def _read_metadata(self) -> Dict:
logger.error(f"Error reading metadata file: {e}")
return {"last_operation": None, "history": []}
- def _write_metadata(self, data: Dict) -> None:
+ def _write_metadata(self, data: Metadata) -> None:
"""Write metadata to file."""
try:
self.metadata_file.parent.mkdir(parents=True, exist_ok=True)
@@ -72,11 +88,11 @@ def log_operation(
successful_files: List of successfully processed files
failed_files: List of files that failed to process
error: Any error message if the operation failed
- start_time: Optional timestamp when the operation started (from time.time())
+ duration_ms: Optional duration of the operation in milliseconds
"""
current_time = datetime.now().isoformat()
- operation_data = {
+ operation_data: OperationData = {
"timestamp": current_time,
"operation_type": operation_type,
"path": path,
@@ -98,12 +114,12 @@ def log_operation(
self._write_metadata(metadata)
- def get_last_operation(self) -> Optional[Dict]:
+ def get_last_operation(self) -> Optional[OperationData]:
"""Get the most recent operation details."""
metadata = self._read_metadata()
return metadata.get("last_operation")
- def get_history(self) -> List[Dict]:
+ def get_history(self) -> List[OperationData]:
"""Get the operation history."""
metadata = self._read_metadata()
return metadata.get("history", [])
\ No newline at end of file
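A record conforming to the `OperationData` shape above (values are illustrative; the following patch then makes `duration_ms` required):

```python
from datetime import datetime

from humanloop.sync.metadata_handler import OperationData

operation: OperationData = {
    "timestamp": datetime.now().isoformat(),
    "operation_type": "pull",
    "path": "path/to/directory",
    "environment": "production",
    "successful_files": ["path/to/file.prompt"],
    "failed_files": [],
    "duration_ms": 1234.0,
}
```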
From 40ffd1e5326bc3115f88b72a85ae699adbd54011 Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Wed, 7 May 2025 18:01:38 +0100
Subject: [PATCH 39/39] Make duration_ms in log_operation required
---
src/humanloop/sync/metadata_handler.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/src/humanloop/sync/metadata_handler.py b/src/humanloop/sync/metadata_handler.py
index 3a9af2a6..18de2e8a 100644
--- a/src/humanloop/sync/metadata_handler.py
+++ b/src/humanloop/sync/metadata_handler.py
@@ -16,7 +16,7 @@ class OperationData(TypedDict):
successful_files: List[str]
failed_files: List[str]
error: NotRequired[Optional[str]]
- duration_ms: NotRequired[Optional[float]]
+ duration_ms: float
class Metadata(TypedDict):
"""Type definition for the metadata structure."""
@@ -73,22 +73,22 @@ def log_operation(
self,
operation_type: str,
path: str,
+ duration_ms: float,
environment: Optional[str] = None,
successful_files: Optional[List[str]] = None,
failed_files: Optional[List[str]] = None,
error: Optional[str] = None,
- duration_ms: Optional[float] = None
) -> None:
"""Log a sync operation.
Args:
operation_type: Type of operation (e.g., "pull", "push")
path: The path that was synced
+ duration_ms: Duration of the operation in milliseconds
environment: Optional environment name
successful_files: List of successfully processed files
failed_files: List of files that failed to process
error: Any error message if the operation failed
- duration_ms: Optional duration of the operation in milliseconds
"""
current_time = datetime.now().isoformat()