From c67a2eafbba33d0adcdacee271db7ccd35606ee2 Mon Sep 17 00:00:00 2001
From: fern-api <115122769+fern-api[bot]@users.noreply.github.com>
Date: Wed, 2 Apr 2025 16:35:33 +0000
Subject: [PATCH] Release 0.8.29b

---
 poetry.lock | 622 +--
 pyproject.toml | 6 +-
 reference.md | 2701 +++++++++++-
 requirements.txt | 4 +-
 src/humanloop/__init__.py | 151 +-
 src/humanloop/agents/__init__.py | 33 +
 src/humanloop/agents/client.py | 3679 +++++++++++++++++
 src/humanloop/agents/requests/__init__.py | 17 +
 .../requests/agent_log_request_tool_choice.py | 8 +
 .../agents/requests/agent_request_stop.py | 5 +
 .../agents/requests/agent_request_template.py | 6 +
 .../requests/agent_request_tools_item.py | 7 +
 .../agents_call_request_tool_choice.py | 8 +
 .../agents_call_stream_request_tool_choice.py | 8 +
 src/humanloop/agents/types/__init__.py | 17 +
 .../types/agent_log_request_tool_choice.py | 8 +
 .../agents/types/agent_request_stop.py | 5 +
 .../agents/types/agent_request_template.py | 6 +
 .../agents/types/agent_request_tools_item.py | 7 +
 .../types/agents_call_request_tool_choice.py | 8 +
 .../agents_call_stream_request_tool_choice.py | 8 +
 src/humanloop/base_client.py | 4 +
 src/humanloop/core/client_wrapper.py | 2 +-
 src/humanloop/files/client.py | 20 +-
 ...th_files_retrieve_by_path_post_response.py | 8 +-
 ...th_files_retrieve_by_path_post_response.py | 3 +-
 src/humanloop/requests/__init__.py | 64 +-
 src/humanloop/requests/agent_call_response.py | 111 +
 .../agent_call_response_tool_choice.py | 8 +
 .../requests/agent_call_stream_response.py | 17 +
 .../agent_call_stream_response_payload.py | 8 +
 src/humanloop/requests/agent_inline_tool.py | 10 +
 .../requests/agent_kernel_request.py | 112 +
 .../requests/agent_kernel_request_stop.py | 5 +
 .../requests/agent_kernel_request_template.py | 6 +
 .../agent_kernel_request_tools_item.py | 7 +
 .../requests/agent_linked_file_request.py | 10 +
 .../requests/agent_linked_file_response.py | 17 +
 .../agent_linked_file_response_file.py | 21 +
 src/humanloop/requests/agent_log_response.py | 201 +
 .../agent_log_response_tool_choice.py | 8 +
 .../requests/agent_log_stream_response.py | 87 +
 src/humanloop/requests/agent_response.py | 242 ++
 src/humanloop/requests/agent_response_stop.py | 5 +
 .../requests/agent_response_template.py | 6 +
 .../requests/agent_response_tools_item.py | 10 +
 .../requests/create_agent_log_response.py | 31 +
 src/humanloop/requests/dataset_response.py | 5 +
 ...arents_and_children_response_files_item.py | 8 +-
 src/humanloop/requests/evaluator_response.py | 5 +
 .../file_environment_response_file.py | 8 +-
 .../file_environment_variable_request.py | 15 +
 src/humanloop/requests/flow_response.py | 5 +
 src/humanloop/requests/linked_file_request.py | 10 +
 src/humanloop/requests/list_agents.py | 12 +
 src/humanloop/requests/log_response.py | 7 +-
 src/humanloop/requests/log_stream_response.py | 7 +
 .../requests/paginated_data_agent_response.py | 12 +
 ..._response_flow_response_agent_response.py} | 8 +-
 ...w_response_agent_response_records_item.py} | 14 +-
 .../requests/populate_template_response.py | 5 +
 .../requests/prompt_kernel_request.py | 6 +
 src/humanloop/requests/prompt_response.py | 5 +
 .../requests/run_version_response.py | 3 +-
 src/humanloop/requests/tool_call_response.py | 146 +
 .../version_deployment_response_file.py | 8 +-
 .../requests/version_id_response_version.py | 8 +-
 src/humanloop/tools/client.py | 819 +++-
 src/humanloop/types/__init__.py | 66 +-
 src/humanloop/types/agent_call_response.py | 142 +
 .../types/agent_call_response_tool_choice.py | 8 +
 .../types/agent_call_stream_response.py | 60 +
 .../agent_call_stream_response_payload.py | 8 +
 src/humanloop/types/agent_inline_tool.py | 21 +
 src/humanloop/types/agent_kernel_request.py | 122 +
 .../types/agent_kernel_request_stop.py | 5 +
 .../types/agent_kernel_request_template.py | 6 +
 .../types/agent_kernel_request_tools_item.py | 7 +
 .../types/agent_linked_file_request.py | 21 +
 .../types/agent_linked_file_response.py | 45 +
 .../types/agent_linked_file_response_file.py | 16 +
 src/humanloop/types/agent_log_response.py | 237 ++
 .../types/agent_log_response_tool_choice.py | 8 +
 .../types/agent_log_stream_response.py | 98 +
 src/humanloop/types/agent_response.py | 273 ++
 src/humanloop/types/agent_response_stop.py | 5 +
 .../types/agent_response_template.py | 6 +
 .../types/agent_response_tools_item.py | 10 +
 .../types/create_agent_log_response.py | 42 +
 src/humanloop/types/dataset_response.py | 9 +
 ...tory_with_parents_and_children_response.py | 6 +
 ...arents_and_children_response_files_item.py | 3 +-
 src/humanloop/types/evaluatee_response.py | 4 +
 .../types/evaluation_evaluator_response.py | 4 +
 .../types/evaluation_log_response.py | 6 +
 src/humanloop/types/evaluation_response.py | 4 +
 .../types/evaluation_run_response.py | 4 +
 .../types/evaluation_runs_response.py | 4 +
 src/humanloop/types/evaluator_log_response.py | 6 +
 src/humanloop/types/evaluator_response.py | 13 +
 src/humanloop/types/event_type.py | 19 +
 .../types/file_environment_response.py | 4 +
 .../types/file_environment_response_file.py | 3 +-
 .../file_environment_variable_request.py | 27 +
 src/humanloop/types/file_type.py | 2 +-
 src/humanloop/types/files_tool_type.py | 2 +-
 src/humanloop/types/flow_log_response.py | 6 +
 src/humanloop/types/flow_response.py | 13 +
 src/humanloop/types/linked_file_request.py | 21 +
 src/humanloop/types/list_agents.py | 44 +
 src/humanloop/types/list_evaluators.py | 4 +
 src/humanloop/types/list_flows.py | 4 +
 src/humanloop/types/list_prompts.py | 4 +
 src/humanloop/types/list_tools.py | 4 +
 src/humanloop/types/log_response.py | 5 +-
 src/humanloop/types/log_stream_response.py | 7 +
 src/humanloop/types/model_providers.py | 2 +-
 .../types/monitoring_evaluator_response.py | 4 +
 .../types/paginated_data_agent_response.py | 44 +
 .../paginated_data_evaluation_log_response.py | 6 +
 .../paginated_data_evaluator_response.py | 4 +
 .../types/paginated_data_flow_response.py | 4 +
 .../types/paginated_data_log_response.py | 6 +
 .../types/paginated_data_prompt_response.py | 4 +
 .../types/paginated_data_tool_response.py | 4 +
 ..._response_flow_response_agent_response.py} | 34 +-
 ...w_response_agent_response_records_item.py} | 7 +-
 .../types/paginated_evaluation_response.py | 4 +
 .../types/populate_template_response.py | 13 +
 src/humanloop/types/prompt_call_response.py | 4 +
 src/humanloop/types/prompt_kernel_request.py | 6 +
 src/humanloop/types/prompt_log_response.py | 6 +
 src/humanloop/types/prompt_response.py | 13 +
 src/humanloop/types/run_version_response.py | 3 +-
 src/humanloop/types/tool_call_response.py | 186 +
 src/humanloop/types/tool_log_response.py | 6 +
 src/humanloop/types/tool_response.py | 4 +
 .../types/version_deployment_response.py | 4 +
 .../types/version_deployment_response_file.py | 3 +-
 src/humanloop/types/version_id_response.py | 4 +
 .../types/version_id_response_version.py | 3 +-
 141 files changed, 10805 insertions(+), 464 deletions(-)
 create mode 100644 src/humanloop/agents/__init__.py
 create mode 100644 src/humanloop/agents/client.py
 create mode 100644 src/humanloop/agents/requests/__init__.py
 create mode 100644 src/humanloop/agents/requests/agent_log_request_tool_choice.py
 create mode 100644 src/humanloop/agents/requests/agent_request_stop.py
 create mode 100644 src/humanloop/agents/requests/agent_request_template.py
 create mode 100644 src/humanloop/agents/requests/agent_request_tools_item.py
 create mode 100644 src/humanloop/agents/requests/agents_call_request_tool_choice.py
 create mode 100644 src/humanloop/agents/requests/agents_call_stream_request_tool_choice.py
 create mode 100644 src/humanloop/agents/types/__init__.py
 create mode 100644 src/humanloop/agents/types/agent_log_request_tool_choice.py
 create mode 100644 src/humanloop/agents/types/agent_request_stop.py
 create mode 100644 src/humanloop/agents/types/agent_request_template.py
 create mode 100644 src/humanloop/agents/types/agent_request_tools_item.py
 create mode 100644 src/humanloop/agents/types/agents_call_request_tool_choice.py
 create mode 100644 src/humanloop/agents/types/agents_call_stream_request_tool_choice.py
 create mode 100644 src/humanloop/requests/agent_call_response.py
 create mode 100644 src/humanloop/requests/agent_call_response_tool_choice.py
 create mode 100644 src/humanloop/requests/agent_call_stream_response.py
 create mode 100644 src/humanloop/requests/agent_call_stream_response_payload.py
 create mode 100644 src/humanloop/requests/agent_inline_tool.py
 create mode 100644 src/humanloop/requests/agent_kernel_request.py
 create mode 100644 src/humanloop/requests/agent_kernel_request_stop.py
 create mode 100644 src/humanloop/requests/agent_kernel_request_template.py
 create mode 100644 src/humanloop/requests/agent_kernel_request_tools_item.py
 create mode 100644 src/humanloop/requests/agent_linked_file_request.py
 create mode 100644 src/humanloop/requests/agent_linked_file_response.py
 create mode 100644 src/humanloop/requests/agent_linked_file_response_file.py
 create mode 100644 src/humanloop/requests/agent_log_response.py
 create mode 100644 src/humanloop/requests/agent_log_response_tool_choice.py
 create mode 100644 src/humanloop/requests/agent_log_stream_response.py
 create mode 100644 src/humanloop/requests/agent_response.py
 create mode 100644 src/humanloop/requests/agent_response_stop.py
 create mode 100644 src/humanloop/requests/agent_response_template.py
 create mode 100644 src/humanloop/requests/agent_response_tools_item.py
 create mode 100644 src/humanloop/requests/create_agent_log_response.py
 create mode 100644 src/humanloop/requests/file_environment_variable_request.py
 create mode 100644 src/humanloop/requests/linked_file_request.py
 create mode 100644 src/humanloop/requests/list_agents.py
 create mode 100644 src/humanloop/requests/log_stream_response.py
 create mode 100644 src/humanloop/requests/paginated_data_agent_response.py
 rename src/humanloop/requests/{paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py => paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py} (65%)
 rename src/humanloop/requests/{paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py => paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py} (58%)
 create mode 100644 src/humanloop/requests/tool_call_response.py
 create mode 100644 src/humanloop/types/agent_call_response.py
 create mode 100644 src/humanloop/types/agent_call_response_tool_choice.py
 create mode 100644 src/humanloop/types/agent_call_stream_response.py
 create mode 100644 src/humanloop/types/agent_call_stream_response_payload.py
 create mode 100644 src/humanloop/types/agent_inline_tool.py
 create mode 100644 src/humanloop/types/agent_kernel_request.py
 create mode 100644 src/humanloop/types/agent_kernel_request_stop.py
 create mode 100644 src/humanloop/types/agent_kernel_request_template.py
 create mode 100644 src/humanloop/types/agent_kernel_request_tools_item.py
 create mode 100644 src/humanloop/types/agent_linked_file_request.py
 create mode 100644 src/humanloop/types/agent_linked_file_response.py
 create mode 100644 src/humanloop/types/agent_linked_file_response_file.py
 create mode 100644 src/humanloop/types/agent_log_response.py
 create mode 100644 src/humanloop/types/agent_log_response_tool_choice.py
 create mode 100644 src/humanloop/types/agent_log_stream_response.py
 create mode 100644 src/humanloop/types/agent_response.py
 create mode 100644 src/humanloop/types/agent_response_stop.py
 create mode 100644 src/humanloop/types/agent_response_template.py
 create mode 100644 src/humanloop/types/agent_response_tools_item.py
 create mode 100644 src/humanloop/types/create_agent_log_response.py
 create mode 100644 src/humanloop/types/event_type.py
 create mode 100644 src/humanloop/types/file_environment_variable_request.py
 create mode 100644 src/humanloop/types/linked_file_request.py
 create mode 100644 src/humanloop/types/list_agents.py
 create mode 100644 src/humanloop/types/log_stream_response.py
 create mode 100644 src/humanloop/types/paginated_data_agent_response.py
 rename src/humanloop/types/{paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py => paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py} (52%)
 rename src/humanloop/types/{paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py => paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py} (63%)
 create mode 100644 src/humanloop/types/tool_call_response.py

diff --git a/poetry.lock b/poetry.lock
index 29a80984..35b2ec1d 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -345,13 +345,13 @@ typing = ["typing-extensions (>=4.12.2)"]
 
 [[package]]
 name = "fsspec"
-version = "2025.3.0"
+version = "2025.3.2"
 description = "File-system specification"
 optional = false
-python-versions = ">=3.8"
+python-versions = ">=3.9"
 files = [
-    {file = "fsspec-2025.3.0-py3-none-any.whl", hash = "sha256:efb87af3efa9103f94ca91a7f8cb7a4df91af9f74fc106c9c7ea0efd7277c1b3"},
-    {file = "fsspec-2025.3.0.tar.gz", hash = "sha256:a935fd1ea872591f2b5148907d103488fc523295e6c64b835cfad8c3eca44972"},
+    {file = "fsspec-2025.3.2-py3-none-any.whl", hash = "sha256:2daf8dc3d1dfa65b6aa37748d112773a7a08416f6c70d96b264c96476ecaf711"},
+    {file = "fsspec-2025.3.2.tar.gz", hash = "sha256:e52c77ef398680bbd6a98c0e628fbc469491282981209907bbc8aea76a04fdc6"},
 ]
 
 [package.extras]
@@ -384,13 +384,13 @@ tqdm = ["tqdm"]
 
 [[package]]
 name = "groq"
-version = "0.20.0"
+version = "0.21.0"
 description = "The official Python library for the groq API"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "groq-0.20.0-py3-none-any.whl", hash = "sha256:c27b89903eb2b77f94ed95837ff3cadfc8c9e670953b1c5e5e2e855fea54b6c5"},
-    {file = "groq-0.20.0.tar.gz", hash = "sha256:2a201d41cae768c53d411dabcfea2333e2e138df22d909ed555ece426f1e016f"},
+    {file = "groq-0.21.0-py3-none-any.whl", hash = "sha256:ab1cb6bf4fb4e4f59fae0bc2337295b2b8b4335d8d5b8148a4d0ca26490a16b3"},
+    {file = "groq-0.21.0.tar.gz", hash = "sha256:0a94920d9599c02a46f80c207eb7e3ab5dbf415790661e4b91216c39ba1089d0"},
 ]
 
 [package.dependencies]
@@ -470,13 +470,13 @@ files = [
 
 [[package]]
 name = "huggingface-hub"
-version = "0.29.3"
+version = "0.30.1"
 description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub"
 optional = false
 python-versions = ">=3.8.0"
 files = [
-    {file = "huggingface_hub-0.29.3-py3-none-any.whl", hash = "sha256:0b25710932ac649c08cdbefa6c6ccb8e88eef82927cacdb048efb726429453aa"},
-    {file = "huggingface_hub-0.29.3.tar.gz", hash = "sha256:64519a25716e0ba382ba2d3fb3ca082e7c7eb4a2fc634d200e8380006e0760e5"},
+    {file = "huggingface_hub-0.30.1-py3-none-any.whl", hash = "sha256:0f6aa5ec5a4e68e5b9e45d556b4e5ea180c58f5a5ffa734e7f38c9d573028959"},
+    {file = "huggingface_hub-0.30.1.tar.gz", hash = "sha256:f379e8b8d0791295602538856638460ae3cf679c7f304201eb80fb98c771950e"},
 ]
 
 [package.dependencies]
@@ -494,6 +494,7 @@ cli = ["InquirerPy (==0.3.4)"]
 dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio (>=4.0.0)", "jedi", "libcst (==1.4.0)", "mypy (==1.5.1)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.9.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"]
 fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"]
 hf-transfer = ["hf-transfer (>=0.1.4)"]
+hf-xet = ["hf-xet (>=0.1.4)"]
 inference = ["aiohttp"]
 quality = ["libcst (==1.4.0)", "mypy (==1.5.1)", "ruff (>=0.9.0)"]
 tensorflow = ["graphviz", "pydot", "tensorflow"]
@@ -541,13 +542,13 @@ type = ["pytest-mypy"]
 
 [[package]]
 name = "iniconfig"
-version = "2.0.0"
+version = "2.1.0"
 description = "brain-dead simple config-ini parsing"
 optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
 files = [
-    {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"},
-    {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"},
+    {file = "iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760"},
+    {file = "iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7"},
 ]
 
 [[package]]
@@ -872,13 +873,13 @@ files = [
 
 [[package]]
 name = "openai"
-version = "1.66.5"
+version = "1.70.0"
 description = "The official Python library for the openai API"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "openai-1.66.5-py3-none-any.whl", hash = "sha256:74be528175f8389f67675830c51a15bd51e874425c86d3de6153bf70ed6c2884"},
-    {file = "openai-1.66.5.tar.gz", hash = "sha256:f61b8fac29490ca8fdc6d996aa6926c18dbe5639536f8c40219c40db05511b11"},
+    {file = "openai-1.70.0-py3-none-any.whl", hash = "sha256:f6438d053fd8b2e05fd6bef70871e832d9bbdf55e119d0ac5b92726f1ae6f614"},
+    {file = "openai-1.70.0.tar.gz", hash = "sha256:e52a8d54c3efeb08cf58539b5b21a5abef25368b5432965e4de88cdf4e091b2b"},
 ]
 
 [package.dependencies]
@@ -894,16 +895,17 @@ typing-extensions = ">=4.11,<5"
 [package.extras]
 datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"]
 realtime = ["websockets (>=13,<15)"]
+voice-helpers = ["numpy (>=2.0.2)", "sounddevice (>=0.5.1)"] [[package]] name = "opentelemetry-api" -version = "1.31.0" +version = "1.31.1" description = "OpenTelemetry Python API" optional = false python-versions = ">=3.8" files = [ - {file = "opentelemetry_api-1.31.0-py3-none-any.whl", hash = "sha256:145b72c6c16977c005c568ec32f4946054ab793d8474a17fd884b0397582c5f2"}, - {file = "opentelemetry_api-1.31.0.tar.gz", hash = "sha256:d8da59e83e8e3993b4726e4c1023cd46f57c4d5a73142e239247e7d814309de1"}, + {file = "opentelemetry_api-1.31.1-py3-none-any.whl", hash = "sha256:1511a3f470c9c8a32eeea68d4ea37835880c0eed09dd1a0187acc8b1301da0a1"}, + {file = "opentelemetry_api-1.31.1.tar.gz", hash = "sha256:137ad4b64215f02b3000a0292e077641c8611aab636414632a9b9068593b7e91"}, ] [package.dependencies] @@ -912,30 +914,30 @@ importlib-metadata = ">=6.0,<8.7.0" [[package]] name = "opentelemetry-instrumentation" -version = "0.52b0" +version = "0.52b1" description = "Instrumentation Tools & Auto Instrumentation for OpenTelemetry Python" optional = false python-versions = ">=3.8" files = [ - {file = "opentelemetry_instrumentation-0.52b0-py3-none-any.whl", hash = "sha256:0c93ca9fa1d438e2b741f21d6aa870c991e0e3b0f1367c8626bb3981b12ad2fe"}, - {file = "opentelemetry_instrumentation-0.52b0.tar.gz", hash = "sha256:da75d328f9dbd59c6e61af6adec29f4bb581f5cbf3ddfae348268f9c1edaceeb"}, + {file = "opentelemetry_instrumentation-0.52b1-py3-none-any.whl", hash = "sha256:8c0059c4379d77bbd8015c8d8476020efe873c123047ec069bb335e4b8717477"}, + {file = "opentelemetry_instrumentation-0.52b1.tar.gz", hash = "sha256:739f3bfadbbeec04dd59297479e15660a53df93c131d907bb61052e3d3c1406f"}, ] [package.dependencies] opentelemetry-api = ">=1.4,<2.0" -opentelemetry-semantic-conventions = "0.52b0" +opentelemetry-semantic-conventions = "0.52b1" packaging = ">=18.0" wrapt = ">=1.0.0,<2.0.0" [[package]] name = "opentelemetry-instrumentation-anthropic" -version = "0.38.12" +version = "0.39.0" description = "OpenTelemetry Anthropic instrumentation" optional = false python-versions = "<4,>=3.9" files = [ - {file = "opentelemetry_instrumentation_anthropic-0.38.12-py3-none-any.whl", hash = "sha256:46d671672a66073d523707d8e4dfb66e74b1370ee4fc108a139e0425e59d3fe3"}, - {file = "opentelemetry_instrumentation_anthropic-0.38.12.tar.gz", hash = "sha256:6a61a27b15eca3914d242a545cc23b8adab3c3180f34c1ed009293928222cfeb"}, + {file = "opentelemetry_instrumentation_anthropic-0.39.0-py3-none-any.whl", hash = "sha256:4e456883a2dec8da1977a27d6444798252829e23cf0500f222b70db77cb3d125"}, + {file = "opentelemetry_instrumentation_anthropic-0.39.0.tar.gz", hash = "sha256:62bec0cde6ebc0b77ef324e5e8262fd5545d7db68f38a9da16916d703e32a7ad"}, ] [package.dependencies] @@ -946,13 +948,13 @@ opentelemetry-semantic-conventions-ai = "0.4.3" [[package]] name = "opentelemetry-instrumentation-bedrock" -version = "0.38.12" +version = "0.39.0" description = "OpenTelemetry Bedrock instrumentation" optional = false python-versions = "<4,>=3.9" files = [ - {file = "opentelemetry_instrumentation_bedrock-0.38.12-py3-none-any.whl", hash = "sha256:2aa45e6fb617aae791b15b2b721c8ccbf7997e9c84de58c7f2011857172857ee"}, - {file = "opentelemetry_instrumentation_bedrock-0.38.12.tar.gz", hash = "sha256:e1b09fd981607898e6031096f00475d15190fec6e33e34803171810654d80500"}, + {file = "opentelemetry_instrumentation_bedrock-0.39.0-py3-none-any.whl", hash = "sha256:d5923b1c72216d7e23bf698b6ffbbfdb1cf7cbb26c6b04f5626720fbea33af45"}, + {file = "opentelemetry_instrumentation_bedrock-0.39.0.tar.gz", hash = 
"sha256:a643abaeb223d7337dbb4cec13e5d4f8be3db2696e954ff40351c5b87646ac22"}, ] [package.dependencies] @@ -961,16 +963,17 @@ opentelemetry-api = ">=1.28.0,<2.0.0" opentelemetry-instrumentation = ">=0.50b0" opentelemetry-semantic-conventions = ">=0.50b0" opentelemetry-semantic-conventions-ai = "0.4.3" +tokenizers = ">=0.13.0" [[package]] name = "opentelemetry-instrumentation-cohere" -version = "0.38.12" +version = "0.39.0" description = "OpenTelemetry Cohere instrumentation" optional = false python-versions = "<4,>=3.9" files = [ - {file = "opentelemetry_instrumentation_cohere-0.38.12-py3-none-any.whl", hash = "sha256:fcec7422ddf0be47af636ccae005eabcbfe0a75c15d365d27b6f01f247e5e4c6"}, - {file = "opentelemetry_instrumentation_cohere-0.38.12.tar.gz", hash = "sha256:7d766cb409fdbc9b6626fef2731883d47312f7cbb201b1fbfb3c259c17095441"}, + {file = "opentelemetry_instrumentation_cohere-0.39.0-py3-none-any.whl", hash = "sha256:3edf85d1f5236492568d5a7ced15617922c832535cc74cf2b6d5a55abe1968a6"}, + {file = "opentelemetry_instrumentation_cohere-0.39.0.tar.gz", hash = "sha256:cd1644ec795aa89b9609890e7da2ce0b97287900e0558ba4237588436fd87556"}, ] [package.dependencies] @@ -981,13 +984,13 @@ opentelemetry-semantic-conventions-ai = "0.4.3" [[package]] name = "opentelemetry-instrumentation-groq" -version = "0.38.12" +version = "0.39.0" description = "OpenTelemetry Groq instrumentation" optional = false python-versions = "<4,>=3.9" files = [ - {file = "opentelemetry_instrumentation_groq-0.38.12-py3-none-any.whl", hash = "sha256:faf30879be7c86fb9f8136dd6c8c061619c5e361608bd744f4907537b26a9423"}, - {file = "opentelemetry_instrumentation_groq-0.38.12.tar.gz", hash = "sha256:9dff7a8d3875cf16bbad90a19dc6b588bb999014db9d0f95fcb0b0a82d64b2cf"}, + {file = "opentelemetry_instrumentation_groq-0.39.0-py3-none-any.whl", hash = "sha256:60d3b0bdcb8f765ab0f0ee749a9b78285338ae40506ff27ed961a76f23d377d7"}, + {file = "opentelemetry_instrumentation_groq-0.39.0.tar.gz", hash = "sha256:7664f9d097dcc4bf8d611068c85c4e19b522ae70dda13b4d172230d830191600"}, ] [package.dependencies] @@ -998,13 +1001,13 @@ opentelemetry-semantic-conventions-ai = "0.4.3" [[package]] name = "opentelemetry-instrumentation-openai" -version = "0.38.12" +version = "0.39.0" description = "OpenTelemetry OpenAI instrumentation" optional = false python-versions = "<4,>=3.9" files = [ - {file = "opentelemetry_instrumentation_openai-0.38.12-py3-none-any.whl", hash = "sha256:a9d0557f36d314493878ca0d27b79ddc3c1362f40b1456378dc24c971b46b227"}, - {file = "opentelemetry_instrumentation_openai-0.38.12.tar.gz", hash = "sha256:1666af417ede0ac7fbb3a2414a1286c1cc63a83684a82cf0db2155585f15dea8"}, + {file = "opentelemetry_instrumentation_openai-0.39.0-py3-none-any.whl", hash = "sha256:ca6f0e2e4af526e05850b87c6749068d7a4557ef3f02babf956552760af2315b"}, + {file = "opentelemetry_instrumentation_openai-0.39.0.tar.gz", hash = "sha256:dffb5cb2d89410dc4cb5ed2b978e930eadecd430ea5b7e0ac003088e1eee0f4d"}, ] [package.dependencies] @@ -1016,13 +1019,13 @@ tiktoken = ">=0.6.0,<1" [[package]] name = "opentelemetry-instrumentation-replicate" -version = "0.38.12" +version = "0.39.0" description = "OpenTelemetry Replicate instrumentation" optional = false python-versions = "<4,>=3.9" files = [ - {file = "opentelemetry_instrumentation_replicate-0.38.12-py3-none-any.whl", hash = "sha256:a815cc38b97e0383d9a848da0cc10eb3537e40819e64bd3cd103458a837c51f2"}, - {file = "opentelemetry_instrumentation_replicate-0.38.12.tar.gz", hash = 
"sha256:ea10a3dfdef085cc4135584f1f958ea1fea586349f9c1b101ae253d5ceb19f43"}, + {file = "opentelemetry_instrumentation_replicate-0.39.0-py3-none-any.whl", hash = "sha256:074de415aa96d8d00062c70676e6bec9d43c0db7d674d517b99e9a522cb62c49"}, + {file = "opentelemetry_instrumentation_replicate-0.39.0.tar.gz", hash = "sha256:7f113f3bdd6bf1be3872b3de5d595a63ed368874697c2f1388bdaada18479c57"}, ] [package.dependencies] @@ -1033,13 +1036,13 @@ opentelemetry-semantic-conventions-ai = "0.4.3" [[package]] name = "opentelemetry-proto" -version = "1.31.0" +version = "1.31.1" description = "OpenTelemetry Python Proto" optional = false python-versions = ">=3.8" files = [ - {file = "opentelemetry_proto-1.31.0-py3-none-any.whl", hash = "sha256:ad4ded738e3d48d3280b37984eae75e63be01d8a0b04c83c743714aba960670d"}, - {file = "opentelemetry_proto-1.31.0.tar.gz", hash = "sha256:5efe313788a8f4b739a94beb207749587a449a5e90c68b0b6a931567e8ca721d"}, + {file = "opentelemetry_proto-1.31.1-py3-none-any.whl", hash = "sha256:1398ffc6d850c2f1549ce355744e574c8cd7c1dba3eea900d630d52c41d07178"}, + {file = "opentelemetry_proto-1.31.1.tar.gz", hash = "sha256:d93e9c2b444e63d1064fb50ae035bcb09e5822274f1683886970d2734208e790"}, ] [package.dependencies] @@ -1047,34 +1050,34 @@ protobuf = ">=5.0,<6.0" [[package]] name = "opentelemetry-sdk" -version = "1.31.0" +version = "1.31.1" description = "OpenTelemetry Python SDK" optional = false python-versions = ">=3.8" files = [ - {file = "opentelemetry_sdk-1.31.0-py3-none-any.whl", hash = "sha256:97c9a03865e69723725fb64fe04343a488c3e61e684eb804bd7d6da2215dfc60"}, - {file = "opentelemetry_sdk-1.31.0.tar.gz", hash = "sha256:452d7d5b3c1db2e5e4cb64abede0ddd20690cb244a559c73a59652fdf6726070"}, + {file = "opentelemetry_sdk-1.31.1-py3-none-any.whl", hash = "sha256:882d021321f223e37afaca7b4e06c1d8bbc013f9e17ff48a7aa017460a8e7dae"}, + {file = "opentelemetry_sdk-1.31.1.tar.gz", hash = "sha256:c95f61e74b60769f8ff01ec6ffd3d29684743404603df34b20aa16a49dc8d903"}, ] [package.dependencies] -opentelemetry-api = "1.31.0" -opentelemetry-semantic-conventions = "0.52b0" +opentelemetry-api = "1.31.1" +opentelemetry-semantic-conventions = "0.52b1" typing-extensions = ">=3.7.4" [[package]] name = "opentelemetry-semantic-conventions" -version = "0.52b0" +version = "0.52b1" description = "OpenTelemetry Semantic Conventions" optional = false python-versions = ">=3.8" files = [ - {file = "opentelemetry_semantic_conventions-0.52b0-py3-none-any.whl", hash = "sha256:4d843652ae1f9f3c0d4d8df0bfef740627c90495ac043fc33f0a04bad3b606e2"}, - {file = "opentelemetry_semantic_conventions-0.52b0.tar.gz", hash = "sha256:f8bc8873a69d0a2f45746c31980baad2bb10ccee16b1816497ccf99417770386"}, + {file = "opentelemetry_semantic_conventions-0.52b1-py3-none-any.whl", hash = "sha256:72b42db327e29ca8bb1b91e8082514ddf3bbf33f32ec088feb09526ade4bc77e"}, + {file = "opentelemetry_semantic_conventions-0.52b1.tar.gz", hash = "sha256:7b3d226ecf7523c27499758a58b542b48a0ac8d12be03c0488ff8ec60c5bae5d"}, ] [package.dependencies] deprecated = ">=1.2.6" -opentelemetry-api = "1.31.0" +opentelemetry-api = "1.31.1" [[package]] name = "opentelemetry-semantic-conventions-ai" @@ -1243,22 +1246,22 @@ testing = ["pytest", "pytest-benchmark"] [[package]] name = "protobuf" -version = "5.29.3" +version = "5.29.4" description = "" optional = false python-versions = ">=3.8" files = [ - {file = "protobuf-5.29.3-cp310-abi3-win32.whl", hash = "sha256:3ea51771449e1035f26069c4c7fd51fba990d07bc55ba80701c78f886bf9c888"}, - {file = "protobuf-5.29.3-cp310-abi3-win_amd64.whl", hash = 
"sha256:a4fa6f80816a9a0678429e84973f2f98cbc218cca434abe8db2ad0bffc98503a"}, - {file = "protobuf-5.29.3-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:a8434404bbf139aa9e1300dbf989667a83d42ddda9153d8ab76e0d5dcaca484e"}, - {file = "protobuf-5.29.3-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:daaf63f70f25e8689c072cfad4334ca0ac1d1e05a92fc15c54eb9cf23c3efd84"}, - {file = "protobuf-5.29.3-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:c027e08a08be10b67c06bf2370b99c811c466398c357e615ca88c91c07f0910f"}, - {file = "protobuf-5.29.3-cp38-cp38-win32.whl", hash = "sha256:84a57163a0ccef3f96e4b6a20516cedcf5bb3a95a657131c5c3ac62200d23252"}, - {file = "protobuf-5.29.3-cp38-cp38-win_amd64.whl", hash = "sha256:b89c115d877892a512f79a8114564fb435943b59067615894c3b13cd3e1fa107"}, - {file = "protobuf-5.29.3-cp39-cp39-win32.whl", hash = "sha256:0eb32bfa5219fc8d4111803e9a690658aa2e6366384fd0851064b963b6d1f2a7"}, - {file = "protobuf-5.29.3-cp39-cp39-win_amd64.whl", hash = "sha256:6ce8cc3389a20693bfde6c6562e03474c40851b44975c9b2bf6df7d8c4f864da"}, - {file = "protobuf-5.29.3-py3-none-any.whl", hash = "sha256:0a18ed4a24198528f2333802eb075e59dea9d679ab7a6c5efb017a59004d849f"}, - {file = "protobuf-5.29.3.tar.gz", hash = "sha256:5da0f41edaf117bde316404bad1a486cb4ededf8e4a54891296f648e8e076620"}, + {file = "protobuf-5.29.4-cp310-abi3-win32.whl", hash = "sha256:13eb236f8eb9ec34e63fc8b1d6efd2777d062fa6aaa68268fb67cf77f6839ad7"}, + {file = "protobuf-5.29.4-cp310-abi3-win_amd64.whl", hash = "sha256:bcefcdf3976233f8a502d265eb65ea740c989bacc6c30a58290ed0e519eb4b8d"}, + {file = "protobuf-5.29.4-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:307ecba1d852ec237e9ba668e087326a67564ef83e45a0189a772ede9e854dd0"}, + {file = "protobuf-5.29.4-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:aec4962f9ea93c431d5714ed1be1c93f13e1a8618e70035ba2b0564d9e633f2e"}, + {file = "protobuf-5.29.4-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:d7d3f7d1d5a66ed4942d4fefb12ac4b14a29028b209d4bfb25c68ae172059922"}, + {file = "protobuf-5.29.4-cp38-cp38-win32.whl", hash = "sha256:1832f0515b62d12d8e6ffc078d7e9eb06969aa6dc13c13e1036e39d73bebc2de"}, + {file = "protobuf-5.29.4-cp38-cp38-win_amd64.whl", hash = "sha256:476cb7b14914c780605a8cf62e38c2a85f8caff2e28a6a0bad827ec7d6c85d68"}, + {file = "protobuf-5.29.4-cp39-cp39-win32.whl", hash = "sha256:fd32223020cb25a2cc100366f1dedc904e2d71d9322403224cdde5fdced0dabe"}, + {file = "protobuf-5.29.4-cp39-cp39-win_amd64.whl", hash = "sha256:678974e1e3a9b975b8bc2447fca458db5f93a2fb6b0c8db46b6675b5b5346812"}, + {file = "protobuf-5.29.4-py3-none-any.whl", hash = "sha256:3fde11b505e1597f71b875ef2fc52062b6a9740e5f7c8997ce878b6009145862"}, + {file = "protobuf-5.29.4.tar.gz", hash = "sha256:4f1dfcd7997b31ef8f53ec82781ff434a28bf71d9102ddde14d076adcfc78c99"}, ] [[package]] @@ -1317,19 +1320,20 @@ test = ["cffi", "hypothesis", "pandas", "pytest", "pytz"] [[package]] name = "pydantic" -version = "2.10.6" +version = "2.11.1" description = "Data validation using Python type hints" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "pydantic-2.10.6-py3-none-any.whl", hash = "sha256:427d664bf0b8a2b34ff5dd0f5a18df00591adcee7198fbd71981054cef37b584"}, - {file = "pydantic-2.10.6.tar.gz", hash = "sha256:ca5daa827cce33de7a42be142548b0096bf05a7e7b365aebfa5f8eeec7128236"}, + {file = "pydantic-2.11.1-py3-none-any.whl", hash = "sha256:5b6c415eee9f8123a14d859be0c84363fec6b1feb6b688d6435801230b56e0b8"}, + {file = "pydantic-2.11.1.tar.gz", hash = 
"sha256:442557d2910e75c991c39f4b4ab18963d57b9b55122c8b2a9cd176d8c29ce968"}, ] [package.dependencies] annotated-types = ">=0.6.0" -pydantic-core = "2.27.2" +pydantic-core = "2.33.0" typing-extensions = ">=4.12.2" +typing-inspection = ">=0.4.0" [package.extras] email = ["email-validator (>=2.0.0)"] @@ -1337,111 +1341,110 @@ timezone = ["tzdata"] [[package]] name = "pydantic-core" -version = "2.27.2" +version = "2.33.0" description = "Core functionality for Pydantic validation and serialization" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "pydantic_core-2.27.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2d367ca20b2f14095a8f4fa1210f5a7b78b8a20009ecced6b12818f455b1e9fa"}, - {file = "pydantic_core-2.27.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:491a2b73db93fab69731eaee494f320faa4e093dbed776be1a829c2eb222c34c"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7969e133a6f183be60e9f6f56bfae753585680f3b7307a8e555a948d443cc05a"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3de9961f2a346257caf0aa508a4da705467f53778e9ef6fe744c038119737ef5"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e2bb4d3e5873c37bb3dd58714d4cd0b0e6238cebc4177ac8fe878f8b3aa8e74c"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:280d219beebb0752699480fe8f1dc61ab6615c2046d76b7ab7ee38858de0a4e7"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47956ae78b6422cbd46f772f1746799cbb862de838fd8d1fbd34a82e05b0983a"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:14d4a5c49d2f009d62a2a7140d3064f686d17a5d1a268bc641954ba181880236"}, - {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:337b443af21d488716f8d0b6164de833e788aa6bd7e3a39c005febc1284f4962"}, - {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:03d0f86ea3184a12f41a2d23f7ccb79cdb5a18e06993f8a45baa8dfec746f0e9"}, - {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7041c36f5680c6e0f08d922aed302e98b3745d97fe1589db0a3eebf6624523af"}, - {file = "pydantic_core-2.27.2-cp310-cp310-win32.whl", hash = "sha256:50a68f3e3819077be2c98110c1f9dcb3817e93f267ba80a2c05bb4f8799e2ff4"}, - {file = "pydantic_core-2.27.2-cp310-cp310-win_amd64.whl", hash = "sha256:e0fd26b16394ead34a424eecf8a31a1f5137094cabe84a1bcb10fa6ba39d3d31"}, - {file = "pydantic_core-2.27.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:8e10c99ef58cfdf2a66fc15d66b16c4a04f62bca39db589ae8cba08bc55331bc"}, - {file = "pydantic_core-2.27.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:26f32e0adf166a84d0cb63be85c562ca8a6fa8de28e5f0d92250c6b7e9e2aff7"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c19d1ea0673cd13cc2f872f6c9ab42acc4e4f492a7ca9d3795ce2b112dd7e15"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e68c4446fe0810e959cdff46ab0a41ce2f2c86d227d96dc3847af0ba7def306"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d9640b0059ff4f14d1f37321b94061c6db164fbe49b334b31643e0528d100d99"}, - {file = 
"pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:40d02e7d45c9f8af700f3452f329ead92da4c5f4317ca9b896de7ce7199ea459"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c1fd185014191700554795c99b347d64f2bb637966c4cfc16998a0ca700d048"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d81d2068e1c1228a565af076598f9e7451712700b673de8f502f0334f281387d"}, - {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1a4207639fb02ec2dbb76227d7c751a20b1a6b4bc52850568e52260cae64ca3b"}, - {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:3de3ce3c9ddc8bbd88f6e0e304dea0e66d843ec9de1b0042b0911c1663ffd474"}, - {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:30c5f68ded0c36466acede341551106821043e9afaad516adfb6e8fa80a4e6a6"}, - {file = "pydantic_core-2.27.2-cp311-cp311-win32.whl", hash = "sha256:c70c26d2c99f78b125a3459f8afe1aed4d9687c24fd677c6a4436bc042e50d6c"}, - {file = "pydantic_core-2.27.2-cp311-cp311-win_amd64.whl", hash = "sha256:08e125dbdc505fa69ca7d9c499639ab6407cfa909214d500897d02afb816e7cc"}, - {file = "pydantic_core-2.27.2-cp311-cp311-win_arm64.whl", hash = "sha256:26f0d68d4b235a2bae0c3fc585c585b4ecc51382db0e3ba402a22cbc440915e4"}, - {file = "pydantic_core-2.27.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9e0c8cfefa0ef83b4da9588448b6d8d2a2bf1a53c3f1ae5fca39eb3061e2f0b0"}, - {file = "pydantic_core-2.27.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:83097677b8e3bd7eaa6775720ec8e0405f1575015a463285a92bfdfe254529ef"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:172fce187655fece0c90d90a678424b013f8fbb0ca8b036ac266749c09438cb7"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:519f29f5213271eeeeb3093f662ba2fd512b91c5f188f3bb7b27bc5973816934"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05e3a55d124407fffba0dd6b0c0cd056d10e983ceb4e5dbd10dda135c31071d6"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c3ed807c7b91de05e63930188f19e921d1fe90de6b4f5cd43ee7fcc3525cb8c"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fb4aadc0b9a0c063206846d603b92030eb6f03069151a625667f982887153e2"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28ccb213807e037460326424ceb8b5245acb88f32f3d2777427476e1b32c48c4"}, - {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:de3cd1899e2c279b140adde9357c4495ed9d47131b4a4eaff9052f23398076b3"}, - {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:220f892729375e2d736b97d0e51466252ad84c51857d4d15f5e9692f9ef12be4"}, - {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a0fcd29cd6b4e74fe8ddd2c90330fd8edf2e30cb52acda47f06dd615ae72da57"}, - {file = "pydantic_core-2.27.2-cp312-cp312-win32.whl", hash = "sha256:1e2cb691ed9834cd6a8be61228471d0a503731abfb42f82458ff27be7b2186fc"}, - {file = "pydantic_core-2.27.2-cp312-cp312-win_amd64.whl", hash = "sha256:cc3f1a99a4f4f9dd1de4fe0312c114e740b5ddead65bb4102884b384c15d8bc9"}, - {file = "pydantic_core-2.27.2-cp312-cp312-win_arm64.whl", 
hash = "sha256:3911ac9284cd8a1792d3cb26a2da18f3ca26c6908cc434a18f730dc0db7bfa3b"}, - {file = "pydantic_core-2.27.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7d14bd329640e63852364c306f4d23eb744e0f8193148d4044dd3dacdaacbd8b"}, - {file = "pydantic_core-2.27.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:82f91663004eb8ed30ff478d77c4d1179b3563df6cdb15c0817cd1cdaf34d154"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71b24c7d61131bb83df10cc7e687433609963a944ccf45190cfc21e0887b08c9"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fa8e459d4954f608fa26116118bb67f56b93b209c39b008277ace29937453dc9"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce8918cbebc8da707ba805b7fd0b382816858728ae7fe19a942080c24e5b7cd1"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eda3f5c2a021bbc5d976107bb302e0131351c2ba54343f8a496dc8783d3d3a6a"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8086fa684c4775c27f03f062cbb9eaa6e17f064307e86b21b9e0abc9c0f02e"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8d9b3388db186ba0c099a6d20f0604a44eabdeef1777ddd94786cdae158729e4"}, - {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7a66efda2387de898c8f38c0cf7f14fca0b51a8ef0b24bfea5849f1b3c95af27"}, - {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:18a101c168e4e092ab40dbc2503bdc0f62010e95d292b27827871dc85450d7ee"}, - {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ba5dd002f88b78a4215ed2f8ddbdf85e8513382820ba15ad5ad8955ce0ca19a1"}, - {file = "pydantic_core-2.27.2-cp313-cp313-win32.whl", hash = "sha256:1ebaf1d0481914d004a573394f4be3a7616334be70261007e47c2a6fe7e50130"}, - {file = "pydantic_core-2.27.2-cp313-cp313-win_amd64.whl", hash = "sha256:953101387ecf2f5652883208769a79e48db18c6df442568a0b5ccd8c2723abee"}, - {file = "pydantic_core-2.27.2-cp313-cp313-win_arm64.whl", hash = "sha256:ac4dbfd1691affb8f48c2c13241a2e3b60ff23247cbcf981759c768b6633cf8b"}, - {file = "pydantic_core-2.27.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d3e8d504bdd3f10835468f29008d72fc8359d95c9c415ce6e767203db6127506"}, - {file = "pydantic_core-2.27.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:521eb9b7f036c9b6187f0b47318ab0d7ca14bd87f776240b90b21c1f4f149320"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85210c4d99a0114f5a9481b44560d7d1e35e32cc5634c656bc48e590b669b145"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d716e2e30c6f140d7560ef1538953a5cd1a87264c737643d481f2779fc247fe1"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f66d89ba397d92f840f8654756196d93804278457b5fbede59598a1f9f90b228"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:669e193c1c576a58f132e3158f9dfa9662969edb1a250c54d8fa52590045f046"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdbe7629b996647b99c01b37f11170a57ae675375b14b8c13b8518b8320ced5"}, - {file = 
"pydantic_core-2.27.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d262606bf386a5ba0b0af3b97f37c83d7011439e3dc1a9298f21efb292e42f1a"}, - {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:cabb9bcb7e0d97f74df8646f34fc76fbf793b7f6dc2438517d7a9e50eee4f14d"}, - {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_armv7l.whl", hash = "sha256:d2d63f1215638d28221f664596b1ccb3944f6e25dd18cd3b86b0a4c408d5ebb9"}, - {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bca101c00bff0adb45a833f8451b9105d9df18accb8743b08107d7ada14bd7da"}, - {file = "pydantic_core-2.27.2-cp38-cp38-win32.whl", hash = "sha256:f6f8e111843bbb0dee4cb6594cdc73e79b3329b526037ec242a3e49012495b3b"}, - {file = "pydantic_core-2.27.2-cp38-cp38-win_amd64.whl", hash = "sha256:fd1aea04935a508f62e0d0ef1f5ae968774a32afc306fb8545e06f5ff5cdf3ad"}, - {file = "pydantic_core-2.27.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c10eb4f1659290b523af58fa7cffb452a61ad6ae5613404519aee4bfbf1df993"}, - {file = "pydantic_core-2.27.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ef592d4bad47296fb11f96cd7dc898b92e795032b4894dfb4076cfccd43a9308"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c61709a844acc6bf0b7dce7daae75195a10aac96a596ea1b776996414791ede4"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c5f762659e47fdb7b16956c71598292f60a03aa92f8b6351504359dbdba6cf"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4c9775e339e42e79ec99c441d9730fccf07414af63eac2f0e48e08fd38a64d76"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57762139821c31847cfb2df63c12f725788bd9f04bc2fb392790959b8f70f118"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d1e85068e818c73e048fe28cfc769040bb1f475524f4745a5dc621f75ac7630"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:097830ed52fd9e427942ff3b9bc17fab52913b2f50f2880dc4a5611446606a54"}, - {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:044a50963a614ecfae59bb1eaf7ea7efc4bc62f49ed594e18fa1e5d953c40e9f"}, - {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:4e0b4220ba5b40d727c7f879eac379b822eee5d8fff418e9d3381ee45b3b0362"}, - {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5e4f4bb20d75e9325cc9696c6802657b58bc1dbbe3022f32cc2b2b632c3fbb96"}, - {file = "pydantic_core-2.27.2-cp39-cp39-win32.whl", hash = "sha256:cca63613e90d001b9f2f9a9ceb276c308bfa2a43fafb75c8031c4f66039e8c6e"}, - {file = "pydantic_core-2.27.2-cp39-cp39-win_amd64.whl", hash = "sha256:77d1bca19b0f7021b3a982e6f903dcd5b2b06076def36a652e3907f596e29f67"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:2bf14caea37e91198329b828eae1618c068dfb8ef17bb33287a7ad4b61ac314e"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b0cb791f5b45307caae8810c2023a184c74605ec3bcbb67d13846c28ff731ff8"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:688d3fd9fcb71f41c4c015c023d12a79d1c4c0732ec9eb35d96e3388a120dcf3"}, - {file = 
"pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d591580c34f4d731592f0e9fe40f9cc1b430d297eecc70b962e93c5c668f15f"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:82f986faf4e644ffc189a7f1aafc86e46ef70372bb153e7001e8afccc6e54133"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:bec317a27290e2537f922639cafd54990551725fc844249e64c523301d0822fc"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:0296abcb83a797db256b773f45773da397da75a08f5fcaef41f2044adec05f50"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0d75070718e369e452075a6017fbf187f788e17ed67a3abd47fa934d001863d9"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:7e17b560be3c98a8e3aa66ce828bdebb9e9ac6ad5466fba92eb74c4c95cb1151"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c33939a82924da9ed65dab5a65d427205a73181d8098e79b6b426bdf8ad4e656"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:00bad2484fa6bda1e216e7345a798bd37c68fb2d97558edd584942aa41b7d278"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c817e2b40aba42bac6f457498dacabc568c3b7a986fc9ba7c8d9d260b71485fb"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:251136cdad0cb722e93732cb45ca5299fb56e1344a833640bf93b2803f8d1bfd"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d2088237af596f0a524d3afc39ab3b036e8adb054ee57cbb1dcf8e09da5b29cc"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d4041c0b966a84b4ae7a09832eb691a35aec90910cd2dbe7a208de59be77965b"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:8083d4e875ebe0b864ffef72a4304827015cff328a1be6e22cc850753bfb122b"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f141ee28a0ad2123b6611b6ceff018039df17f32ada8b534e6aa039545a3efb2"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7d0c8399fcc1848491f00e0314bd59fb34a9c008761bcb422a057670c3f65e35"}, - {file = "pydantic_core-2.27.2.tar.gz", hash = "sha256:eb026e5a4c1fee05726072337ff51d1efb6f59090b7da90d30ea58625b1ffb39"}, + {file = "pydantic_core-2.33.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:71dffba8fe9ddff628c68f3abd845e91b028361d43c5f8e7b3f8b91d7d85413e"}, + {file = "pydantic_core-2.33.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:abaeec1be6ed535a5d7ffc2e6c390083c425832b20efd621562fbb5bff6dc518"}, + {file = "pydantic_core-2.33.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:759871f00e26ad3709efc773ac37b4d571de065f9dfb1778012908bcc36b3a73"}, + {file = "pydantic_core-2.33.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:dcfebee69cd5e1c0b76a17e17e347c84b00acebb8dd8edb22d4a03e88e82a207"}, + {file = "pydantic_core-2.33.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1b1262b912435a501fa04cd213720609e2cefa723a07c92017d18693e69bf00b"}, + {file = "pydantic_core-2.33.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:4726f1f3f42d6a25678c67da3f0b10f148f5655813c5aca54b0d1742ba821b8f"}, + {file = "pydantic_core-2.33.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e790954b5093dff1e3a9a2523fddc4e79722d6f07993b4cd5547825c3cbf97b5"}, + {file = "pydantic_core-2.33.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:34e7fb3abe375b5c4e64fab75733d605dda0f59827752debc99c17cb2d5f3276"}, + {file = "pydantic_core-2.33.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:ecb158fb9b9091b515213bed3061eb7deb1d3b4e02327c27a0ea714ff46b0760"}, + {file = "pydantic_core-2.33.0-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:4d9149e7528af8bbd76cc055967e6e04617dcb2a2afdaa3dea899406c5521faa"}, + {file = "pydantic_core-2.33.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e81a295adccf73477220e15ff79235ca9dcbcee4be459eb9d4ce9a2763b8386c"}, + {file = "pydantic_core-2.33.0-cp310-cp310-win32.whl", hash = "sha256:f22dab23cdbce2005f26a8f0c71698457861f97fc6318c75814a50c75e87d025"}, + {file = "pydantic_core-2.33.0-cp310-cp310-win_amd64.whl", hash = "sha256:9cb2390355ba084c1ad49485d18449b4242da344dea3e0fe10babd1f0db7dcfc"}, + {file = "pydantic_core-2.33.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:a608a75846804271cf9c83e40bbb4dab2ac614d33c6fd5b0c6187f53f5c593ef"}, + {file = "pydantic_core-2.33.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e1c69aa459f5609dec2fa0652d495353accf3eda5bdb18782bc5a2ae45c9273a"}, + {file = "pydantic_core-2.33.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9ec80eb5a5f45a2211793f1c4aeddff0c3761d1c70d684965c1807e923a588b"}, + {file = "pydantic_core-2.33.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e925819a98318d17251776bd3d6aa9f3ff77b965762155bdad15d1a9265c4cfd"}, + {file = "pydantic_core-2.33.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5bf68bb859799e9cec3d9dd8323c40c00a254aabb56fe08f907e437005932f2b"}, + {file = "pydantic_core-2.33.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1b2ea72dea0825949a045fa4071f6d5b3d7620d2a208335207793cf29c5a182d"}, + {file = "pydantic_core-2.33.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1583539533160186ac546b49f5cde9ffc928062c96920f58bd95de32ffd7bffd"}, + {file = "pydantic_core-2.33.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:23c3e77bf8a7317612e5c26a3b084c7edeb9552d645742a54a5867635b4f2453"}, + {file = "pydantic_core-2.33.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a7a7f2a3f628d2f7ef11cb6188bcf0b9e1558151d511b974dfea10a49afe192b"}, + {file = "pydantic_core-2.33.0-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:f1fb026c575e16f673c61c7b86144517705865173f3d0907040ac30c4f9f5915"}, + {file = "pydantic_core-2.33.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:635702b2fed997e0ac256b2cfbdb4dd0bf7c56b5d8fba8ef03489c03b3eb40e2"}, + {file = "pydantic_core-2.33.0-cp311-cp311-win32.whl", hash = "sha256:07b4ced28fccae3f00626eaa0c4001aa9ec140a29501770a88dbbb0966019a86"}, + {file = "pydantic_core-2.33.0-cp311-cp311-win_amd64.whl", hash = "sha256:4927564be53239a87770a5f86bdc272b8d1fbb87ab7783ad70255b4ab01aa25b"}, + {file = "pydantic_core-2.33.0-cp311-cp311-win_arm64.whl", hash = "sha256:69297418ad644d521ea3e1aa2e14a2a422726167e9ad22b89e8f1130d68e1e9a"}, + {file = "pydantic_core-2.33.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = 
"sha256:6c32a40712e3662bebe524abe8abb757f2fa2000028d64cc5a1006016c06af43"}, + {file = "pydantic_core-2.33.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8ec86b5baa36f0a0bfb37db86c7d52652f8e8aa076ab745ef7725784183c3fdd"}, + {file = "pydantic_core-2.33.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4deac83a8cc1d09e40683be0bc6d1fa4cde8df0a9bf0cda5693f9b0569ac01b6"}, + {file = "pydantic_core-2.33.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:175ab598fb457a9aee63206a1993874badf3ed9a456e0654273e56f00747bbd6"}, + {file = "pydantic_core-2.33.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5f36afd0d56a6c42cf4e8465b6441cf546ed69d3a4ec92724cc9c8c61bd6ecf4"}, + {file = "pydantic_core-2.33.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0a98257451164666afafc7cbf5fb00d613e33f7e7ebb322fbcd99345695a9a61"}, + {file = "pydantic_core-2.33.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ecc6d02d69b54a2eb83ebcc6f29df04957f734bcf309d346b4f83354d8376862"}, + {file = "pydantic_core-2.33.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1a69b7596c6603afd049ce7f3835bcf57dd3892fc7279f0ddf987bebed8caa5a"}, + {file = "pydantic_core-2.33.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ea30239c148b6ef41364c6f51d103c2988965b643d62e10b233b5efdca8c0099"}, + {file = "pydantic_core-2.33.0-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:abfa44cf2f7f7d7a199be6c6ec141c9024063205545aa09304349781b9a125e6"}, + {file = "pydantic_core-2.33.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:20d4275f3c4659d92048c70797e5fdc396c6e4446caf517ba5cad2db60cd39d3"}, + {file = "pydantic_core-2.33.0-cp312-cp312-win32.whl", hash = "sha256:918f2013d7eadea1d88d1a35fd4a1e16aaf90343eb446f91cb091ce7f9b431a2"}, + {file = "pydantic_core-2.33.0-cp312-cp312-win_amd64.whl", hash = "sha256:aec79acc183865bad120b0190afac467c20b15289050648b876b07777e67ea48"}, + {file = "pydantic_core-2.33.0-cp312-cp312-win_arm64.whl", hash = "sha256:5461934e895968655225dfa8b3be79e7e927e95d4bd6c2d40edd2fa7052e71b6"}, + {file = "pydantic_core-2.33.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:f00e8b59e1fc8f09d05594aa7d2b726f1b277ca6155fc84c0396db1b373c4555"}, + {file = "pydantic_core-2.33.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1a73be93ecef45786d7d95b0c5e9b294faf35629d03d5b145b09b81258c7cd6d"}, + {file = "pydantic_core-2.33.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ff48a55be9da6930254565ff5238d71d5e9cd8c5487a191cb85df3bdb8c77365"}, + {file = "pydantic_core-2.33.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:26a4ea04195638dcd8c53dadb545d70badba51735b1594810e9768c2c0b4a5da"}, + {file = "pydantic_core-2.33.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:41d698dcbe12b60661f0632b543dbb119e6ba088103b364ff65e951610cb7ce0"}, + {file = "pydantic_core-2.33.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ae62032ef513fe6281ef0009e30838a01057b832dc265da32c10469622613885"}, + {file = "pydantic_core-2.33.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f225f3a3995dbbc26affc191d0443c6c4aa71b83358fd4c2b7d63e2f6f0336f9"}, + {file = "pydantic_core-2.33.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5bdd36b362f419c78d09630cbaebc64913f66f62bda6d42d5fbb08da8cc4f181"}, + {file = 
"pydantic_core-2.33.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:2a0147c0bef783fd9abc9f016d66edb6cac466dc54a17ec5f5ada08ff65caf5d"}, + {file = "pydantic_core-2.33.0-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:c860773a0f205926172c6644c394e02c25421dc9a456deff16f64c0e299487d3"}, + {file = "pydantic_core-2.33.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:138d31e3f90087f42aa6286fb640f3c7a8eb7bdae829418265e7e7474bd2574b"}, + {file = "pydantic_core-2.33.0-cp313-cp313-win32.whl", hash = "sha256:d20cbb9d3e95114325780f3cfe990f3ecae24de7a2d75f978783878cce2ad585"}, + {file = "pydantic_core-2.33.0-cp313-cp313-win_amd64.whl", hash = "sha256:ca1103d70306489e3d006b0f79db8ca5dd3c977f6f13b2c59ff745249431a606"}, + {file = "pydantic_core-2.33.0-cp313-cp313-win_arm64.whl", hash = "sha256:6291797cad239285275558e0a27872da735b05c75d5237bbade8736f80e4c225"}, + {file = "pydantic_core-2.33.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:7b79af799630af263eca9ec87db519426d8c9b3be35016eddad1832bac812d87"}, + {file = "pydantic_core-2.33.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eabf946a4739b5237f4f56d77fa6668263bc466d06a8036c055587c130a46f7b"}, + {file = "pydantic_core-2.33.0-cp313-cp313t-win_amd64.whl", hash = "sha256:8a1d581e8cdbb857b0e0e81df98603376c1a5c34dc5e54039dcc00f043df81e7"}, + {file = "pydantic_core-2.33.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:7c9c84749f5787781c1c45bb99f433402e484e515b40675a5d121ea14711cf61"}, + {file = "pydantic_core-2.33.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:64672fa888595a959cfeff957a654e947e65bbe1d7d82f550417cbd6898a1d6b"}, + {file = "pydantic_core-2.33.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:26bc7367c0961dec292244ef2549afa396e72e28cc24706210bd44d947582c59"}, + {file = "pydantic_core-2.33.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ce72d46eb201ca43994303025bd54d8a35a3fc2a3495fac653d6eb7205ce04f4"}, + {file = "pydantic_core-2.33.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:14229c1504287533dbf6b1fc56f752ce2b4e9694022ae7509631ce346158de11"}, + {file = "pydantic_core-2.33.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:085d8985b1c1e48ef271e98a658f562f29d89bda98bf120502283efbc87313eb"}, + {file = "pydantic_core-2.33.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:31860fbda80d8f6828e84b4a4d129fd9c4535996b8249cfb8c720dc2a1a00bb8"}, + {file = "pydantic_core-2.33.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f200b2f20856b5a6c3a35f0d4e344019f805e363416e609e9b47c552d35fd5ea"}, + {file = "pydantic_core-2.33.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:5f72914cfd1d0176e58ddc05c7a47674ef4222c8253bf70322923e73e14a4ac3"}, + {file = "pydantic_core-2.33.0-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:91301a0980a1d4530d4ba7e6a739ca1a6b31341252cb709948e0aca0860ce0ae"}, + {file = "pydantic_core-2.33.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7419241e17c7fbe5074ba79143d5523270e04f86f1b3a0dff8df490f84c8273a"}, + {file = "pydantic_core-2.33.0-cp39-cp39-win32.whl", hash = "sha256:7a25493320203005d2a4dac76d1b7d953cb49bce6d459d9ae38e30dd9f29bc9c"}, + {file = "pydantic_core-2.33.0-cp39-cp39-win_amd64.whl", hash = "sha256:82a4eba92b7ca8af1b7d5ef5f3d9647eee94d1f74d21ca7c21e3a2b92e008358"}, + {file = "pydantic_core-2.33.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = 
"sha256:e2762c568596332fdab56b07060c8ab8362c56cf2a339ee54e491cd503612c50"}, + {file = "pydantic_core-2.33.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:5bf637300ff35d4f59c006fff201c510b2b5e745b07125458a5389af3c0dff8c"}, + {file = "pydantic_core-2.33.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62c151ce3d59ed56ebd7ce9ce5986a409a85db697d25fc232f8e81f195aa39a1"}, + {file = "pydantic_core-2.33.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ee65f0cc652261744fd07f2c6e6901c914aa6c5ff4dcfaf1136bc394d0dd26b"}, + {file = "pydantic_core-2.33.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:024d136ae44d233e6322027bbf356712b3940bee816e6c948ce4b90f18471b3d"}, + {file = "pydantic_core-2.33.0-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:e37f10f6d4bc67c58fbd727108ae1d8b92b397355e68519f1e4a7babb1473442"}, + {file = "pydantic_core-2.33.0-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:502ed542e0d958bd12e7c3e9a015bce57deaf50eaa8c2e1c439b512cb9db1e3a"}, + {file = "pydantic_core-2.33.0-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:715c62af74c236bf386825c0fdfa08d092ab0f191eb5b4580d11c3189af9d330"}, + {file = "pydantic_core-2.33.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:bccc06fa0372151f37f6b69834181aa9eb57cf8665ed36405fb45fbf6cac3bae"}, + {file = "pydantic_core-2.33.0-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5d8dc9f63a26f7259b57f46a7aab5af86b2ad6fbe48487500bb1f4b27e051e4c"}, + {file = "pydantic_core-2.33.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:30369e54d6d0113d2aa5aee7a90d17f225c13d87902ace8fcd7bbf99b19124db"}, + {file = "pydantic_core-2.33.0-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3eb479354c62067afa62f53bb387827bee2f75c9c79ef25eef6ab84d4b1ae3b"}, + {file = "pydantic_core-2.33.0-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0310524c833d91403c960b8a3cf9f46c282eadd6afd276c8c5edc617bd705dc9"}, + {file = "pydantic_core-2.33.0-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:eddb18a00bbb855325db27b4c2a89a4ba491cd6a0bd6d852b225172a1f54b36c"}, + {file = "pydantic_core-2.33.0-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:ade5dbcf8d9ef8f4b28e682d0b29f3008df9842bb5ac48ac2c17bc55771cc976"}, + {file = "pydantic_core-2.33.0-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:2c0afd34f928383e3fd25740f2050dbac9d077e7ba5adbaa2227f4d4f3c8da5c"}, + {file = "pydantic_core-2.33.0-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:7da333f21cd9df51d5731513a6d39319892947604924ddf2e24a4612975fb936"}, + {file = "pydantic_core-2.33.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:4b6d77c75a57f041c5ee915ff0b0bb58eabb78728b69ed967bc5b780e8f701b8"}, + {file = "pydantic_core-2.33.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:ba95691cf25f63df53c1d342413b41bd7762d9acb425df8858d7efa616c0870e"}, + {file = "pydantic_core-2.33.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:4f1ab031feb8676f6bd7c85abec86e2935850bf19b84432c64e3e239bffeb1ec"}, + {file = "pydantic_core-2.33.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:58c1151827eef98b83d49b6ca6065575876a02d2211f259fb1a6b7757bd24dd8"}, + {file = "pydantic_core-2.33.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:a66d931ea2c1464b738ace44b7334ab32a2fd50be023d863935eb00f42be1778"}, + {file = "pydantic_core-2.33.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0bcf0bab28995d483f6c8d7db25e0d05c3efa5cebfd7f56474359e7137f39856"}, + {file = "pydantic_core-2.33.0-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:89670d7a0045acb52be0566df5bc8b114ac967c662c06cf5e0c606e4aadc964b"}, + {file = "pydantic_core-2.33.0-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:b716294e721d8060908dbebe32639b01bfe61b15f9f57bcc18ca9a0e00d9520b"}, + {file = "pydantic_core-2.33.0-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:fc53e05c16697ff0c1c7c2b98e45e131d4bfb78068fffff92a82d169cbb4c7b7"}, + {file = "pydantic_core-2.33.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:68504959253303d3ae9406b634997a2123a0b0c1da86459abbd0ffc921695eac"}, + {file = "pydantic_core-2.33.0.tar.gz", hash = "sha256:40eb8af662ba409c3cbf4a8150ad32ae73514cd7cb1f1a2113af39763dd616b3"}, ] [package.dependencies] @@ -1520,13 +1523,13 @@ six = ">=1.5" [[package]] name = "python-dotenv" -version = "1.0.1" +version = "1.1.0" description = "Read key-value pairs from a .env file and set them as environment variables" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "python-dotenv-1.0.1.tar.gz", hash = "sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca"}, - {file = "python_dotenv-1.0.1-py3-none-any.whl", hash = "sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a"}, + {file = "python_dotenv-1.1.0-py3-none-any.whl", hash = "sha256:d7c01d9e2293916c18baf562d95698754b0dbbb5e74d457c45d4f6561fb9d55d"}, + {file = "python_dotenv-1.1.0.tar.gz", hash = "sha256:41f90bc6f5f177fb41f53e87666db362025010eb28f60a01c9143bfa33a2b2d5"}, ] [package.extras] @@ -1534,13 +1537,13 @@ cli = ["click (>=5.0)"] [[package]] name = "pytz" -version = "2025.1" +version = "2025.2" description = "World timezone definitions, modern and historical" optional = false python-versions = "*" files = [ - {file = "pytz-2025.1-py2.py3-none-any.whl", hash = "sha256:89dd22dca55b46eac6eda23b2d72721bf1bdfef212645d81513ef5d03038de57"}, - {file = "pytz-2025.1.tar.gz", hash = "sha256:c2db42be2a2518b28e65f9207c4d05e6ff547d1efa4086469ef855e4ab70178e"}, + {file = "pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00"}, + {file = "pytz-2025.2.tar.gz", hash = "sha256:360b9e3dbb49a209c21ad61809c7fb453643e048b38924c765813546746e81c3"}, ] [[package]] @@ -1764,114 +1767,125 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] [[package]] name = "rpds-py" -version = "0.23.1" +version = "0.24.0" description = "Python bindings to Rust's persistent data structures (rpds)" optional = false python-versions = ">=3.9" files = [ - {file = "rpds_py-0.23.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2a54027554ce9b129fc3d633c92fa33b30de9f08bc61b32c053dc9b537266fed"}, - {file = "rpds_py-0.23.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b5ef909a37e9738d146519657a1aab4584018746a18f71c692f2f22168ece40c"}, - {file = "rpds_py-0.23.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ee9d6f0b38efb22ad94c3b68ffebe4c47865cdf4b17f6806d6c674e1feb4246"}, - {file = "rpds_py-0.23.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f7356a6da0562190558c4fcc14f0281db191cdf4cb96e7604c06acfcee96df15"}, - {file = 
"rpds_py-0.23.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9441af1d25aed96901f97ad83d5c3e35e6cd21a25ca5e4916c82d7dd0490a4fa"}, - {file = "rpds_py-0.23.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d8abf7896a91fb97e7977d1aadfcc2c80415d6dc2f1d0fca5b8d0df247248f3"}, - {file = "rpds_py-0.23.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1b08027489ba8fedde72ddd233a5ea411b85a6ed78175f40285bd401bde7466d"}, - {file = "rpds_py-0.23.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fee513135b5a58f3bb6d89e48326cd5aa308e4bcdf2f7d59f67c861ada482bf8"}, - {file = "rpds_py-0.23.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:35d5631ce0af26318dba0ae0ac941c534453e42f569011585cb323b7774502a5"}, - {file = "rpds_py-0.23.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:a20cb698c4a59c534c6701b1c24a968ff2768b18ea2991f886bd8985ce17a89f"}, - {file = "rpds_py-0.23.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:5e9c206a1abc27e0588cf8b7c8246e51f1a16a103734f7750830a1ccb63f557a"}, - {file = "rpds_py-0.23.1-cp310-cp310-win32.whl", hash = "sha256:d9f75a06ecc68f159d5d7603b734e1ff6daa9497a929150f794013aa9f6e3f12"}, - {file = "rpds_py-0.23.1-cp310-cp310-win_amd64.whl", hash = "sha256:f35eff113ad430b5272bbfc18ba111c66ff525828f24898b4e146eb479a2cdda"}, - {file = "rpds_py-0.23.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:b79f5ced71efd70414a9a80bbbfaa7160da307723166f09b69773153bf17c590"}, - {file = "rpds_py-0.23.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c9e799dac1ffbe7b10c1fd42fe4cd51371a549c6e108249bde9cd1200e8f59b4"}, - {file = "rpds_py-0.23.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:721f9c4011b443b6e84505fc00cc7aadc9d1743f1c988e4c89353e19c4a968ee"}, - {file = "rpds_py-0.23.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f88626e3f5e57432e6191cd0c5d6d6b319b635e70b40be2ffba713053e5147dd"}, - {file = "rpds_py-0.23.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:285019078537949cecd0190f3690a0b0125ff743d6a53dfeb7a4e6787af154f5"}, - {file = "rpds_py-0.23.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b92f5654157de1379c509b15acec9d12ecf6e3bc1996571b6cb82a4302060447"}, - {file = "rpds_py-0.23.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e768267cbe051dd8d1c5305ba690bb153204a09bf2e3de3ae530de955f5b5580"}, - {file = "rpds_py-0.23.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c5334a71f7dc1160382d45997e29f2637c02f8a26af41073189d79b95d3321f1"}, - {file = "rpds_py-0.23.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d6adb81564af0cd428910f83fa7da46ce9ad47c56c0b22b50872bc4515d91966"}, - {file = "rpds_py-0.23.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:cafa48f2133d4daa028473ede7d81cd1b9f9e6925e9e4003ebdf77010ee02f35"}, - {file = "rpds_py-0.23.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0fced9fd4a07a1ded1bac7e961ddd9753dd5d8b755ba8e05acba54a21f5f1522"}, - {file = "rpds_py-0.23.1-cp311-cp311-win32.whl", hash = "sha256:243241c95174b5fb7204c04595852fe3943cc41f47aa14c3828bc18cd9d3b2d6"}, - {file = "rpds_py-0.23.1-cp311-cp311-win_amd64.whl", hash = "sha256:11dd60b2ffddba85715d8a66bb39b95ddbe389ad2cfcf42c833f1bcde0878eaf"}, - {file = "rpds_py-0.23.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:3902df19540e9af4cc0c3ae75974c65d2c156b9257e91f5101a51f99136d834c"}, - {file = 
"rpds_py-0.23.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:66f8d2a17e5838dd6fb9be6baaba8e75ae2f5fa6b6b755d597184bfcd3cb0eba"}, - {file = "rpds_py-0.23.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:112b8774b0b4ee22368fec42749b94366bd9b536f8f74c3d4175d4395f5cbd31"}, - {file = "rpds_py-0.23.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e0df046f2266e8586cf09d00588302a32923eb6386ced0ca5c9deade6af9a149"}, - {file = "rpds_py-0.23.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0f3288930b947cbebe767f84cf618d2cbe0b13be476e749da0e6a009f986248c"}, - {file = "rpds_py-0.23.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ce473a2351c018b06dd8d30d5da8ab5a0831056cc53b2006e2a8028172c37ce5"}, - {file = "rpds_py-0.23.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d550d7e9e7d8676b183b37d65b5cd8de13676a738973d330b59dc8312df9c5dc"}, - {file = "rpds_py-0.23.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e14f86b871ea74c3fddc9a40e947d6a5d09def5adc2076ee61fb910a9014fb35"}, - {file = "rpds_py-0.23.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1bf5be5ba34e19be579ae873da515a2836a2166d8d7ee43be6ff909eda42b72b"}, - {file = "rpds_py-0.23.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:d7031d493c4465dbc8d40bd6cafefef4bd472b17db0ab94c53e7909ee781b9ef"}, - {file = "rpds_py-0.23.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:55ff4151cfd4bc635e51cfb1c59ac9f7196b256b12e3a57deb9e5742e65941ad"}, - {file = "rpds_py-0.23.1-cp312-cp312-win32.whl", hash = "sha256:a9d3b728f5a5873d84cba997b9d617c6090ca5721caaa691f3b1a78c60adc057"}, - {file = "rpds_py-0.23.1-cp312-cp312-win_amd64.whl", hash = "sha256:b03a8d50b137ee758e4c73638b10747b7c39988eb8e6cd11abb7084266455165"}, - {file = "rpds_py-0.23.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:4caafd1a22e5eaa3732acb7672a497123354bef79a9d7ceed43387d25025e935"}, - {file = "rpds_py-0.23.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:178f8a60fc24511c0eb756af741c476b87b610dba83270fce1e5a430204566a4"}, - {file = "rpds_py-0.23.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c632419c3870507ca20a37c8f8f5352317aca097639e524ad129f58c125c61c6"}, - {file = "rpds_py-0.23.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:698a79d295626ee292d1730bc2ef6e70a3ab135b1d79ada8fde3ed0047b65a10"}, - {file = "rpds_py-0.23.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:271fa2184cf28bdded86bb6217c8e08d3a169fe0bbe9be5e8d96e8476b707122"}, - {file = "rpds_py-0.23.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b91cceb5add79ee563bd1f70b30896bd63bc5f78a11c1f00a1e931729ca4f1f4"}, - {file = "rpds_py-0.23.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3a6cb95074777f1ecda2ca4fa7717caa9ee6e534f42b7575a8f0d4cb0c24013"}, - {file = "rpds_py-0.23.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:50fb62f8d8364978478b12d5f03bf028c6bc2af04082479299139dc26edf4c64"}, - {file = "rpds_py-0.23.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c8f7e90b948dc9dcfff8003f1ea3af08b29c062f681c05fd798e36daa3f7e3e8"}, - {file = "rpds_py-0.23.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:5b98b6c953e5c2bda51ab4d5b4f172617d462eebc7f4bfdc7c7e6b423f6da957"}, - {file = "rpds_py-0.23.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:2893d778d4671ee627bac4037a075168b2673c57186fb1a57e993465dbd79a93"}, - {file = "rpds_py-0.23.1-cp313-cp313-win32.whl", hash = "sha256:2cfa07c346a7ad07019c33fb9a63cf3acb1f5363c33bc73014e20d9fe8b01cdd"}, - {file = "rpds_py-0.23.1-cp313-cp313-win_amd64.whl", hash = "sha256:3aaf141d39f45322e44fc2c742e4b8b4098ead5317e5f884770c8df0c332da70"}, - {file = "rpds_py-0.23.1-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:759462b2d0aa5a04be5b3e37fb8183615f47014ae6b116e17036b131985cb731"}, - {file = "rpds_py-0.23.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3e9212f52074fc9d72cf242a84063787ab8e21e0950d4d6709886fb62bcb91d5"}, - {file = "rpds_py-0.23.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9e9f3a3ac919406bc0414bbbd76c6af99253c507150191ea79fab42fdb35982a"}, - {file = "rpds_py-0.23.1-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c04ca91dda8a61584165825907f5c967ca09e9c65fe8966ee753a3f2b019fe1e"}, - {file = "rpds_py-0.23.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4ab923167cfd945abb9b51a407407cf19f5bee35001221f2911dc85ffd35ff4f"}, - {file = "rpds_py-0.23.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ed6f011bedca8585787e5082cce081bac3d30f54520097b2411351b3574e1219"}, - {file = "rpds_py-0.23.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6959bb9928c5c999aba4a3f5a6799d571ddc2c59ff49917ecf55be2bbb4e3722"}, - {file = "rpds_py-0.23.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1ed7de3c86721b4e83ac440751329ec6a1102229aa18163f84c75b06b525ad7e"}, - {file = "rpds_py-0.23.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:5fb89edee2fa237584e532fbf78f0ddd1e49a47c7c8cfa153ab4849dc72a35e6"}, - {file = "rpds_py-0.23.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:7e5413d2e2d86025e73f05510ad23dad5950ab8417b7fc6beaad99be8077138b"}, - {file = "rpds_py-0.23.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:d31ed4987d72aabdf521eddfb6a72988703c091cfc0064330b9e5f8d6a042ff5"}, - {file = "rpds_py-0.23.1-cp313-cp313t-win32.whl", hash = "sha256:f3429fb8e15b20961efca8c8b21432623d85db2228cc73fe22756c6637aa39e7"}, - {file = "rpds_py-0.23.1-cp313-cp313t-win_amd64.whl", hash = "sha256:d6f6512a90bd5cd9030a6237f5346f046c6f0e40af98657568fa45695d4de59d"}, - {file = "rpds_py-0.23.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:09cd7dbcb673eb60518231e02874df66ec1296c01a4fcd733875755c02014b19"}, - {file = "rpds_py-0.23.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c6760211eee3a76316cf328f5a8bd695b47b1626d21c8a27fb3b2473a884d597"}, - {file = "rpds_py-0.23.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:72e680c1518733b73c994361e4b06441b92e973ef7d9449feec72e8ee4f713da"}, - {file = "rpds_py-0.23.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ae28144c1daa61366205d32abd8c90372790ff79fc60c1a8ad7fd3c8553a600e"}, - {file = "rpds_py-0.23.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c698d123ce5d8f2d0cd17f73336615f6a2e3bdcedac07a1291bb4d8e7d82a05a"}, - {file = "rpds_py-0.23.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98b257ae1e83f81fb947a363a274c4eb66640212516becaff7bef09a5dceacaa"}, - {file = "rpds_py-0.23.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5c9ff044eb07c8468594d12602291c635da292308c8c619244e30698e7fc455a"}, - {file = 
"rpds_py-0.23.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7938c7b0599a05246d704b3f5e01be91a93b411d0d6cc62275f025293b8a11ce"}, - {file = "rpds_py-0.23.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:e9cb79ecedfc156c0692257ac7ed415243b6c35dd969baa461a6888fc79f2f07"}, - {file = "rpds_py-0.23.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:7b77e07233925bd33fc0022b8537774423e4c6680b6436316c5075e79b6384f4"}, - {file = "rpds_py-0.23.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:a970bfaf130c29a679b1d0a6e0f867483cea455ab1535fb427566a475078f27f"}, - {file = "rpds_py-0.23.1-cp39-cp39-win32.whl", hash = "sha256:4233df01a250b3984465faed12ad472f035b7cd5240ea3f7c76b7a7016084495"}, - {file = "rpds_py-0.23.1-cp39-cp39-win_amd64.whl", hash = "sha256:c617d7453a80e29d9973b926983b1e700a9377dbe021faa36041c78537d7b08c"}, - {file = "rpds_py-0.23.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c1f8afa346ccd59e4e5630d5abb67aba6a9812fddf764fd7eb11f382a345f8cc"}, - {file = "rpds_py-0.23.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:fad784a31869747df4ac968a351e070c06ca377549e4ace94775aaa3ab33ee06"}, - {file = "rpds_py-0.23.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5a96fcac2f18e5a0a23a75cd27ce2656c66c11c127b0318e508aab436b77428"}, - {file = "rpds_py-0.23.1-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3e77febf227a1dc3220159355dba68faa13f8dca9335d97504abf428469fb18b"}, - {file = "rpds_py-0.23.1-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:26bb3e8de93443d55e2e748e9fd87deb5f8075ca7bc0502cfc8be8687d69a2ec"}, - {file = "rpds_py-0.23.1-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:db7707dde9143a67b8812c7e66aeb2d843fe33cc8e374170f4d2c50bd8f2472d"}, - {file = "rpds_py-0.23.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1eedaaccc9bb66581d4ae7c50e15856e335e57ef2734dbc5fd8ba3e2a4ab3cb6"}, - {file = "rpds_py-0.23.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28358c54fffadf0ae893f6c1050e8f8853e45df22483b7fff2f6ab6152f5d8bf"}, - {file = "rpds_py-0.23.1-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:633462ef7e61d839171bf206551d5ab42b30b71cac8f10a64a662536e057fdef"}, - {file = "rpds_py-0.23.1-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:a98f510d86f689fcb486dc59e6e363af04151e5260ad1bdddb5625c10f1e95f8"}, - {file = "rpds_py-0.23.1-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:e0397dd0b3955c61ef9b22838144aa4bef6f0796ba5cc8edfc64d468b93798b4"}, - {file = "rpds_py-0.23.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:75307599f0d25bf6937248e5ac4e3bde5ea72ae6618623b86146ccc7845ed00b"}, - {file = "rpds_py-0.23.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3614d280bf7aab0d3721b5ce0e73434acb90a2c993121b6e81a1c15c665298ac"}, - {file = "rpds_py-0.23.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:e5963ea87f88bddf7edd59644a35a0feecf75f8985430124c253612d4f7d27ae"}, - {file = "rpds_py-0.23.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad76f44f70aac3a54ceb1813ca630c53415da3a24fd93c570b2dfb4856591017"}, - {file = "rpds_py-0.23.1-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2c6ae11e6e93728d86aafc51ced98b1658a0080a7dd9417d24bfb955bb09c3c2"}, - {file = 
"rpds_py-0.23.1-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fc869af5cba24d45fb0399b0cfdbcefcf6910bf4dee5d74036a57cf5264b3ff4"}, - {file = "rpds_py-0.23.1-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c76b32eb2ab650a29e423525e84eb197c45504b1c1e6e17b6cc91fcfeb1a4b1d"}, - {file = "rpds_py-0.23.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4263320ed887ed843f85beba67f8b2d1483b5947f2dc73a8b068924558bfeace"}, - {file = "rpds_py-0.23.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7f9682a8f71acdf59fd554b82b1c12f517118ee72c0f3944eda461606dfe7eb9"}, - {file = "rpds_py-0.23.1-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:754fba3084b70162a6b91efceee8a3f06b19e43dac3f71841662053c0584209a"}, - {file = "rpds_py-0.23.1-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:a1c66e71ecfd2a4acf0e4bd75e7a3605afa8f9b28a3b497e4ba962719df2be57"}, - {file = "rpds_py-0.23.1-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:8d67beb6002441faef8251c45e24994de32c4c8686f7356a1f601ad7c466f7c3"}, - {file = "rpds_py-0.23.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a1e17d8dc8e57d8e0fd21f8f0f0a5211b3fa258b2e444c2053471ef93fe25a00"}, - {file = "rpds_py-0.23.1.tar.gz", hash = "sha256:7f3240dcfa14d198dba24b8b9cb3b108c06b68d45b7babd9eefc1038fdf7e707"}, + {file = "rpds_py-0.24.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:006f4342fe729a368c6df36578d7a348c7c716be1da0a1a0f86e3021f8e98724"}, + {file = "rpds_py-0.24.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2d53747da70a4e4b17f559569d5f9506420966083a31c5fbd84e764461c4444b"}, + {file = "rpds_py-0.24.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8acd55bd5b071156bae57b555f5d33697998752673b9de554dd82f5b5352727"}, + {file = "rpds_py-0.24.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7e80d375134ddb04231a53800503752093dbb65dad8dabacce2c84cccc78e964"}, + {file = "rpds_py-0.24.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:60748789e028d2a46fc1c70750454f83c6bdd0d05db50f5ae83e2db500b34da5"}, + {file = "rpds_py-0.24.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6e1daf5bf6c2be39654beae83ee6b9a12347cb5aced9a29eecf12a2d25fff664"}, + {file = "rpds_py-0.24.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1b221c2457d92a1fb3c97bee9095c874144d196f47c038462ae6e4a14436f7bc"}, + {file = "rpds_py-0.24.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:66420986c9afff67ef0c5d1e4cdc2d0e5262f53ad11e4f90e5e22448df485bf0"}, + {file = "rpds_py-0.24.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:43dba99f00f1d37b2a0265a259592d05fcc8e7c19d140fe51c6e6f16faabeb1f"}, + {file = "rpds_py-0.24.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:a88c0d17d039333a41d9bf4616bd062f0bd7aa0edeb6cafe00a2fc2a804e944f"}, + {file = "rpds_py-0.24.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cc31e13ce212e14a539d430428cd365e74f8b2d534f8bc22dd4c9c55b277b875"}, + {file = "rpds_py-0.24.0-cp310-cp310-win32.whl", hash = "sha256:fc2c1e1b00f88317d9de6b2c2b39b012ebbfe35fe5e7bef980fd2a91f6100a07"}, + {file = "rpds_py-0.24.0-cp310-cp310-win_amd64.whl", hash = "sha256:c0145295ca415668420ad142ee42189f78d27af806fcf1f32a18e51d47dd2052"}, + {file = "rpds_py-0.24.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = 
"sha256:2d3ee4615df36ab8eb16c2507b11e764dcc11fd350bbf4da16d09cda11fcedef"}, + {file = "rpds_py-0.24.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e13ae74a8a3a0c2f22f450f773e35f893484fcfacb00bb4344a7e0f4f48e1f97"}, + {file = "rpds_py-0.24.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf86f72d705fc2ef776bb7dd9e5fbba79d7e1f3e258bf9377f8204ad0fc1c51e"}, + {file = "rpds_py-0.24.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c43583ea8517ed2e780a345dd9960896afc1327e8cf3ac8239c167530397440d"}, + {file = "rpds_py-0.24.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4cd031e63bc5f05bdcda120646a0d32f6d729486d0067f09d79c8db5368f4586"}, + {file = "rpds_py-0.24.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:34d90ad8c045df9a4259c47d2e16a3f21fdb396665c94520dbfe8766e62187a4"}, + {file = "rpds_py-0.24.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e838bf2bb0b91ee67bf2b889a1a841e5ecac06dd7a2b1ef4e6151e2ce155c7ae"}, + {file = "rpds_py-0.24.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04ecf5c1ff4d589987b4d9882872f80ba13da7d42427234fce8f22efb43133bc"}, + {file = "rpds_py-0.24.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:630d3d8ea77eabd6cbcd2ea712e1c5cecb5b558d39547ac988351195db433f6c"}, + {file = "rpds_py-0.24.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:ebcb786b9ff30b994d5969213a8430cbb984cdd7ea9fd6df06663194bd3c450c"}, + {file = "rpds_py-0.24.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:174e46569968ddbbeb8a806d9922f17cd2b524aa753b468f35b97ff9c19cb718"}, + {file = "rpds_py-0.24.0-cp311-cp311-win32.whl", hash = "sha256:5ef877fa3bbfb40b388a5ae1cb00636a624690dcb9a29a65267054c9ea86d88a"}, + {file = "rpds_py-0.24.0-cp311-cp311-win_amd64.whl", hash = "sha256:e274f62cbd274359eff63e5c7e7274c913e8e09620f6a57aae66744b3df046d6"}, + {file = "rpds_py-0.24.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:d8551e733626afec514b5d15befabea0dd70a343a9f23322860c4f16a9430205"}, + {file = "rpds_py-0.24.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0e374c0ce0ca82e5b67cd61fb964077d40ec177dd2c4eda67dba130de09085c7"}, + {file = "rpds_py-0.24.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d69d003296df4840bd445a5d15fa5b6ff6ac40496f956a221c4d1f6f7b4bc4d9"}, + {file = "rpds_py-0.24.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8212ff58ac6dfde49946bea57474a386cca3f7706fc72c25b772b9ca4af6b79e"}, + {file = "rpds_py-0.24.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:528927e63a70b4d5f3f5ccc1fa988a35456eb5d15f804d276709c33fc2f19bda"}, + {file = "rpds_py-0.24.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a824d2c7a703ba6daaca848f9c3d5cb93af0505be505de70e7e66829affd676e"}, + {file = "rpds_py-0.24.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:44d51febb7a114293ffd56c6cf4736cb31cd68c0fddd6aa303ed09ea5a48e029"}, + {file = "rpds_py-0.24.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3fab5f4a2c64a8fb64fc13b3d139848817a64d467dd6ed60dcdd6b479e7febc9"}, + {file = "rpds_py-0.24.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9be4f99bee42ac107870c61dfdb294d912bf81c3c6d45538aad7aecab468b6b7"}, + {file = "rpds_py-0.24.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:564c96b6076a98215af52f55efa90d8419cc2ef45d99e314fddefe816bc24f91"}, + 
{file = "rpds_py-0.24.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:75a810b7664c17f24bf2ffd7f92416c00ec84b49bb68e6a0d93e542406336b56"}, + {file = "rpds_py-0.24.0-cp312-cp312-win32.whl", hash = "sha256:f6016bd950be4dcd047b7475fdf55fb1e1f59fc7403f387be0e8123e4a576d30"}, + {file = "rpds_py-0.24.0-cp312-cp312-win_amd64.whl", hash = "sha256:998c01b8e71cf051c28f5d6f1187abbdf5cf45fc0efce5da6c06447cba997034"}, + {file = "rpds_py-0.24.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:3d2d8e4508e15fc05b31285c4b00ddf2e0eb94259c2dc896771966a163122a0c"}, + {file = "rpds_py-0.24.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0f00c16e089282ad68a3820fd0c831c35d3194b7cdc31d6e469511d9bffc535c"}, + {file = "rpds_py-0.24.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:951cc481c0c395c4a08639a469d53b7d4afa252529a085418b82a6b43c45c240"}, + {file = "rpds_py-0.24.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c9ca89938dff18828a328af41ffdf3902405a19f4131c88e22e776a8e228c5a8"}, + {file = "rpds_py-0.24.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ed0ef550042a8dbcd657dfb284a8ee00f0ba269d3f2286b0493b15a5694f9fe8"}, + {file = "rpds_py-0.24.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b2356688e5d958c4d5cb964af865bea84db29971d3e563fb78e46e20fe1848b"}, + {file = "rpds_py-0.24.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78884d155fd15d9f64f5d6124b486f3d3f7fd7cd71a78e9670a0f6f6ca06fb2d"}, + {file = "rpds_py-0.24.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6a4a535013aeeef13c5532f802708cecae8d66c282babb5cd916379b72110cf7"}, + {file = "rpds_py-0.24.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:84e0566f15cf4d769dade9b366b7b87c959be472c92dffb70462dd0844d7cbad"}, + {file = "rpds_py-0.24.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:823e74ab6fbaa028ec89615ff6acb409e90ff45580c45920d4dfdddb069f2120"}, + {file = "rpds_py-0.24.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:c61a2cb0085c8783906b2f8b1f16a7e65777823c7f4d0a6aaffe26dc0d358dd9"}, + {file = "rpds_py-0.24.0-cp313-cp313-win32.whl", hash = "sha256:60d9b630c8025b9458a9d114e3af579a2c54bd32df601c4581bd054e85258143"}, + {file = "rpds_py-0.24.0-cp313-cp313-win_amd64.whl", hash = "sha256:6eea559077d29486c68218178ea946263b87f1c41ae7f996b1f30a983c476a5a"}, + {file = "rpds_py-0.24.0-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:d09dc82af2d3c17e7dd17120b202a79b578d79f2b5424bda209d9966efeed114"}, + {file = "rpds_py-0.24.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5fc13b44de6419d1e7a7e592a4885b323fbc2f46e1f22151e3a8ed3b8b920405"}, + {file = "rpds_py-0.24.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c347a20d79cedc0a7bd51c4d4b7dbc613ca4e65a756b5c3e57ec84bd43505b47"}, + {file = "rpds_py-0.24.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:20f2712bd1cc26a3cc16c5a1bfee9ed1abc33d4cdf1aabd297fe0eb724df4272"}, + {file = "rpds_py-0.24.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aad911555286884be1e427ef0dc0ba3929e6821cbeca2194b13dc415a462c7fd"}, + {file = "rpds_py-0.24.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0aeb3329c1721c43c58cae274d7d2ca85c1690d89485d9c63a006cb79a85771a"}, + {file = "rpds_py-0.24.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:2a0f156e9509cee987283abd2296ec816225145a13ed0391df8f71bf1d789e2d"}, + {file = "rpds_py-0.24.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:aa6800adc8204ce898c8a424303969b7aa6a5e4ad2789c13f8648739830323b7"}, + {file = "rpds_py-0.24.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:a18fc371e900a21d7392517c6f60fe859e802547309e94313cd8181ad9db004d"}, + {file = "rpds_py-0.24.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:9168764133fd919f8dcca2ead66de0105f4ef5659cbb4fa044f7014bed9a1797"}, + {file = "rpds_py-0.24.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:5f6e3cec44ba05ee5cbdebe92d052f69b63ae792e7d05f1020ac5e964394080c"}, + {file = "rpds_py-0.24.0-cp313-cp313t-win32.whl", hash = "sha256:8ebc7e65ca4b111d928b669713865f021b7773350eeac4a31d3e70144297baba"}, + {file = "rpds_py-0.24.0-cp313-cp313t-win_amd64.whl", hash = "sha256:675269d407a257b8c00a6b58205b72eec8231656506c56fd429d924ca00bb350"}, + {file = "rpds_py-0.24.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a36b452abbf29f68527cf52e181fced56685731c86b52e852053e38d8b60bc8d"}, + {file = "rpds_py-0.24.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8b3b397eefecec8e8e39fa65c630ef70a24b09141a6f9fc17b3c3a50bed6b50e"}, + {file = "rpds_py-0.24.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cdabcd3beb2a6dca7027007473d8ef1c3b053347c76f685f5f060a00327b8b65"}, + {file = "rpds_py-0.24.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5db385bacd0c43f24be92b60c857cf760b7f10d8234f4bd4be67b5b20a7c0b6b"}, + {file = "rpds_py-0.24.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8097b3422d020ff1c44effc40ae58e67d93e60d540a65649d2cdaf9466030791"}, + {file = "rpds_py-0.24.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:493fe54318bed7d124ce272fc36adbf59d46729659b2c792e87c3b95649cdee9"}, + {file = "rpds_py-0.24.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8aa362811ccdc1f8dadcc916c6d47e554169ab79559319ae9fae7d7752d0d60c"}, + {file = "rpds_py-0.24.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d8f9a6e7fd5434817526815f09ea27f2746c4a51ee11bb3439065f5fc754db58"}, + {file = "rpds_py-0.24.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8205ee14463248d3349131bb8099efe15cd3ce83b8ef3ace63c7e976998e7124"}, + {file = "rpds_py-0.24.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:921ae54f9ecba3b6325df425cf72c074cd469dea843fb5743a26ca7fb2ccb149"}, + {file = "rpds_py-0.24.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:32bab0a56eac685828e00cc2f5d1200c548f8bc11f2e44abf311d6b548ce2e45"}, + {file = "rpds_py-0.24.0-cp39-cp39-win32.whl", hash = "sha256:f5c0ed12926dec1dfe7d645333ea59cf93f4d07750986a586f511c0bc61fe103"}, + {file = "rpds_py-0.24.0-cp39-cp39-win_amd64.whl", hash = "sha256:afc6e35f344490faa8276b5f2f7cbf71f88bc2cda4328e00553bd451728c571f"}, + {file = "rpds_py-0.24.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:619ca56a5468f933d940e1bf431c6f4e13bef8e688698b067ae68eb4f9b30e3a"}, + {file = "rpds_py-0.24.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:4b28e5122829181de1898c2c97f81c0b3246d49f585f22743a1246420bb8d399"}, + {file = "rpds_py-0.24.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8e5ab32cf9eb3647450bc74eb201b27c185d3857276162c101c0f8c6374e098"}, + {file = "rpds_py-0.24.0-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:208b3a70a98cf3710e97cabdc308a51cd4f28aa6e7bb11de3d56cd8b74bab98d"}, + {file = "rpds_py-0.24.0-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bbc4362e06f950c62cad3d4abf1191021b2ffaf0b31ac230fbf0526453eee75e"}, + {file = "rpds_py-0.24.0-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ebea2821cdb5f9fef44933617be76185b80150632736f3d76e54829ab4a3b4d1"}, + {file = "rpds_py-0.24.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b9a4df06c35465ef4d81799999bba810c68d29972bf1c31db61bfdb81dd9d5bb"}, + {file = "rpds_py-0.24.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d3aa13bdf38630da298f2e0d77aca967b200b8cc1473ea05248f6c5e9c9bdb44"}, + {file = "rpds_py-0.24.0-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:041f00419e1da7a03c46042453598479f45be3d787eb837af382bfc169c0db33"}, + {file = "rpds_py-0.24.0-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:d8754d872a5dfc3c5bf9c0e059e8107451364a30d9fd50f1f1a85c4fb9481164"}, + {file = "rpds_py-0.24.0-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:896c41007931217a343eff197c34513c154267636c8056fb409eafd494c3dcdc"}, + {file = "rpds_py-0.24.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:92558d37d872e808944c3c96d0423b8604879a3d1c86fdad508d7ed91ea547d5"}, + {file = "rpds_py-0.24.0-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f9e0057a509e096e47c87f753136c9b10d7a91842d8042c2ee6866899a717c0d"}, + {file = "rpds_py-0.24.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:d6e109a454412ab82979c5b1b3aee0604eca4bbf9a02693bb9df027af2bfa91a"}, + {file = "rpds_py-0.24.0-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc1c892b1ec1f8cbd5da8de287577b455e388d9c328ad592eabbdcb6fc93bee5"}, + {file = "rpds_py-0.24.0-pp311-pypy311_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9c39438c55983d48f4bb3487734d040e22dad200dab22c41e331cee145e7a50d"}, + {file = "rpds_py-0.24.0-pp311-pypy311_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9d7e8ce990ae17dda686f7e82fd41a055c668e13ddcf058e7fb5e9da20b57793"}, + {file = "rpds_py-0.24.0-pp311-pypy311_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9ea7f4174d2e4194289cb0c4e172d83e79a6404297ff95f2875cf9ac9bced8ba"}, + {file = "rpds_py-0.24.0-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb2954155bb8f63bb19d56d80e5e5320b61d71084617ed89efedb861a684baea"}, + {file = "rpds_py-0.24.0-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04f2b712a2206e13800a8136b07aaedc23af3facab84918e7aa89e4be0260032"}, + {file = "rpds_py-0.24.0-pp311-pypy311_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:eda5c1e2a715a4cbbca2d6d304988460942551e4e5e3b7457b50943cd741626d"}, + {file = "rpds_py-0.24.0-pp311-pypy311_pp73-musllinux_1_2_i686.whl", hash = "sha256:9abc80fe8c1f87218db116016de575a7998ab1629078c90840e8d11ab423ee25"}, + {file = "rpds_py-0.24.0-pp311-pypy311_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:6a727fd083009bc83eb83d6950f0c32b3c94c8b80a9b667c87f4bd1274ca30ba"}, + {file = "rpds_py-0.24.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e0f3ef95795efcd3b2ec3fe0a5bcfb5dadf5e3996ea2117427e524d4fbf309c6"}, + {file = "rpds_py-0.24.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:2c13777ecdbbba2077670285dd1fe50828c8742f6a4119dbef6f83ea13ad10fb"}, + {file = 
"rpds_py-0.24.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79e8d804c2ccd618417e96720ad5cd076a86fa3f8cb310ea386a3e6229bae7d1"}, + {file = "rpds_py-0.24.0-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fd822f019ccccd75c832deb7aa040bb02d70a92eb15a2f16c7987b7ad4ee8d83"}, + {file = "rpds_py-0.24.0-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0047638c3aa0dbcd0ab99ed1e549bbf0e142c9ecc173b6492868432d8989a046"}, + {file = "rpds_py-0.24.0-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a5b66d1b201cc71bc3081bc2f1fc36b0c1f268b773e03bbc39066651b9e18391"}, + {file = "rpds_py-0.24.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dbcbb6db5582ea33ce46a5d20a5793134b5365110d84df4e30b9d37c6fd40ad3"}, + {file = "rpds_py-0.24.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:63981feca3f110ed132fd217bf7768ee8ed738a55549883628ee3da75bb9cb78"}, + {file = "rpds_py-0.24.0-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:3a55fc10fdcbf1a4bd3c018eea422c52cf08700cf99c28b5cb10fe97ab77a0d3"}, + {file = "rpds_py-0.24.0-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:c30ff468163a48535ee7e9bf21bd14c7a81147c0e58a36c1078289a8ca7af0bd"}, + {file = "rpds_py-0.24.0-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:369d9c6d4c714e36d4a03957b4783217a3ccd1e222cdd67d464a3a479fc17796"}, + {file = "rpds_py-0.24.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:24795c099453e3721fda5d8ddd45f5dfcc8e5a547ce7b8e9da06fecc3832e26f"}, + {file = "rpds_py-0.24.0.tar.gz", hash = "sha256:772cc1b2cd963e7e17e6cc55fe0371fb9c704d63e44cacec7b9b7f523b78919e"}, ] [[package]] @@ -2080,13 +2094,13 @@ referencing = "*" [[package]] name = "types-protobuf" -version = "5.29.1.20250315" +version = "5.29.1.20250403" description = "Typing stubs for protobuf" optional = false python-versions = ">=3.9" files = [ - {file = "types_protobuf-5.29.1.20250315-py3-none-any.whl", hash = "sha256:57efd51fd0979d1f5e1d94053d1e7cfff9c028e8d05b17e341b91a1c7fce37c4"}, - {file = "types_protobuf-5.29.1.20250315.tar.gz", hash = "sha256:0b05bc34621d046de54b94fddd5f4eb3bf849fe2e13a50f8fb8e89f35045ff49"}, + {file = "types_protobuf-5.29.1.20250403-py3-none-any.whl", hash = "sha256:c71de04106a2d54e5b2173d0a422058fae0ef2d058d70cf369fb797bf61ffa59"}, + {file = "types_protobuf-5.29.1.20250403.tar.gz", hash = "sha256:7ff44f15022119c9d7558ce16e78b2d485bf7040b4fadced4dd069bb5faf77a2"}, ] [[package]] @@ -2102,13 +2116,13 @@ files = [ [[package]] name = "types-requests" -version = "2.32.0.20250306" +version = "2.32.0.20250328" description = "Typing stubs for requests" optional = false python-versions = ">=3.9" files = [ - {file = "types_requests-2.32.0.20250306-py3-none-any.whl", hash = "sha256:25f2cbb5c8710b2022f8bbee7b2b66f319ef14aeea2f35d80f18c9dbf3b60a0b"}, - {file = "types_requests-2.32.0.20250306.tar.gz", hash = "sha256:0962352694ec5b2f95fda877ee60a159abdf84a0fc6fdace599f20acb41a03d1"}, + {file = "types_requests-2.32.0.20250328-py3-none-any.whl", hash = "sha256:72ff80f84b15eb3aa7a8e2625fffb6a93f2ad5a0c20215fc1dcfa61117bcb2a2"}, + {file = "types_requests-2.32.0.20250328.tar.gz", hash = "sha256:c9e67228ea103bd811c96984fac36ed2ae8da87a36a633964a21f199d60baf32"}, ] [package.dependencies] @@ -2116,24 +2130,38 @@ urllib3 = ">=2" [[package]] name = "typing-extensions" -version = "4.12.2" +version = "4.13.0" description = "Backported and Experimental Type Hints for Python 
3.8+" optional = false python-versions = ">=3.8" files = [ - {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, - {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, + {file = "typing_extensions-4.13.0-py3-none-any.whl", hash = "sha256:c8dd92cc0d6425a97c18fbb9d1954e5ff92c1ca881a309c45f06ebc0b79058e5"}, + {file = "typing_extensions-4.13.0.tar.gz", hash = "sha256:0a4ac55a5820789d87e297727d229866c9650f6521b64206413c4fbada24d95b"}, +] + +[[package]] +name = "typing-inspection" +version = "0.4.0" +description = "Runtime typing introspection tools" +optional = false +python-versions = ">=3.9" +files = [ + {file = "typing_inspection-0.4.0-py3-none-any.whl", hash = "sha256:50e72559fcd2a6367a19f7a7e610e6afcb9fac940c650290eed893d61386832f"}, + {file = "typing_inspection-0.4.0.tar.gz", hash = "sha256:9765c87de36671694a67904bf2c96e395be9c6439bb6c87b5142569dcdd65122"}, ] +[package.dependencies] +typing-extensions = ">=4.12.0" + [[package]] name = "tzdata" -version = "2025.1" +version = "2025.2" description = "Provider of IANA time zone data" optional = false python-versions = ">=2" files = [ - {file = "tzdata-2025.1-py2.py3-none-any.whl", hash = "sha256:7e127113816800496f027041c570f50bcd464a020098a3b6b199517772303639"}, - {file = "tzdata-2025.1.tar.gz", hash = "sha256:24894909e88cdb28bd1636c6887801df64cb485bd593f2fd83ef29075a81d694"}, + {file = "tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8"}, + {file = "tzdata-2025.2.tar.gz", hash = "sha256:b60a638fcc0daffadf82fe0f57e53d06bdec2f36c4df66280ae79bce6bd6f2b9"}, ] [[package]] @@ -2263,4 +2291,4 @@ type = ["pytest-mypy"] [metadata] lock-version = "2.0" python-versions = ">=3.9,<4" -content-hash = "f0c2cc54d36eefaf4850d23468d74ce9ed038ac365b4367bd6cc35524de7e942" +content-hash = "6b18fb6088ede49c2e52a1103a46481d57959171b5f2f6ee13cc3089a3804f5d" diff --git a/pyproject.toml b/pyproject.toml index 101c57dd..8565e6b1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -3,7 +3,7 @@ name = "humanloop" [tool.poetry] name = "humanloop" -version = "0.8.29" +version = "0.8.29b" description = "" readme = "README.md" authors = [] @@ -39,7 +39,7 @@ deepdiff = "^8.2.0" httpx = ">=0.21.2" httpx-sse = "0.4.0" mmh3 = "^5.1.0" -opentelemetry-api = ">=1.28.0" +opentelemetry-api = ">=1.27.0" opentelemetry-instrumentation-anthropic = ">=0.20" opentelemetry-instrumentation-bedrock = ">=0.15" opentelemetry-instrumentation-cohere = ">=0.20" @@ -47,7 +47,7 @@ opentelemetry-instrumentation-groq = ">=0.29" opentelemetry-instrumentation-openai = ">=0.20" opentelemetry-instrumentation-replicate = ">=0.20" opentelemetry-proto = ">=1.30.0" -opentelemetry-sdk = ">=1.28.0" +opentelemetry-sdk = ">=1.27.0" parse = ">=1" protobuf = ">=5.29.3" pydantic = ">= 1.9.2" diff --git a/reference.md b/reference.md index bed97357..a8197494 100644 --- a/reference.md +++ b/reference.md @@ -2514,6 +2514,212 @@ client.prompts.update_monitoring( ## Tools +
client.tools.call(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Call a Tool. + +Calling a Tool with inputs runs the Tool's source code and logs the result and metadata to Humanloop. + +You can use the query parameters `version_id` or `environment` to target +an existing version of the Tool. Otherwise, the default deployed version will be chosen. + +Instead of targeting an existing version explicitly, you can pass in +Tool details in the request body. In this case, we will check if the details correspond +to an existing version of the Tool. If they do not, we will create a new version. This is helpful +when you are storing or deriving your Tool details in code. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from humanloop import Humanloop + +client = Humanloop( + api_key="YOUR_API_KEY", +) +client.tools.call() + +``` +
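+For example, to call the version of a Tool deployed to a specific Environment with a set of inputs, the parameters documented below can be combined. This is a minimal sketch; the path, Environment name, and input keys are placeholders:
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(api_key="YOUR_API_KEY")
+
+# Call the Tool stored at a given path, targeting the version deployed
+# to the "production" Environment (path and Environment are placeholders).
+response = client.tools.call(
+    path="utils/summarizer",
+    environment="production",
+    inputs={"text": "Some text to summarize."},
+)
+```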
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**version_id:** `typing.Optional[str]` — A specific Version ID of the Tool to call. + +
+
+ +
+
+ +**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to call. + +
+
+ +
+
+ +**path:** `typing.Optional[str]` — Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. + +
+
+ +
+
+ +**id:** `typing.Optional[str]` — ID for an existing Tool. + +
+
+ +
+
+ +**tool:** `typing.Optional[ToolKernelRequestParams]` — Details of your Tool. A new Tool version will be created if the provided details are new. + +
+
+ +
+
+ +**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template. + +
+
+ +
+
+ +**source:** `typing.Optional[str]` — Identifies where the model was called from. + +
+
+ +
+
+ +**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record. + +
+
+ +
+
+ +**start_time:** `typing.Optional[dt.datetime]` — When the logged event started. + +
+
+ +
+
+ +**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended. + +
+
+ +
+
+ +**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + +
+
+ +
+
+ +**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + +
+
+ +
+
+ +**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace. + +
+
+ +
+
+ +**user:** `typing.Optional[str]` — End-user ID related to the Log. + +
+
+ +
+
+ +**tool_call_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated with. + +
+
+ +
+
+ +**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop. + +
+
+ +
+
+ +**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+
client.tools.log(...)
@@ -2620,6 +2826,14 @@ client.tools.log(
+**tool:** `typing.Optional[ToolKernelRequestParams]` — Details of your Tool. A new Tool version will be created if the provided details are new. + +
+
+ +
+
+ **start_time:** `typing.Optional[dt.datetime]` — When the logged event started.
@@ -2772,14 +2986,6 @@ client.tools.log(
-**tool:** `typing.Optional[ToolKernelRequestParams]` — Details of your Tool. A new Tool version will be created if the provided details are new. - -
-
- -
-
- **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -4073,25 +4279,10 @@ client.tools.update_monitoring(
-## Datasets -
client.datasets.list(...) -
-
- -#### 📝 Description - +
client.tools.get_environment_variables(...)
-
-
- -List all Datasets. -
-
-
-
- #### 🔌 Usage
@@ -4106,14 +4297,9 @@ from humanloop import Humanloop client = Humanloop( api_key="YOUR_API_KEY", ) -response = client.datasets.list( - size=1, +client.tools.get_environment_variables( + id="id", ) -for item in response: - yield item -# alternatively, you can paginate page-by-page -for page in response.iter_pages(): - yield page ```
@@ -4129,7 +4315,7 @@ for page in response.iter_pages():
-**page:** `typing.Optional[int]` — Page offset for pagination. +**id:** `str` — Unique identifier for File.
@@ -4137,34 +4323,254 @@ for page in response.iter_pages():
-**size:** `typing.Optional[int]` — Page size for pagination. Number of Datasets to fetch. +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
- -
-
- -**name:** `typing.Optional[str]` — Case-insensitive filter for Dataset name. -
-
-
-**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Dataset. This filter matches against both email address and name of users. -
+
+
client.tools.add_environment_variable(...)
-**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Datasets by - -
-
+#### 📝 Description + +
+
+ +
+
+ +Add an environment variable to a Tool. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from humanloop import Humanloop + +client = Humanloop( + api_key="YOUR_API_KEY", +) +client.tools.add_environment_variable( + id="id", + request=[{"name": "name", "value": "value"}], +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` — Unique identifier for Tool. + +
+
+ +
+
+ +**request:** `typing.Sequence[FileEnvironmentVariableRequestParams]` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + + + +
+ +
client.tools.delete_environment_variable(...) +
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from humanloop import Humanloop + +client = Humanloop( + api_key="YOUR_API_KEY", +) +client.tools.delete_environment_variable( + id="id", + name="name", +) + +``` +
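+Together with `add_environment_variable` and `get_environment_variables`, this supports a simple round trip. A minimal sketch, assuming a placeholder Tool ID and variable name:
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(api_key="YOUR_API_KEY")
+
+tool_id = "id"  # placeholder Tool ID
+
+# Add a variable, inspect what is currently set, then remove it again.
+client.tools.add_environment_variable(
+    id=tool_id,
+    request=[{"name": "API_BASE", "value": "https://example.com"}],
+)
+env_vars = client.tools.get_environment_variables(id=tool_id)
+client.tools.delete_environment_variable(id=tool_id, name="API_BASE")
+```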
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` — Unique identifier for File. + +
+
+ +
+
+ +**name:** `str` — Name of the Environment Variable to delete. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +## Datasets +
client.datasets.list(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +List all Datasets. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from humanloop import Humanloop + +client = Humanloop( + api_key="YOUR_API_KEY", +) +response = client.datasets.list( + size=1, +) +for item in response: + yield item +# alternatively, you can paginate page-by-page +for page in response.iter_pages(): + yield page + +``` +
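+The example above is written for use inside a generator (hence the `yield` statements); in a plain script the paginated response can simply be iterated directly. A minimal sketch, assuming each Dataset exposes a `name` attribute:
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(api_key="YOUR_API_KEY")
+
+# Iterate over all Datasets; pages of 50 are fetched behind the scenes.
+for dataset in client.datasets.list(size=50):
+    print(dataset.name)
+```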
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**page:** `typing.Optional[int]` — Page offset for pagination. + +
+
+ +
+
+ +**size:** `typing.Optional[int]` — Page size for pagination. Number of Datasets to fetch. + +
+
+ +
+
+ +**name:** `typing.Optional[str]` — Case-insensitive filter for Dataset name. + +
+
+ +
+
+ +**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Dataset. This filter matches against both email address and name of users. + +
+
+ +
+
+ +**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Datasets by. + +
+
@@ -8208,6 +8614,2207 @@ client.flows.update_monitoring(
+
+
+
+ +## Agents +
client.agents.log(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Create an Agent Log. + +You can use the query parameters `version_id` or `environment` to target +an existing version of the Agent. Otherwise, the default deployed version will be chosen. + +If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete` +in order to trigger Evaluators. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from humanloop import Humanloop + +client = Humanloop( + api_key="YOUR_API_KEY", +) +client.agents.log() + +``` +
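+For example, to create an `incomplete` Agent Log so that the Agent's monitoring Evaluators wait until the Log is explicitly completed, something like the following can be used. This is a sketch only; the path and inputs are placeholders:
+
+```python
+from humanloop import Humanloop
+
+client = Humanloop(api_key="YOUR_API_KEY")
+
+# Create the Log as incomplete so monitoring Evaluators hold off.
+log = client.agents.log(
+    path="assistants/support-agent",
+    inputs={"query": "How do I reset my password?"},
+    log_status="incomplete",
+)
+
+# ... do the agent's work, then update the Log to `complete`
+# (see `client.agents.update_log` below) to trigger Evaluators.
+```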
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**version_id:** `typing.Optional[str]` — A specific Version ID of the Agent to log to. + +
+
+ +
+
+ +**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to. + +
+
+ +
+
+ +**run_id:** `typing.Optional[str]` — Unique identifier for the Run to associate the Log to. + +
+
+ +
+
+ +**path:** `typing.Optional[str]` — Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. + +
+
+ +
+
+ +**id:** `typing.Optional[str]` — ID for an existing Agent. + +
+
+ +
+
+ +**output_message:** `typing.Optional[ChatMessageParams]` — The message returned by the provider. + +
+
+ +
+
+ +**prompt_tokens:** `typing.Optional[int]` — Number of tokens in the prompt used to generate the output. + +
+
+ +
+
+ +**reasoning_tokens:** `typing.Optional[int]` — Number of reasoning tokens used to generate the output. + +
+
+ +
+
+ +**output_tokens:** `typing.Optional[int]` — Number of tokens in the output generated by the model. + +
+
+ +
+
+ +**prompt_cost:** `typing.Optional[float]` — Cost in dollars associated with the tokens in the prompt. + +
+
+ +
+
+ +**output_cost:** `typing.Optional[float]` — Cost in dollars associated with the tokens in the output. + +
+
+ +
+
+ +**finish_reason:** `typing.Optional[str]` — Reason the generation finished. + +
+
+ +
+
+ +**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — The messages passed to the provider chat endpoint. + +
+
+ +
+
+ +**tool_choice:** `typing.Optional[AgentLogRequestToolChoiceParams]` + +Controls how the model uses tools. The following options are supported: +- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. +- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. +- `'required'` means the model must call one or more of the provided tools. +- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function. + +
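+For instance, forcing the model to call one specific function uses the dictionary form shown in the last bullet (the function name below is a placeholder):
+
+```python
+# Hypothetical: force the model to call the "get_weather" function.
+tool_choice = {
+    "type": "function",
+    "function": {"name": "get_weather"},
+}
+```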
+
+ +
+
+ +**agent:** `typing.Optional[AgentKernelRequestParams]` — Details of your Agent. A new Agent version will be created if the provided details are new. + +
+
+ +
+
+ +**start_time:** `typing.Optional[dt.datetime]` — When the logged event started. + +
+
+ +
+
+ +**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended. + +
+
+ +
+
+ +**output:** `typing.Optional[str]` — Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + +
+
+ +
+
+ +**created_at:** `typing.Optional[dt.datetime]` — User defined timestamp for when the log was created. + +
+
+ +
+
+ +**error:** `typing.Optional[str]` — Error message if the log is an error. + +
+
+ +
+
+ +**provider_latency:** `typing.Optional[float]` — Duration of the logged event in seconds. + +
+
+ +
+
+ +**stdout:** `typing.Optional[str]` — Captured log and debug statements. + +
+
+ +
+
+ +**provider_request:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw request sent to provider. + +
+
+ +
+
+ +**provider_response:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Raw response received from the provider. + +
+
+ +
+
+ +**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template. + +
+
+ +
+
+ +**source:** `typing.Optional[str]` — Identifies where the model was called from. + +
+
+ +
+
+ +**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record. + +
+
+ +
+
+ +**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + +
+
+ +
+
+ +**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + +
+
+ +
+
+ +**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace. + +
+
+ +
+
+ +**user:** `typing.Optional[str]` — End-user ID related to the Log. + +
+
+ +
+
+ +**agent_log_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated with. + +
+
+ +
+
+ +**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop. + +
+
+ +
+
+ +**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.agents.update_log(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Update a Log. + +Update the details of a Log with the given ID. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from humanloop import Humanloop + +client = Humanloop( + api_key="YOUR_API_KEY", +) +client.agents.update_log( + id="id", + log_id="log_id", +) + +``` +
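+ +For example, attaching the final assistant message and closing the Log; all values are placeholders, and only one of `output`, `output_message`, or `error` should be provided: + +```python +from humanloop import Humanloop + +client = Humanloop( +    api_key="YOUR_API_KEY", +) +client.agents.update_log( +    id="id", +    log_id="log_id", +    output_message={"role": "assistant", "content": "Here is the answer."}, +    log_status="complete", +) + +``` +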
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` — Unique identifier for Agent. + +
+
+ +
+
+ +**log_id:** `str` — Unique identifier for the Log. + +
+
+ +
+
+ +**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — List of chat messages that were used as an input to the Agent. + +
+
+ +
+
+ +**output_message:** `typing.Optional[ChatMessageParams]` — The output message returned by this Agent. + +
+
+ +
+
+ +**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the Agent Log. + +
+
+ +
+
+ +**output:** `typing.Optional[str]` — The output of the Agent Log. Provide `None` to unset the existing `output` value. Provide either this, `output_message`, or `error`. + +
+
+ +
+
+ +**error:** `typing.Optional[str]` — The error message of the Agent Log. Provide `None` to unset the existing `error` value. Provide either this, `output_message`, or `output`. + +
+
+ +
+
+ +**log_status:** `typing.Optional[LogStatus]` — Status of the Agent Log. When an Agent Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.agents.call_stream(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Call an Agent. + +Calling an Agent calls the model provider and then logs +the request, responses, and metadata to Humanloop. + +You can use the query parameters `version_id` or `environment` to target +an existing version of the Agent. Otherwise, the default deployed version will be chosen. + +Instead of targeting an existing version explicitly, you can pass +Agent details in the request body. In this case, we will check if the details correspond +to an existing version of the Agent. If they do not, we will create a new version. This is helpful +when you are storing or deriving your Agent details in code. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from humanloop import Humanloop + +client = Humanloop( +    api_key="YOUR_API_KEY", +) +response = client.agents.call_stream() +for chunk in response: +    print(chunk) + +``` +
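+ +A fuller sketch, targeting an Agent by a placeholder path and printing each streamed chunk: + +```python +from humanloop import Humanloop + +client = Humanloop( +    api_key="YOUR_API_KEY", +) +response = client.agents.call_stream( +    path="folder/name", +    messages=[{"role": "user", "content": "What is the weather today?"}], +) +for chunk in response: +    # Each chunk is an AgentCallStreamResponse event. +    print(chunk) + +``` +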
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**version_id:** `typing.Optional[str]` — A specific Version ID of the Agent to log to. + +
+
+ +
+
+ +**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to. + +
+
+ +
+
+ +**path:** `typing.Optional[str]` — Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. + +
+
+ +
+
+ +**id:** `typing.Optional[str]` — ID for an existing Agent. + +
+
+ +
+
+ +**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — The messages passed to the provider chat endpoint. + +
+
+ +
+
+ +**tool_choice:** `typing.Optional[AgentsCallStreamRequestToolChoiceParams]` + +Controls how the model uses tools. The following options are supported: +- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Agent. +- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Agent. +- `'required'` means the model must call one or more of the provided tools. +- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function. + +
+
+ +
+
+ +**agent:** `typing.Optional[AgentKernelRequestParams]` — Details of your Agent. A new Agent version will be created if the provided details are new. + +
+
+ +
+
+ +**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template. + +
+
+ +
+
+ +**source:** `typing.Optional[str]` — Identifies where the model was called from. + +
+
+ +
+
+ +**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record. + +
+
+ +
+
+ +**start_time:** `typing.Optional[dt.datetime]` — When the logged event started. + +
+
+ +
+
+ +**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended. + +
+
+ +
+
+ +**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + +
+
+ +
+
+ +**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + +
+
+ +
+
+ +**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace. + +
+
+ +
+
+ +**user:** `typing.Optional[str]` — End-user ID related to the Log. + +
+
+ +
+
+ +**agents_call_stream_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated with. + +
+
+ +
+
+ +**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop. + +
+
+ +
+
+ +**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + +
+
+ +
+
+ +**provider_api_keys:** `typing.Optional[ProviderApiKeysParams]` — API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. + +
+
+ +
+
+ +**return_inputs:** `typing.Optional[bool]` — Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.agents.call(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Call an Agent. + +Calling an Agent calls the model provider and then logs +the request, responses, and metadata to Humanloop. + +You can use the query parameters `version_id` or `environment` to target +an existing version of the Agent. Otherwise, the default deployed version will be chosen. + +Instead of targeting an existing version explicitly, you can pass +Agent details in the request body. In this case, we will check if the details correspond +to an existing version of the Agent. If they do not, we will create a new version. This is helpful +when you are storing or deriving your Agent details in code. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from humanloop import Humanloop + +client = Humanloop( + api_key="YOUR_API_KEY", +) +client.agents.call() + +``` +
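+ +A fuller sketch, calling an Agent by a placeholder path with template inputs and messages: + +```python +from humanloop import Humanloop + +client = Humanloop( +    api_key="YOUR_API_KEY", +) +response = client.agents.call( +    path="folder/name", +    inputs={"topic": "weather"}, +    messages=[{"role": "user", "content": "Give me a summary."}], +) +print(response) + +``` +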
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**version_id:** `typing.Optional[str]` — A specific Version ID of the Agent to log to. + +
+
+ +
+
+ +**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed version to log to. + +
+
+ +
+
+ +**path:** `typing.Optional[str]` — Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. + +
+
+ +
+
+ +**id:** `typing.Optional[str]` — ID for an existing Agent. + +
+
+ +
+
+ +**messages:** `typing.Optional[typing.Sequence[ChatMessageParams]]` — The messages passed to the provider chat endpoint. + +
+
+ +
+
+ +**tool_choice:** `typing.Optional[AgentsCallRequestToolChoiceParams]` + +Controls how the model uses tools. The following options are supported: +- `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Agent. +- `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Agent. +- `'required'` means the model must call one or more of the provided tools. +- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function. + +
+
+ +
+
+ +**agent:** `typing.Optional[AgentKernelRequestParams]` — Details of your Agent. A new Agent version will be created if the provided details are new. + +
+
+ +
+
+ +**inputs:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — The inputs passed to the prompt template. + +
+
+ +
+
+ +**source:** `typing.Optional[str]` — Identifies where the model was called from. + +
+
+ +
+
+ +**metadata:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Any additional metadata to record. + +
+
+ +
+
+ +**start_time:** `typing.Optional[dt.datetime]` — When the logged event started. + +
+
+ +
+
+ +**end_time:** `typing.Optional[dt.datetime]` — When the logged event ended. + +
+
+ +
+
+ +**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + +
+
+ +
+
+ +**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + +
+
+ +
+
+ +**trace_parent_id:** `typing.Optional[str]` — The ID of the parent Log to nest this Log under in a Trace. + +
+
+ +
+
+ +**user:** `typing.Optional[str]` — End-user ID related to the Log. + +
+
+ +
+
+ +**agents_call_request_environment:** `typing.Optional[str]` — The name of the Environment the Log is associated with. + +
+
+ +
+
+ +**save:** `typing.Optional[bool]` — Whether the request/response payloads will be stored on Humanloop. + +
+
+ +
+
+ +**log_id:** `typing.Optional[str]` — This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + +
+
+ +
+
+ +**provider_api_keys:** `typing.Optional[ProviderApiKeysParams]` — API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. + +
+
+ +
+
+ +**return_inputs:** `typing.Optional[bool]` — Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.agents.list(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get a list of all Agents. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from humanloop import Humanloop + +client = Humanloop( + api_key="YOUR_API_KEY", +) +client.agents.list() + +``` +
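+ +For example, filtering and paging through Agents; this assumes the returned object is iterable, as with the other `list` endpoints, and the filter value is a placeholder: + +```python +from humanloop import Humanloop + +client = Humanloop( +    api_key="YOUR_API_KEY", +) +# Fetch Agents ten at a time, filtered by name. +for agent in client.agents.list(size=10, name="support"): +    print(agent.id) + +``` +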
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**page:** `typing.Optional[int]` — Page number for pagination. + +
+
+ +
+
+ +**size:** `typing.Optional[int]` — Page size for pagination. Number of Agents to fetch. + +
+
+ +
+
+ +**name:** `typing.Optional[str]` — Case-insensitive filter for Agent name. + +
+
+ +
+
+ +**user_filter:** `typing.Optional[str]` — Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users. + +
+
+ +
+
+ +**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Agents by. + +
+
+ +
+
+ +**order:** `typing.Optional[SortOrder]` — Direction to sort by. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.agents.upsert(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Create an Agent or update it with a new version if it already exists. + +Agents are identified by the `ID` or their `path`. The parameters (e.g. the prompt template, temperature, and model) determine the versions of the Agent. + +If you provide a commit message, then the new version will be committed; +otherwise it will be uncommitted. If you try to commit an already committed version, +an exception will be raised. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from humanloop import Humanloop + +client = Humanloop( + api_key="YOUR_API_KEY", +) +client.agents.upsert( + model="model", +) + +``` +
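+ +A fuller sketch of upserting an Agent version; the path, model, and template contents are illustrative: + +```python +from humanloop import Humanloop + +client = Humanloop( +    api_key="YOUR_API_KEY", +) +client.agents.upsert( +    path="folder/name", +    model="gpt-4o", +    template=[ +        {"role": "system", "content": "You answer questions about {{topic}}."}, +    ], +    temperature=0.7, +    commit_message="Initial version", +) + +``` +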
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**model:** `str` — The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models). + +
+
+ +
+
+ +**path:** `typing.Optional[str]` — Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. + +
+
+ +
+
+ +**id:** `typing.Optional[str]` — ID for an existing Agent. + +
+
+ +
+
+ +**endpoint:** `typing.Optional[ModelEndpoints]` — The provider model endpoint used. + +
+
+ +
+
+ +**template:** `typing.Optional[AgentRequestTemplateParams]` + +The template contains the main structure and instructions for the model, including input variables for dynamic values. + +For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable. +For completion models, provide a prompt template as a string. + +Input variables should be specified with double curly bracket syntax: `{{input_name}}`. + +
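+ +For instance, the two template forms might look like this (contents illustrative): + +```python +# Chat models: a list of messages with `{{...}}` input variables. +chat_template = [ +    {"role": "system", "content": "You are a helpful assistant."}, +    {"role": "user", "content": "Summarize {{document}}."}, +] + +# Completion models: a single prompt template string. +completion_template = "Summarize the following document: {{document}}" + +``` +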
+
+ +
+
+ +**template_language:** `typing.Optional[TemplateLanguage]` — The template language to use for rendering the template. + +
+
+ +
+
+ +**provider:** `typing.Optional[ModelProviders]` — The company providing the underlying model service. + +
+
+ +
+
+ +**max_tokens:** `typing.Optional[int]` — The maximum number of tokens to generate. Provide `max_tokens=-1` to dynamically calculate the maximum number of tokens to generate given the length of the prompt. + +
+
+ +
+
+ +**temperature:** `typing.Optional[float]` — What sampling temperature to use when making a generation. Higher values mean the model will be more creative. + +
+
+ +
+
+ +**top_p:** `typing.Optional[float]` — An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. + +
+
+ +
+
+ +**stop:** `typing.Optional[AgentRequestStopParams]` — The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence. + +
+
+ +
+
+ +**presence_penalty:** `typing.Optional[float]` — Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far. + +
+
+ +
+
+ +**frequency_penalty:** `typing.Optional[float]` — Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far. + +
+
+ +
+
+ +**other:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Other parameter values to be passed to the provider call. + +
+
+ +
+
+ +**seed:** `typing.Optional[int]` — If specified, model will make a best effort to sample deterministically, but it is not guaranteed. + +
+
+ +
+
+ +**response_format:** `typing.Optional[ResponseFormatParams]` — The format of the response. Only `{"type": "json_object"}` is currently supported for chat. + +
+
+ +
+
+ +**reasoning_effort:** `typing.Optional[ReasoningEffort]` — Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models. + +
+
+ +
+
+ +**tools:** `typing.Optional[typing.Sequence[AgentRequestToolsItemParams]]` + +
+
+ +
+
+ +**attributes:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used. + +
+
+ +
+
+ +**max_iterations:** `typing.Optional[int]` — The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called. + +
+
+ +
+
+ +**commit_message:** `typing.Optional[str]` — Message describing the changes made. + +
+
+ +
+
+ +**version_name:** `typing.Optional[str]` — Unique name for the Agent version. Each Agent can only have one version with a given name. + +
+
+ +
+
+ +**version_description:** `typing.Optional[str]` — Description of the Version. + +
+
+ +
+
+ +**description:** `typing.Optional[str]` — Description of the Agent. + +
+
+ +
+
+ +**tags:** `typing.Optional[typing.Sequence[str]]` — List of tags associated with this Agent. + +
+
+ +
+
+ +**readme:** `typing.Optional[str]` — Long description of the Agent. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.agents.get(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Retrieve the Agent with the given ID. + +By default, the deployed version of the Agent is returned. Use the query parameters +`version_id` or `environment` to target a specific version of the Agent. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from humanloop import Humanloop + +client = Humanloop( + api_key="YOUR_API_KEY", +) +client.agents.get( + id="id", +) + +``` +
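+ +To retrieve the version deployed to a specific Environment instead, pass `environment` (the name here is a placeholder): + +```python +from humanloop import Humanloop + +client = Humanloop( +    api_key="YOUR_API_KEY", +) +client.agents.get( +    id="id", +    environment="production", +) + +``` +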
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` — Unique identifier for Agent. + +
+
+ +
+
+ +**version_id:** `typing.Optional[str]` — A specific Version ID of the Agent to retrieve. + +
+
+ +
+
+ +**environment:** `typing.Optional[str]` — Name of the Environment to retrieve a deployed Version from. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.agents.delete(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Delete the Agent with the given ID. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from humanloop import Humanloop + +client = Humanloop( + api_key="YOUR_API_KEY", +) +client.agents.delete( + id="id", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` — Unique identifier for Agent. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.agents.move(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Move the Agent to a different path or change the name. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from humanloop import Humanloop + +client = Humanloop( + api_key="YOUR_API_KEY", +) +client.agents.move( + id="id", +) + +``` +
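+ +For example, renaming and moving an Agent in one call (the destination path is a placeholder): + +```python +from humanloop import Humanloop + +client = Humanloop( +    api_key="YOUR_API_KEY", +) +client.agents.move( +    id="id", +    path="new-folder/new-name", +) + +``` +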
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` — Unique identifier for Agent. + +
+
+ +
+
+ +**path:** `typing.Optional[str]` — Path of the Agent including the Agent name, which is used as a unique identifier. + +
+
+ +
+
+ +**name:** `typing.Optional[str]` — Name of the Agent. + +
+
+ +
+
+ +**directory_id:** `typing.Optional[str]` — Unique identifier for the Directory to move the Agent to. Starts with `dir_`. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.agents.list_versions(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get a list of all the versions of an Agent. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from humanloop import Humanloop + +client = Humanloop( + api_key="YOUR_API_KEY", +) +client.agents.list_versions( + id="id", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` — Unique identifier for Agent. + +
+
+ +
+
+ +**status:** `typing.Optional[VersionStatus]` — Filter versions by status: 'uncommitted', 'committed'. If no status is provided, all versions are returned. + +
+
+ +
+
+ +**evaluator_aggregates:** `typing.Optional[bool]` — Whether to include Evaluator aggregate results for the versions in the response. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.agents.commit(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Commit a version of the Agent with a commit message. + +If the version is already committed, an exception will be raised. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from humanloop import Humanloop + +client = Humanloop( + api_key="YOUR_API_KEY", +) +client.agents.commit( + id="id", + version_id="version_id", + commit_message="commit_message", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` — Unique identifier for Agent. + +
+
+ +
+
+ +**version_id:** `str` — Unique identifier for the specific version of the Agent. + +
+
+ +
+
+ +**commit_message:** `str` — Message describing the changes made. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.agents.delete_agent_version(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Delete a version of the Agent. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from humanloop import Humanloop + +client = Humanloop( + api_key="YOUR_API_KEY", +) +client.agents.delete_agent_version( + id="id", + version_id="version_id", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` — Unique identifier for Agent. + +
+
+ +
+
+ +**version_id:** `str` — Unique identifier for the specific version of the Agent. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.agents.set_deployment(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Deploy Agent to an Environment. + +Set the deployed version for the specified Environment. This version +will be used for calls made to the Agent in this Environment. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from humanloop import Humanloop + +client = Humanloop( + api_key="YOUR_API_KEY", +) +client.agents.set_deployment( + id="id", + environment_id="environment_id", + version_id="version_id", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` — Unique identifier for Agent. + +
+
+ +
+
+ +**environment_id:** `str` — Unique identifier for the Environment to deploy the Version to. + +
+
+ +
+
+ +**version_id:** `str` — Unique identifier for the specific version of the Agent. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.agents.remove_deployment(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Remove deployed Agent from the Environment. + +Remove the deployed version for the specified Environment. This version +will no longer be used for calls made to the Agent in this Environment. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from humanloop import Humanloop + +client = Humanloop( + api_key="YOUR_API_KEY", +) +client.agents.remove_deployment( + id="id", + environment_id="environment_id", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` — Unique identifier for Agent. + +
+
+ +
+
+ +**environment_id:** `str` — Unique identifier for the Environment to remove the deployment from. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.agents.list_environments(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +List all Environments and their deployed versions for the Agent. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from humanloop import Humanloop + +client = Humanloop( + api_key="YOUR_API_KEY", +) +client.agents.list_environments( + id="id", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` — Unique identifier for Agent. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.agents.update_monitoring(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Activate and deactivate Evaluators for monitoring the Agent. + +An activated Evaluator will automatically be run on all new Logs +within the Agent for monitoring purposes. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from humanloop import Humanloop + +client = Humanloop( + api_key="YOUR_API_KEY", +) +client.agents.update_monitoring( + id="id", +) + +``` +
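+ +For example, activating a specific Evaluator version; the ID is a placeholder and the item shape assumes the version-based form of the request: + +```python +from humanloop import Humanloop + +client = Humanloop( +    api_key="YOUR_API_KEY", +) +client.agents.update_monitoring( +    id="id", +    activate=[{"evaluator_version_id": "evv_123abc"}], +) + +``` +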
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` + +
+
+ +
+
+ +**activate:** `typing.Optional[ + typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams] +]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs. + +
+
+ +
+
+ +**deactivate:** `typing.Optional[ + typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams] +]` — Evaluators to deactivate. These will not be run on new Logs. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ +
diff --git a/requirements.txt b/requirements.txt index 0e732599..cd56cd01 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,7 +2,7 @@ deepdiff==^8.2.0 httpx>=0.21.2 httpx-sse==0.4.0 mmh3==^5.1.0 -opentelemetry-api>=1.28.0 +opentelemetry-api>=1.27.0 opentelemetry-instrumentation-anthropic>=0.20 opentelemetry-instrumentation-bedrock>=0.15 opentelemetry-instrumentation-cohere>=0.20 @@ -10,7 +10,7 @@ opentelemetry-instrumentation-groq>=0.29 opentelemetry-instrumentation-openai>=0.20 opentelemetry-instrumentation-replicate>=0.20 opentelemetry-proto>=1.30.0 -opentelemetry-sdk>=1.28.0 +opentelemetry-sdk>=1.27.0 parse>=1 protobuf>=5.29.3 pydantic>= 1.9.2 diff --git a/src/humanloop/__init__.py b/src/humanloop/__init__.py index 2734e469..0eae83eb 100644 --- a/src/humanloop/__init__.py +++ b/src/humanloop/__init__.py @@ -1,7 +1,26 @@ # This file was auto-generated by Fern from our API Definition. from .types import ( + AgentCallResponse, + AgentCallResponseToolChoice, + AgentCallStreamResponse, + AgentCallStreamResponsePayload, AgentConfigResponse, + AgentInlineTool, + AgentKernelRequest, + AgentKernelRequestStop, + AgentKernelRequestTemplate, + AgentKernelRequestToolsItem, + AgentLinkedFileRequest, + AgentLinkedFileResponse, + AgentLinkedFileResponseFile, + AgentLogResponse, + AgentLogResponseToolChoice, + AgentLogStreamResponse, + AgentResponse, + AgentResponseStop, + AgentResponseTemplate, + AgentResponseToolsItem, BaseModelsUserResponse, BooleanEvaluatorStatsResponse, ChatMessage, @@ -12,6 +31,7 @@ CodeEvaluatorRequest, CommitRequest, ConfigToolResponse, + CreateAgentLogResponse, CreateDatapointRequest, CreateDatapointRequestTargetValue, CreateEvaluatorLogResponse, @@ -56,10 +76,12 @@ EvaluatorReturnTypeEnum, EvaluatorVersionId, EvaluatorsRequest, + EventType, ExternalEvaluatorRequest, FeedbackType, FileEnvironmentResponse, FileEnvironmentResponseFile, + FileEnvironmentVariableRequest, FileId, FilePath, FileRequest, @@ -77,7 +99,9 @@ ImageUrl, ImageUrlDetail, InputResponse, + LinkedFileRequest, LinkedToolResponse, + ListAgents, ListDatasets, ListEvaluators, ListFlows, @@ -86,6 +110,7 @@ LlmEvaluatorRequest, LogResponse, LogStatus, + LogStreamResponse, ModelEndpoints, ModelProviders, MonitoringEvaluatorEnvironmentRequest, @@ -95,14 +120,15 @@ NumericEvaluatorStatsResponse, ObservabilityStatus, OverallStats, + PaginatedDataAgentResponse, PaginatedDataEvaluationLogResponse, PaginatedDataEvaluatorResponse, PaginatedDataFlowResponse, PaginatedDataLogResponse, PaginatedDataPromptResponse, PaginatedDataToolResponse, - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse, - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem, + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse, + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem, PaginatedDatapointResponse, PaginatedDatasetResponse, PaginatedEvaluationResponse, @@ -140,6 +166,7 @@ TextEvaluatorStatsResponse, TimeUnit, ToolCall, + ToolCallResponse, ToolChoice, ToolFunction, ToolKernelRequest, @@ -162,7 +189,21 @@ VersionStatus, ) from .errors import UnprocessableEntityError -from . import datasets, directories, evaluations, evaluators, files, flows, logs, prompts, tools +from . 
import agents, datasets, directories, evaluations, evaluators, files, flows, logs, prompts, tools +from .agents import ( + AgentLogRequestToolChoice, + AgentLogRequestToolChoiceParams, + AgentRequestStop, + AgentRequestStopParams, + AgentRequestTemplate, + AgentRequestTemplateParams, + AgentRequestToolsItem, + AgentRequestToolsItemParams, + AgentsCallRequestToolChoice, + AgentsCallRequestToolChoiceParams, + AgentsCallStreamRequestToolChoice, + AgentsCallStreamRequestToolChoiceParams, +) from .client import AsyncHumanloop, Humanloop from .environment import HumanloopEnvironment from .evaluations import ( @@ -199,13 +240,33 @@ PromptsCallStreamRequestToolChoiceParams, ) from .requests import ( + AgentCallResponseParams, + AgentCallResponseToolChoiceParams, + AgentCallStreamResponseParams, + AgentCallStreamResponsePayloadParams, AgentConfigResponseParams, + AgentInlineToolParams, + AgentKernelRequestParams, + AgentKernelRequestStopParams, + AgentKernelRequestTemplateParams, + AgentKernelRequestToolsItemParams, + AgentLinkedFileRequestParams, + AgentLinkedFileResponseFileParams, + AgentLinkedFileResponseParams, + AgentLogResponseParams, + AgentLogResponseToolChoiceParams, + AgentLogStreamResponseParams, + AgentResponseParams, + AgentResponseStopParams, + AgentResponseTemplateParams, + AgentResponseToolsItemParams, BooleanEvaluatorStatsResponseParams, ChatMessageContentItemParams, ChatMessageContentParams, ChatMessageParams, CodeEvaluatorRequestParams, CommitRequestParams, + CreateAgentLogResponseParams, CreateDatapointRequestParams, CreateDatapointRequestTargetValueParams, CreateEvaluatorLogResponseParams, @@ -245,6 +306,7 @@ ExternalEvaluatorRequestParams, FileEnvironmentResponseFileParams, FileEnvironmentResponseParams, + FileEnvironmentVariableRequestParams, FileIdParams, FilePathParams, FileRequestParams, @@ -258,7 +320,9 @@ ImageChatContentParams, ImageUrlParams, InputResponseParams, + LinkedFileRequestParams, LinkedToolResponseParams, + ListAgentsParams, ListDatasetsParams, ListEvaluatorsParams, ListFlowsParams, @@ -266,19 +330,21 @@ ListToolsParams, LlmEvaluatorRequestParams, LogResponseParams, + LogStreamResponseParams, MonitoringEvaluatorEnvironmentRequestParams, MonitoringEvaluatorResponseParams, MonitoringEvaluatorVersionRequestParams, NumericEvaluatorStatsResponseParams, OverallStatsParams, + PaginatedDataAgentResponseParams, PaginatedDataEvaluationLogResponseParams, PaginatedDataEvaluatorResponseParams, PaginatedDataFlowResponseParams, PaginatedDataLogResponseParams, PaginatedDataPromptResponseParams, PaginatedDataToolResponseParams, - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseParams, - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams, + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseParams, + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams, PaginatedDatapointResponseParams, PaginatedDatasetResponseParams, PaginatedEvaluationResponseParams, @@ -307,6 +373,7 @@ TextChatContentParams, TextEvaluatorStatsResponseParams, ToolCallParams, + ToolCallResponseParams, ToolChoiceParams, ToolFunctionParams, ToolKernelRequestParams, @@ -328,8 +395,58 @@ __all__ = [ "AddEvaluatorsRequestEvaluatorsItem", "AddEvaluatorsRequestEvaluatorsItemParams", + "AgentCallResponse", + "AgentCallResponseParams", + "AgentCallResponseToolChoice", + "AgentCallResponseToolChoiceParams", + 
"AgentCallStreamResponse", + "AgentCallStreamResponseParams", + "AgentCallStreamResponsePayload", + "AgentCallStreamResponsePayloadParams", "AgentConfigResponse", "AgentConfigResponseParams", + "AgentInlineTool", + "AgentInlineToolParams", + "AgentKernelRequest", + "AgentKernelRequestParams", + "AgentKernelRequestStop", + "AgentKernelRequestStopParams", + "AgentKernelRequestTemplate", + "AgentKernelRequestTemplateParams", + "AgentKernelRequestToolsItem", + "AgentKernelRequestToolsItemParams", + "AgentLinkedFileRequest", + "AgentLinkedFileRequestParams", + "AgentLinkedFileResponse", + "AgentLinkedFileResponseFile", + "AgentLinkedFileResponseFileParams", + "AgentLinkedFileResponseParams", + "AgentLogRequestToolChoice", + "AgentLogRequestToolChoiceParams", + "AgentLogResponse", + "AgentLogResponseParams", + "AgentLogResponseToolChoice", + "AgentLogResponseToolChoiceParams", + "AgentLogStreamResponse", + "AgentLogStreamResponseParams", + "AgentRequestStop", + "AgentRequestStopParams", + "AgentRequestTemplate", + "AgentRequestTemplateParams", + "AgentRequestToolsItem", + "AgentRequestToolsItemParams", + "AgentResponse", + "AgentResponseParams", + "AgentResponseStop", + "AgentResponseStopParams", + "AgentResponseTemplate", + "AgentResponseTemplateParams", + "AgentResponseToolsItem", + "AgentResponseToolsItemParams", + "AgentsCallRequestToolChoice", + "AgentsCallRequestToolChoiceParams", + "AgentsCallStreamRequestToolChoice", + "AgentsCallStreamRequestToolChoiceParams", "AsyncHumanloop", "BaseModelsUserResponse", "BooleanEvaluatorStatsResponse", @@ -347,6 +464,8 @@ "CommitRequest", "CommitRequestParams", "ConfigToolResponse", + "CreateAgentLogResponse", + "CreateAgentLogResponseParams", "CreateDatapointRequest", "CreateDatapointRequestParams", "CreateDatapointRequestTargetValue", @@ -439,6 +558,7 @@ "EvaluatorVersionId", "EvaluatorVersionIdParams", "EvaluatorsRequest", + "EventType", "ExternalEvaluatorRequest", "ExternalEvaluatorRequestParams", "FeedbackType", @@ -446,6 +566,8 @@ "FileEnvironmentResponseFile", "FileEnvironmentResponseFileParams", "FileEnvironmentResponseParams", + "FileEnvironmentVariableRequest", + "FileEnvironmentVariableRequestParams", "FileId", "FileIdParams", "FilePath", @@ -478,8 +600,12 @@ "ImageUrlParams", "InputResponse", "InputResponseParams", + "LinkedFileRequest", + "LinkedFileRequestParams", "LinkedToolResponse", "LinkedToolResponseParams", + "ListAgents", + "ListAgentsParams", "ListDatasets", "ListDatasetsParams", "ListEvaluators", @@ -495,6 +621,8 @@ "LogResponse", "LogResponseParams", "LogStatus", + "LogStreamResponse", + "LogStreamResponseParams", "ModelEndpoints", "ModelProviders", "MonitoringEvaluatorEnvironmentRequest", @@ -509,6 +637,8 @@ "ObservabilityStatus", "OverallStats", "OverallStatsParams", + "PaginatedDataAgentResponse", + "PaginatedDataAgentResponseParams", "PaginatedDataEvaluationLogResponse", "PaginatedDataEvaluationLogResponseParams", "PaginatedDataEvaluatorResponse", @@ -521,10 +651,10 @@ "PaginatedDataPromptResponseParams", "PaginatedDataToolResponse", "PaginatedDataToolResponseParams", - "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse", - "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseParams", - "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem", - "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams", + 
"PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse", + "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseParams", + "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem", + "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams", "PaginatedDatapointResponse", "PaginatedDatapointResponseParams", "PaginatedDatasetResponse", @@ -604,6 +734,8 @@ "TimeUnit", "ToolCall", "ToolCallParams", + "ToolCallResponse", + "ToolCallResponseParams", "ToolChoice", "ToolChoiceParams", "ToolFunction", @@ -641,6 +773,7 @@ "VersionStatsResponseParams", "VersionStatus", "__version__", + "agents", "datasets", "directories", "evaluations", diff --git a/src/humanloop/agents/__init__.py b/src/humanloop/agents/__init__.py new file mode 100644 index 00000000..51691fc8 --- /dev/null +++ b/src/humanloop/agents/__init__.py @@ -0,0 +1,33 @@ +# This file was auto-generated by Fern from our API Definition. + +from .types import ( + AgentLogRequestToolChoice, + AgentRequestStop, + AgentRequestTemplate, + AgentRequestToolsItem, + AgentsCallRequestToolChoice, + AgentsCallStreamRequestToolChoice, +) +from .requests import ( + AgentLogRequestToolChoiceParams, + AgentRequestStopParams, + AgentRequestTemplateParams, + AgentRequestToolsItemParams, + AgentsCallRequestToolChoiceParams, + AgentsCallStreamRequestToolChoiceParams, +) + +__all__ = [ + "AgentLogRequestToolChoice", + "AgentLogRequestToolChoiceParams", + "AgentRequestStop", + "AgentRequestStopParams", + "AgentRequestTemplate", + "AgentRequestTemplateParams", + "AgentRequestToolsItem", + "AgentRequestToolsItemParams", + "AgentsCallRequestToolChoice", + "AgentsCallRequestToolChoiceParams", + "AgentsCallStreamRequestToolChoice", + "AgentsCallStreamRequestToolChoiceParams", +] diff --git a/src/humanloop/agents/client.py b/src/humanloop/agents/client.py new file mode 100644 index 00000000..8fd1bfb9 --- /dev/null +++ b/src/humanloop/agents/client.py @@ -0,0 +1,3679 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing +from ..core.client_wrapper import SyncClientWrapper +from ..requests.chat_message import ChatMessageParams +from .requests.agent_log_request_tool_choice import AgentLogRequestToolChoiceParams +from ..requests.agent_kernel_request import AgentKernelRequestParams +import datetime as dt +from ..types.log_status import LogStatus +from ..core.request_options import RequestOptions +from ..types.create_agent_log_response import CreateAgentLogResponse +from ..core.serialization import convert_and_respect_annotation_metadata +from ..core.unchecked_base_model import construct_type +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..types.http_validation_error import HttpValidationError +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError +from ..types.log_response import LogResponse +from ..core.jsonable_encoder import jsonable_encoder +from .requests.agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoiceParams +from ..requests.provider_api_keys import ProviderApiKeysParams +from ..types.agent_call_stream_response import AgentCallStreamResponse +import httpx_sse +import json +from .requests.agents_call_request_tool_choice import AgentsCallRequestToolChoiceParams +from ..types.agent_call_response import AgentCallResponse +from ..types.project_sort_by import ProjectSortBy +from ..types.sort_order import SortOrder +from ..types.paginated_data_agent_response import PaginatedDataAgentResponse +from ..types.model_endpoints import ModelEndpoints +from .requests.agent_request_template import AgentRequestTemplateParams +from ..types.template_language import TemplateLanguage +from ..types.model_providers import ModelProviders +from .requests.agent_request_stop import AgentRequestStopParams +from ..requests.response_format import ResponseFormatParams +from ..types.reasoning_effort import ReasoningEffort +from .requests.agent_request_tools_item import AgentRequestToolsItemParams +from ..types.agent_response import AgentResponse +from ..types.version_status import VersionStatus +from ..types.list_agents import ListAgents +from ..types.file_environment_response import FileEnvironmentResponse +from ..requests.evaluator_activation_deactivation_request_activate_item import ( + EvaluatorActivationDeactivationRequestActivateItemParams, +) +from ..requests.evaluator_activation_deactivation_request_deactivate_item import ( + EvaluatorActivationDeactivationRequestDeactivateItemParams, +) +from ..core.client_wrapper import AsyncClientWrapper + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) 
+ + +class AgentsClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def log( + self, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + run_id: typing.Optional[str] = OMIT, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + output_message: typing.Optional[ChatMessageParams] = OMIT, + prompt_tokens: typing.Optional[int] = OMIT, + reasoning_tokens: typing.Optional[int] = OMIT, + output_tokens: typing.Optional[int] = OMIT, + prompt_cost: typing.Optional[float] = OMIT, + output_cost: typing.Optional[float] = OMIT, + finish_reason: typing.Optional[str] = OMIT, + messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, + tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT, + agent: typing.Optional[AgentKernelRequestParams] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + output: typing.Optional[str] = OMIT, + created_at: typing.Optional[dt.datetime] = OMIT, + error: typing.Optional[str] = OMIT, + provider_latency: typing.Optional[float] = OMIT, + stdout: typing.Optional[str] = OMIT, + provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + source_datapoint_id: typing.Optional[str] = OMIT, + trace_parent_id: typing.Optional[str] = OMIT, + user: typing.Optional[str] = OMIT, + agent_log_request_environment: typing.Optional[str] = OMIT, + save: typing.Optional[bool] = OMIT, + log_id: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> CreateAgentLogResponse: + """ + Create an Agent Log. + + You can use query parameters `version_id`, or `environment`, to target + an existing version of the Agent. Otherwise, the default deployed version will be chosen. + + If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete` + in order to trigger Evaluators. + + Parameters + ---------- + version_id : typing.Optional[str] + A specific Version ID of the Agent to log to. + + environment : typing.Optional[str] + Name of the Environment identifying a deployed version to log to. + + run_id : typing.Optional[str] + Unique identifier for the Run to associate the Log to. + + path : typing.Optional[str] + Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Agent. + + output_message : typing.Optional[ChatMessageParams] + The message returned by the provider. + + prompt_tokens : typing.Optional[int] + Number of tokens in the prompt used to generate the output. + + reasoning_tokens : typing.Optional[int] + Number of reasoning tokens used to generate the output. + + output_tokens : typing.Optional[int] + Number of tokens in the output generated by the model. + + prompt_cost : typing.Optional[float] + Cost in dollars associated to the tokens in the prompt. + + output_cost : typing.Optional[float] + Cost in dollars associated to the tokens in the output. 
+ + finish_reason : typing.Optional[str] + Reason the generation finished. + + messages : typing.Optional[typing.Sequence[ChatMessageParams]] + The messages passed to the to provider chat endpoint. + + tool_choice : typing.Optional[AgentLogRequestToolChoiceParams] + Controls how the model uses tools. The following options are supported: + - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. + - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. + - `'required'` means the model can decide to call one or more of the provided tools. + - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. + + agent : typing.Optional[AgentKernelRequestParams] + Details of your Agent. A new Agent version will be created if the provided details are new. + + start_time : typing.Optional[dt.datetime] + When the logged event started. + + end_time : typing.Optional[dt.datetime] + When the logged event ended. + + output : typing.Optional[str] + Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + + created_at : typing.Optional[dt.datetime] + User defined timestamp for when the log was created. + + error : typing.Optional[str] + Error message if the log is an error. + + provider_latency : typing.Optional[float] + Duration of the logged event in seconds. + + stdout : typing.Optional[str] + Captured log and debug statements. + + provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Raw request sent to provider. + + provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Raw response received the provider. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the prompt template. + + source : typing.Optional[str] + Identifies where the model was called from. + + metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Any additional metadata to record. + + log_status : typing.Optional[LogStatus] + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + + source_datapoint_id : typing.Optional[str] + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + + trace_parent_id : typing.Optional[str] + The ID of the parent Log to nest this Log under in a Trace. + + user : typing.Optional[str] + End-user ID related to the Log. + + agent_log_request_environment : typing.Optional[str] + The name of the Environment the Log is associated to. + + save : typing.Optional[bool] + Whether the request/response payloads will be stored on Humanloop. + + log_id : typing.Optional[str] + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + CreateAgentLogResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + + client = Humanloop( + api_key="YOUR_API_KEY", + ) + client.agents.log() + """ + _response = self._client_wrapper.httpx_client.request( + "agents/log", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + }, + json={ + "run_id": run_id, + "path": path, + "id": id, + "output_message": convert_and_respect_annotation_metadata( + object_=output_message, annotation=ChatMessageParams, direction="write" + ), + "prompt_tokens": prompt_tokens, + "reasoning_tokens": reasoning_tokens, + "output_tokens": output_tokens, + "prompt_cost": prompt_cost, + "output_cost": output_cost, + "finish_reason": finish_reason, + "messages": convert_and_respect_annotation_metadata( + object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" + ), + "tool_choice": convert_and_respect_annotation_metadata( + object_=tool_choice, annotation=AgentLogRequestToolChoiceParams, direction="write" + ), + "agent": convert_and_respect_annotation_metadata( + object_=agent, annotation=AgentKernelRequestParams, direction="write" + ), + "start_time": start_time, + "end_time": end_time, + "output": output, + "created_at": created_at, + "error": error, + "provider_latency": provider_latency, + "stdout": stdout, + "provider_request": provider_request, + "provider_response": provider_response, + "inputs": inputs, + "source": source, + "metadata": metadata, + "log_status": log_status, + "source_datapoint_id": source_datapoint_id, + "trace_parent_id": trace_parent_id, + "user": user, + "environment": agent_log_request_environment, + "save": save, + "log_id": log_id, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + CreateAgentLogResponse, + construct_type( + type_=CreateAgentLogResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def update_log( + self, + id: str, + log_id: str, + *, + messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, + output_message: typing.Optional[ChatMessageParams] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + output: typing.Optional[str] = OMIT, + error: typing.Optional[str] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> LogResponse: + """ + Update a Log. + + Update the details of a Log with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + log_id : str + Unique identifier for the Log. + + messages : typing.Optional[typing.Sequence[ChatMessageParams]] + List of chat messages that were used as an input to the Flow. + + output_message : typing.Optional[ChatMessageParams] + The output message returned by this Flow. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the Flow Log. + + output : typing.Optional[str] + The output of the Flow Log. 
Provide None to unset existing `output` value. Provide either this, `output_message` or `error`. + + error : typing.Optional[str] + The error message of the Flow Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`. + + log_status : typing.Optional[LogStatus] + Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + LogResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + + client = Humanloop( + api_key="YOUR_API_KEY", + ) + client.agents.update_log( + id="id", + log_id="log_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}/log/{jsonable_encoder(log_id)}", + method="PATCH", + json={ + "messages": convert_and_respect_annotation_metadata( + object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" + ), + "output_message": convert_and_respect_annotation_metadata( + object_=output_message, annotation=ChatMessageParams, direction="write" + ), + "inputs": inputs, + "output": output, + "error": error, + "log_status": log_status, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + LogResponse, + construct_type( + type_=LogResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def call_stream( + self, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, + tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT, + agent: typing.Optional[AgentKernelRequestParams] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + source_datapoint_id: typing.Optional[str] = OMIT, + trace_parent_id: typing.Optional[str] = OMIT, + user: typing.Optional[str] = OMIT, + agents_call_stream_request_environment: typing.Optional[str] = OMIT, + save: typing.Optional[bool] = OMIT, + log_id: typing.Optional[str] = OMIT, + provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, + return_inputs: typing.Optional[bool] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Iterator[AgentCallStreamResponse]: + """ + Call an Agent. + + Calling an Agent calls the model provider before logging + the request, responses and metadata to Humanloop. + + You can use query parameters `version_id`, or `environment`, to target + an existing version of the Agent. 
Otherwise the default deployed version will be chosen. + + Instead of targeting an existing version explicitly, you can instead pass in + Agent details in the request body. In this case, we will check if the details correspond + to an existing version of the Agent. If they do not, we will create a new version. This is helpful + in the case where you are storing or deriving your Agent details in code. + + Parameters + ---------- + version_id : typing.Optional[str] + A specific Version ID of the Agent to log to. + + environment : typing.Optional[str] + Name of the Environment identifying a deployed version to log to. + + path : typing.Optional[str] + Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Agent. + + messages : typing.Optional[typing.Sequence[ChatMessageParams]] + The messages passed to the to provider chat endpoint. + + tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams] + Controls how the model uses tools. The following options are supported: + - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. + - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. + - `'required'` means the model can decide to call one or more of the provided tools. + - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. + + agent : typing.Optional[AgentKernelRequestParams] + Details of your Agent. A new Agent version will be created if the provided details are new. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the prompt template. + + source : typing.Optional[str] + Identifies where the model was called from. + + metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Any additional metadata to record. + + start_time : typing.Optional[dt.datetime] + When the logged event started. + + end_time : typing.Optional[dt.datetime] + When the logged event ended. + + log_status : typing.Optional[LogStatus] + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + + source_datapoint_id : typing.Optional[str] + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + + trace_parent_id : typing.Optional[str] + The ID of the parent Log to nest this Log under in a Trace. + + user : typing.Optional[str] + End-user ID related to the Log. + + agents_call_stream_request_environment : typing.Optional[str] + The name of the Environment the Log is associated to. + + save : typing.Optional[bool] + Whether the request/response payloads will be stored on Humanloop. + + log_id : typing.Optional[str] + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. 
+ + provider_api_keys : typing.Optional[ProviderApiKeysParams] + API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. + + return_inputs : typing.Optional[bool] + Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Yields + ------ + typing.Iterator[AgentCallStreamResponse] + + + Examples + -------- + from humanloop import Humanloop + + client = Humanloop( + api_key="YOUR_API_KEY", + ) + response = client.agents.call_stream() + for chunk in response: + print(chunk) + """ + with self._client_wrapper.httpx_client.stream( + "agents/call", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + }, + json={ + "path": path, + "id": id, + "messages": convert_and_respect_annotation_metadata( + object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" + ), + "tool_choice": convert_and_respect_annotation_metadata( + object_=tool_choice, annotation=AgentsCallStreamRequestToolChoiceParams, direction="write" + ), + "agent": convert_and_respect_annotation_metadata( + object_=agent, annotation=AgentKernelRequestParams, direction="write" + ), + "inputs": inputs, + "source": source, + "metadata": metadata, + "start_time": start_time, + "end_time": end_time, + "log_status": log_status, + "source_datapoint_id": source_datapoint_id, + "trace_parent_id": trace_parent_id, + "user": user, + "environment": agents_call_stream_request_environment, + "save": save, + "log_id": log_id, + "provider_api_keys": convert_and_respect_annotation_metadata( + object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write" + ), + "return_inputs": return_inputs, + "stream": True, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) as _response: + try: + if 200 <= _response.status_code < 300: + _event_source = httpx_sse.EventSource(_response) + for _sse in _event_source.iter_sse(): + try: + yield typing.cast( + AgentCallStreamResponse, + construct_type( + type_=AgentCallStreamResponse, # type: ignore + object_=json.loads(_sse.data), + ), + ) + except: + pass + return + _response.read() + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def call( + self, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, + tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT, + agent: typing.Optional[AgentKernelRequestParams] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, +
end_time: typing.Optional[dt.datetime] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + source_datapoint_id: typing.Optional[str] = OMIT, + trace_parent_id: typing.Optional[str] = OMIT, + user: typing.Optional[str] = OMIT, + agents_call_request_environment: typing.Optional[str] = OMIT, + save: typing.Optional[bool] = OMIT, + log_id: typing.Optional[str] = OMIT, + provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, + return_inputs: typing.Optional[bool] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AgentCallResponse: + """ + Call an Agent. + + Calling an Agent calls the model provider before logging + the request, responses and metadata to Humanloop. + + You can use query parameters `version_id`, or `environment`, to target + an existing version of the Agent. Otherwise the default deployed version will be chosen. + + Instead of targeting an existing version explicitly, you can instead pass in + Agent details in the request body. In this case, we will check if the details correspond + to an existing version of the Agent. If they do not, we will create a new version. This is helpful + in the case where you are storing or deriving your Agent details in code. + + Parameters + ---------- + version_id : typing.Optional[str] + A specific Version ID of the Agent to log to. + + environment : typing.Optional[str] + Name of the Environment identifying a deployed version to log to. + + path : typing.Optional[str] + Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Agent. + + messages : typing.Optional[typing.Sequence[ChatMessageParams]] + The messages passed to the provider chat endpoint. + + tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams] + Controls how the model uses tools. The following options are supported: + - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. + - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. + - `'required'` means the model must call one or more of the provided tools. + - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function. + + agent : typing.Optional[AgentKernelRequestParams] + Details of your Agent. A new Agent version will be created if the provided details are new. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the prompt template. + + source : typing.Optional[str] + Identifies where the model was called from. + + metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Any additional metadata to record. + + start_time : typing.Optional[dt.datetime] + When the logged event started. + + end_time : typing.Optional[dt.datetime] + When the logged event ended. + + log_status : typing.Optional[LogStatus] + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
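+
+            A minimal sketch of the two-step flow this parameter enables (the
+            Agent ID is illustrative, and reading the Log ID off the response
+            as `log_id` is an assumption, not confirmed by this file):
+
+                response = client.agents.call(log_status="incomplete")
+                # ... do further work, add child Logs, etc. ...
+                client.agents.update_log(
+                    id="ag_123",             # illustrative Agent ID
+                    log_id=response.log_id,  # assumed attribute name
+                    log_status="complete",
+                )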
+ + source_datapoint_id : typing.Optional[str] + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + + trace_parent_id : typing.Optional[str] + The ID of the parent Log to nest this Log under in a Trace. + + user : typing.Optional[str] + End-user ID related to the Log. + + agents_call_request_environment : typing.Optional[str] + The name of the Environment the Log is associated to. + + save : typing.Optional[bool] + Whether the request/response payloads will be stored on Humanloop. + + log_id : typing.Optional[str] + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + + provider_api_keys : typing.Optional[ProviderApiKeysParams] + API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. + + return_inputs : typing.Optional[bool] + Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AgentCallResponse + + + Examples + -------- + from humanloop import Humanloop + + client = Humanloop( + api_key="YOUR_API_KEY", + ) + client.agents.call() + """ + _response = self._client_wrapper.httpx_client.request( + "agents/call", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + }, + json={ + "path": path, + "id": id, + "messages": convert_and_respect_annotation_metadata( + object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" + ), + "tool_choice": convert_and_respect_annotation_metadata( + object_=tool_choice, annotation=AgentsCallRequestToolChoiceParams, direction="write" + ), + "agent": convert_and_respect_annotation_metadata( + object_=agent, annotation=AgentKernelRequestParams, direction="write" + ), + "inputs": inputs, + "source": source, + "metadata": metadata, + "start_time": start_time, + "end_time": end_time, + "log_status": log_status, + "source_datapoint_id": source_datapoint_id, + "trace_parent_id": trace_parent_id, + "user": user, + "environment": agents_call_request_environment, + "save": save, + "log_id": log_id, + "provider_api_keys": convert_and_respect_annotation_metadata( + object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write" + ), + "return_inputs": return_inputs, + "stream": False, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + AgentCallResponse, + construct_type( + type_=AgentCallResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def list( + self, + *, + page: typing.Optional[int] = None, + size: 
typing.Optional[int] = None, + name: typing.Optional[str] = None, + user_filter: typing.Optional[str] = None, + sort_by: typing.Optional[ProjectSortBy] = None, + order: typing.Optional[SortOrder] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> PaginatedDataAgentResponse: + """ + Get a list of all Agents. + + Parameters + ---------- + page : typing.Optional[int] + Page number for pagination. + + size : typing.Optional[int] + Page size for pagination. Number of Agents to fetch. + + name : typing.Optional[str] + Case-insensitive filter for Agent name. + + user_filter : typing.Optional[str] + Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users. + + sort_by : typing.Optional[ProjectSortBy] + Field to sort Agents by + + order : typing.Optional[SortOrder] + Direction to sort by. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + PaginatedDataAgentResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + + client = Humanloop( + api_key="YOUR_API_KEY", + ) + client.agents.list() + """ + _response = self._client_wrapper.httpx_client.request( + "agents", + method="GET", + params={ + "page": page, + "size": size, + "name": name, + "user_filter": user_filter, + "sort_by": sort_by, + "order": order, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + PaginatedDataAgentResponse, + construct_type( + type_=PaginatedDataAgentResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def upsert( + self, + *, + model: str, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + endpoint: typing.Optional[ModelEndpoints] = OMIT, + template: typing.Optional[AgentRequestTemplateParams] = OMIT, + template_language: typing.Optional[TemplateLanguage] = OMIT, + provider: typing.Optional[ModelProviders] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + temperature: typing.Optional[float] = OMIT, + top_p: typing.Optional[float] = OMIT, + stop: typing.Optional[AgentRequestStopParams] = OMIT, + presence_penalty: typing.Optional[float] = OMIT, + frequency_penalty: typing.Optional[float] = OMIT, + other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + seed: typing.Optional[int] = OMIT, + response_format: typing.Optional[ResponseFormatParams] = OMIT, + reasoning_effort: typing.Optional[ReasoningEffort] = OMIT, + tools: typing.Optional[typing.Sequence[AgentRequestToolsItemParams]] = OMIT, + attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + max_iterations: typing.Optional[int] = OMIT, + commit_message: typing.Optional[str] = OMIT, + version_name: typing.Optional[str] = OMIT, + version_description: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + tags: typing.Optional[typing.Sequence[str]] = OMIT, + readme: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AgentResponse: + """ + Create an Agent or update it with a new 
version if it already exists. + + Agents are identified by the `ID` or their `path`. The parameters (i.e. the prompt template, temperature, model etc.) determine the versions of the Agent. + + If you provide a commit message, then the new version will be committed; + otherwise it will be uncommitted. If you try to commit an already committed version, + an exception will be raised. + + Parameters + ---------- + model : str + The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models). + + path : typing.Optional[str] + Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Agent. + + endpoint : typing.Optional[ModelEndpoints] + The provider model endpoint used. + + template : typing.Optional[AgentRequestTemplateParams] + The template contains the main structure and instructions for the model, including input variables for dynamic values. + + For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable. + For completion models, provide a prompt template as a string. + + Input variables should be specified with double curly bracket syntax: `{{input_name}}`. + + template_language : typing.Optional[TemplateLanguage] + The template language to use for rendering the template. + + provider : typing.Optional[ModelProviders] + The company providing the underlying model service. + + max_tokens : typing.Optional[int] + The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt. + + temperature : typing.Optional[float] + What sampling temperature to use when making a generation. Higher values mean the model will be more creative. + + top_p : typing.Optional[float] + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. + + stop : typing.Optional[AgentRequestStopParams] + The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence. + + presence_penalty : typing.Optional[float] + Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far. + + frequency_penalty : typing.Optional[float] + Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far. + + other : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Other parameter values to be passed to the provider call. + + seed : typing.Optional[int] + If specified, the model will make a best effort to sample deterministically, but it is not guaranteed. + + response_format : typing.Optional[ResponseFormatParams] + The format of the response. Only `{"type": "json_object"}` is currently supported for chat. + + reasoning_effort : typing.Optional[ReasoningEffort] + Give the model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models. + + tools : typing.Optional[typing.Sequence[AgentRequestToolsItemParams]] + + attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used.
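+
+        A fuller sketch than the minimal example below, wiring together the
+        template syntax described above (the path, model and template values
+        are illustrative, and the plain-dict message form is an assumption):
+
+            client.agents.upsert(
+                path="support/faq-agent",  # illustrative path
+                model="gpt-4o",            # illustrative model name
+                template=[
+                    {"role": "system", "content": "Answer questions about {{topic}}."},
+                ],
+                max_iterations=5,
+            )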
+ + max_iterations : typing.Optional[int] + The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called. + + commit_message : typing.Optional[str] + Message describing the changes made. + + version_name : typing.Optional[str] + Unique name for the Agent version. Each Agent can only have one version with a given name. + + version_description : typing.Optional[str] + Description of the Version. + + description : typing.Optional[str] + Description of the Agent. + + tags : typing.Optional[typing.Sequence[str]] + List of tags associated with this Agent. + + readme : typing.Optional[str] + Long description of the Agent. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AgentResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + + client = Humanloop( + api_key="YOUR_API_KEY", + ) + client.agents.upsert( + model="model", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "agents", + method="POST", + json={ + "path": path, + "id": id, + "model": model, + "endpoint": endpoint, + "template": convert_and_respect_annotation_metadata( + object_=template, annotation=AgentRequestTemplateParams, direction="write" + ), + "template_language": template_language, + "provider": provider, + "max_tokens": max_tokens, + "temperature": temperature, + "top_p": top_p, + "stop": convert_and_respect_annotation_metadata( + object_=stop, annotation=AgentRequestStopParams, direction="write" + ), + "presence_penalty": presence_penalty, + "frequency_penalty": frequency_penalty, + "other": other, + "seed": seed, + "response_format": convert_and_respect_annotation_metadata( + object_=response_format, annotation=ResponseFormatParams, direction="write" + ), + "reasoning_effort": reasoning_effort, + "tools": convert_and_respect_annotation_metadata( + object_=tools, annotation=typing.Sequence[AgentRequestToolsItemParams], direction="write" + ), + "attributes": attributes, + "max_iterations": max_iterations, + "commit_message": commit_message, + "version_name": version_name, + "version_description": version_description, + "description": description, + "tags": tags, + "readme": readme, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + AgentResponse, + construct_type( + type_=AgentResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AgentResponse: + """ + Retrieve the Agent with the given ID. + + By default, the deployed version of the Agent is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Agent. + + Parameters + ---------- + id : str + Unique identifier for Agent.
+ + version_id : typing.Optional[str] + A specific Version ID of the Agent to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AgentResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + + client = Humanloop( + api_key="YOUR_API_KEY", + ) + client.agents.get( + id="id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}", + method="GET", + params={ + "version_id": version_id, + "environment": environment, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + AgentResponse, + construct_type( + type_=AgentResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: + """ + Delete the Agent with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + None + + Examples + -------- + from humanloop import Humanloop + + client = Humanloop( + api_key="YOUR_API_KEY", + ) + client.agents.delete( + id="id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def move( + self, + id: str, + *, + path: typing.Optional[str] = OMIT, + name: typing.Optional[str] = OMIT, + directory_id: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AgentResponse: + """ + Move the Agent to a different path or change the name. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + path : typing.Optional[str] + Path of the Agent including the Agent name, which is used as a unique identifier. + + name : typing.Optional[str] + Name of the Agent. + + directory_id : typing.Optional[str] + Unique identifier for the Directory to move the Agent to. Starts with `dir_`. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration.
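+
+        A minimal sketch of renaming an Agent by moving it to a new path
+        (the ID and paths are illustrative):
+
+            client.agents.move(id="ag_123", path="new-folder/new-name")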
+ + Returns + ------- + AgentResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + + client = Humanloop( + api_key="YOUR_API_KEY", + ) + client.agents.move( + id="id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}", + method="PATCH", + json={ + "path": path, + "name": name, + "directory_id": directory_id, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + AgentResponse, + construct_type( + type_=AgentResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def list_versions( + self, + id: str, + *, + status: typing.Optional[VersionStatus] = None, + evaluator_aggregates: typing.Optional[bool] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> ListAgents: + """ + Get a list of all the versions of an Agent. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + status : typing.Optional[VersionStatus] + Filter versions by status: 'uncommitted', 'committed'. If no status is provided, all versions are returned. + + evaluator_aggregates : typing.Optional[bool] + Whether to include Evaluator aggregate results for the versions in the response. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + ListAgents + Successful Response + + Examples + -------- + from humanloop import Humanloop + + client = Humanloop( + api_key="YOUR_API_KEY", + ) + client.agents.list_versions( + id="id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}/versions", + method="GET", + params={ + "status": status, + "evaluator_aggregates": evaluator_aggregates, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + ListAgents, + construct_type( + type_=ListAgents, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def commit( + self, id: str, version_id: str, *, commit_message: str, request_options: typing.Optional[RequestOptions] = None + ) -> AgentResponse: + """ + Commit a version of the Agent with a commit message. + + If the version is already committed, an exception will be raised. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + version_id : str + Unique identifier for the specific version of the Agent. + + commit_message : str + Message describing the changes made. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration.
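+
+        A minimal sketch of committing the most recent uncommitted version
+        (identifiers are illustrative, and `.records` is assumed to be the
+        list attribute on the `ListAgents` response):
+
+            versions = client.agents.list_versions(id="ag_123", status="uncommitted")
+            client.agents.commit(
+                id="ag_123",
+                version_id=versions.records[0].id,  # assumed attribute names
+                commit_message="Tighten system prompt",
+            )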
+ + Returns + ------- + AgentResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + + client = Humanloop( + api_key="YOUR_API_KEY", + ) + client.agents.commit( + id="id", + version_id="version_id", + commit_message="commit_message", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}/commit", + method="POST", + json={ + "commit_message": commit_message, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + AgentResponse, + construct_type( + type_=AgentResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def delete_agent_version( + self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> None: + """ + Delete a version of the Agent. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + version_id : str + Unique identifier for the specific version of the Agent. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + None + + Examples + -------- + from humanloop import Humanloop + + client = Humanloop( + api_key="YOUR_API_KEY", + ) + client.agents.delete_agent_version( + id="id", + version_id="version_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def set_deployment( + self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None + ) -> AgentResponse: + """ + Deploy Agent to an Environment. + + Set the deployed version for the specified Environment. This Agent + will be used for calls made to the Agent in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + environment_id : str + Unique identifier for the Environment to deploy the Version to. + + version_id : str + Unique identifier for the specific version of the Agent. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
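+
+        A minimal sketch of the commit-then-deploy flow (identifiers are
+        illustrative):
+
+            client.agents.commit(
+                id="ag_123",
+                version_id="agv_789",
+                commit_message="Ready for production",
+            )
+            client.agents.set_deployment(
+                id="ag_123",
+                environment_id="env_456",
+                version_id="agv_789",
+            )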
+ + Returns + ------- + AgentResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + + client = Humanloop( + api_key="YOUR_API_KEY", + ) + client.agents.set_deployment( + id="id", + environment_id="environment_id", + version_id="version_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="POST", + params={ + "version_id": version_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + AgentResponse, + construct_type( + type_=AgentResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def remove_deployment( + self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> None: + """ + Remove deployed Agent from the Environment. + + Remove the deployed version for the specified Environment. This Agent + will no longer be used for calls made to the Agent in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + environment_id : str + Unique identifier for the Environment to remove the deployment from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + None + + Examples + -------- + from humanloop import Humanloop + + client = Humanloop( + api_key="YOUR_API_KEY", + ) + client.agents.remove_deployment( + id="id", + environment_id="environment_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def list_environments( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[FileEnvironmentResponse]: + """ + List all Environments and their deployed versions for the Agent. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
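+
+        A minimal sketch of inspecting deployments (the Agent ID is
+        illustrative, and `name` is assumed to be an attribute of
+        `FileEnvironmentResponse`):
+
+            for env in client.agents.list_environments(id="ag_123"):
+                print(env.name)  # assumed attribute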
+ + Returns + ------- + typing.List[FileEnvironmentResponse] + Successful Response + + Examples + -------- + from humanloop import Humanloop + + client = Humanloop( + api_key="YOUR_API_KEY", + ) + client.agents.list_environments( + id="id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}/environments", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[FileEnvironmentResponse], + construct_type( + type_=typing.List[FileEnvironmentResponse], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def update_monitoring( + self, + id: str, + *, + activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, + deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AgentResponse: + """ + Activate and deactivate Evaluators for monitoring the Agent. + + An activated Evaluator will automatically be run on all new Logs + within the Agent for monitoring purposes. + + Parameters + ---------- + id : str + + activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] + Evaluators to activate for Monitoring. These will be automatically run on new Logs. + + deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] + Evaluators to deactivate. These will not be run on new Logs. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
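+
+        A minimal sketch of activating a single Evaluator version (the
+        `evaluator_version_id` key and both IDs are assumptions, not
+        confirmed by this file):
+
+            client.agents.update_monitoring(
+                id="ag_123",
+                activate=[{"evaluator_version_id": "evv_456"}],  # assumed item shape
+            )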
+ + Returns + ------- + AgentResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + + client = Humanloop( + api_key="YOUR_API_KEY", + ) + client.agents.update_monitoring( + id="id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}/evaluators", + method="POST", + json={ + "activate": convert_and_respect_annotation_metadata( + object_=activate, + annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams], + direction="write", + ), + "deactivate": convert_and_respect_annotation_metadata( + object_=deactivate, + annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams], + direction="write", + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + AgentResponse, + construct_type( + type_=AgentResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + +class AsyncAgentsClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def log( + self, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + run_id: typing.Optional[str] = OMIT, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + output_message: typing.Optional[ChatMessageParams] = OMIT, + prompt_tokens: typing.Optional[int] = OMIT, + reasoning_tokens: typing.Optional[int] = OMIT, + output_tokens: typing.Optional[int] = OMIT, + prompt_cost: typing.Optional[float] = OMIT, + output_cost: typing.Optional[float] = OMIT, + finish_reason: typing.Optional[str] = OMIT, + messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, + tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT, + agent: typing.Optional[AgentKernelRequestParams] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + output: typing.Optional[str] = OMIT, + created_at: typing.Optional[dt.datetime] = OMIT, + error: typing.Optional[str] = OMIT, + provider_latency: typing.Optional[float] = OMIT, + stdout: typing.Optional[str] = OMIT, + provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + source_datapoint_id: typing.Optional[str] = OMIT, + trace_parent_id: typing.Optional[str] = OMIT, + user: typing.Optional[str] = OMIT, + agent_log_request_environment: typing.Optional[str] = OMIT, + save: typing.Optional[bool] = OMIT, + log_id: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> CreateAgentLogResponse: + """ + Create an Agent Log. 
+ + You can use query parameters `version_id`, or `environment`, to target + an existing version of the Agent. Otherwise, the default deployed version will be chosen. + + If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete` + in order to trigger Evaluators. + + Parameters + ---------- + version_id : typing.Optional[str] + A specific Version ID of the Agent to log to. + + environment : typing.Optional[str] + Name of the Environment identifying a deployed version to log to. + + run_id : typing.Optional[str] + Unique identifier for the Run to associate the Log to. + + path : typing.Optional[str] + Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Agent. + + output_message : typing.Optional[ChatMessageParams] + The message returned by the provider. + + prompt_tokens : typing.Optional[int] + Number of tokens in the prompt used to generate the output. + + reasoning_tokens : typing.Optional[int] + Number of reasoning tokens used to generate the output. + + output_tokens : typing.Optional[int] + Number of tokens in the output generated by the model. + + prompt_cost : typing.Optional[float] + Cost in dollars associated with the tokens in the prompt. + + output_cost : typing.Optional[float] + Cost in dollars associated with the tokens in the output. + + finish_reason : typing.Optional[str] + Reason the generation finished. + + messages : typing.Optional[typing.Sequence[ChatMessageParams]] + The messages passed to the provider chat endpoint. + + tool_choice : typing.Optional[AgentLogRequestToolChoiceParams] + Controls how the model uses tools. The following options are supported: + - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. + - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. + - `'required'` means the model must call one or more of the provided tools. + - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function. + + agent : typing.Optional[AgentKernelRequestParams] + Details of your Agent. A new Agent version will be created if the provided details are new. + + start_time : typing.Optional[dt.datetime] + When the logged event started. + + end_time : typing.Optional[dt.datetime] + When the logged event ended. + + output : typing.Optional[str] + Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + + created_at : typing.Optional[dt.datetime] + User defined timestamp for when the log was created. + + error : typing.Optional[str] + Error message if the log is an error. + + provider_latency : typing.Optional[float] + Duration of the logged event in seconds. + + stdout : typing.Optional[str] + Captured log and debug statements. + + provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Raw request sent to the provider. + + provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Raw response received from the provider. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the prompt template.
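+
+            For example, if the Agent template contains `{{topic}}`, the
+            corresponding entry would be (value illustrative):
+
+                inputs={"topic": "payments"}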
+ + source : typing.Optional[str] + Identifies where the model was called from. + + metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Any additional metadata to record. + + log_status : typing.Optional[LogStatus] + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + + source_datapoint_id : typing.Optional[str] + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + + trace_parent_id : typing.Optional[str] + The ID of the parent Log to nest this Log under in a Trace. + + user : typing.Optional[str] + End-user ID related to the Log. + + agent_log_request_environment : typing.Optional[str] + The name of the Environment the Log is associated to. + + save : typing.Optional[bool] + Whether the request/response payloads will be stored on Humanloop. + + log_id : typing.Optional[str] + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + CreateAgentLogResponse + Successful Response + + Examples + -------- + import asyncio + + from humanloop import AsyncHumanloop + + client = AsyncHumanloop( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.agents.log() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "agents/log", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + }, + json={ + "run_id": run_id, + "path": path, + "id": id, + "output_message": convert_and_respect_annotation_metadata( + object_=output_message, annotation=ChatMessageParams, direction="write" + ), + "prompt_tokens": prompt_tokens, + "reasoning_tokens": reasoning_tokens, + "output_tokens": output_tokens, + "prompt_cost": prompt_cost, + "output_cost": output_cost, + "finish_reason": finish_reason, + "messages": convert_and_respect_annotation_metadata( + object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" + ), + "tool_choice": convert_and_respect_annotation_metadata( + object_=tool_choice, annotation=AgentLogRequestToolChoiceParams, direction="write" + ), + "agent": convert_and_respect_annotation_metadata( + object_=agent, annotation=AgentKernelRequestParams, direction="write" + ), + "start_time": start_time, + "end_time": end_time, + "output": output, + "created_at": created_at, + "error": error, + "provider_latency": provider_latency, + "stdout": stdout, + "provider_request": provider_request, + "provider_response": provider_response, + "inputs": inputs, + "source": source, + "metadata": metadata, + "log_status": log_status, + "source_datapoint_id": source_datapoint_id, + "trace_parent_id": trace_parent_id, + "user": user, + "environment": agent_log_request_environment, + "save": save, + "log_id": log_id, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + CreateAgentLogResponse, + construct_type( + 
type_=CreateAgentLogResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def update_log( + self, + id: str, + log_id: str, + *, + messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, + output_message: typing.Optional[ChatMessageParams] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + output: typing.Optional[str] = OMIT, + error: typing.Optional[str] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> LogResponse: + """ + Update a Log. + + Update the details of a Log with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + log_id : str + Unique identifier for the Log. + + messages : typing.Optional[typing.Sequence[ChatMessageParams]] + List of chat messages that were used as an input to the Agent. + + output_message : typing.Optional[ChatMessageParams] + The output message returned by this Agent. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the Agent Log. + + output : typing.Optional[str] + The output of the Agent Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`. + + error : typing.Optional[str] + The error message of the Agent Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`. + + log_status : typing.Optional[LogStatus] + Status of the Agent Log. When an Agent Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration.
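+
+        A minimal sketch of completing a previously `incomplete` Log
+        (identifiers are illustrative):
+
+            await client.agents.update_log(
+                id="ag_123",
+                log_id="log_456",
+                output="Final answer",
+                log_status="complete",
+            )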
+ + Returns + ------- + LogResponse + Successful Response + + Examples + -------- + import asyncio + + from humanloop import AsyncHumanloop + + client = AsyncHumanloop( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.agents.update_log( + id="id", + log_id="log_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}/log/{jsonable_encoder(log_id)}", + method="PATCH", + json={ + "messages": convert_and_respect_annotation_metadata( + object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" + ), + "output_message": convert_and_respect_annotation_metadata( + object_=output_message, annotation=ChatMessageParams, direction="write" + ), + "inputs": inputs, + "output": output, + "error": error, + "log_status": log_status, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + LogResponse, + construct_type( + type_=LogResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def call_stream( + self, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, + tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT, + agent: typing.Optional[AgentKernelRequestParams] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + source_datapoint_id: typing.Optional[str] = OMIT, + trace_parent_id: typing.Optional[str] = OMIT, + user: typing.Optional[str] = OMIT, + agents_call_stream_request_environment: typing.Optional[str] = OMIT, + save: typing.Optional[bool] = OMIT, + log_id: typing.Optional[str] = OMIT, + provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, + return_inputs: typing.Optional[bool] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.AsyncIterator[AgentCallStreamResponse]: + """ + Call an Agent. + + Calling an Agent calls the model provider before logging + the request, responses and metadata to Humanloop. + + You can use query parameters `version_id`, or `environment`, to target + an existing version of the Agent. Otherwise the default deployed version will be chosen. + + Instead of targeting an existing version explicitly, you can instead pass in + Agent details in the request body. In this case, we will check if the details correspond + to an existing version of the Agent. If they do not, we will create a new version. This is helpful + in the case where you are storing or deriving your Agent details in code. 
+ + Parameters + ---------- + version_id : typing.Optional[str] + A specific Version ID of the Agent to log to. + + environment : typing.Optional[str] + Name of the Environment identifying a deployed version to log to. + + path : typing.Optional[str] + Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Agent. + + messages : typing.Optional[typing.Sequence[ChatMessageParams]] + The messages passed to the provider chat endpoint. + + tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams] + Controls how the model uses tools. The following options are supported: + - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. + - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. + - `'required'` means the model must call one or more of the provided tools. + - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function. + + agent : typing.Optional[AgentKernelRequestParams] + Details of your Agent. A new Agent version will be created if the provided details are new. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the prompt template. + + source : typing.Optional[str] + Identifies where the model was called from. + + metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Any additional metadata to record. + + start_time : typing.Optional[dt.datetime] + When the logged event started. + + end_time : typing.Optional[dt.datetime] + When the logged event ended. + + log_status : typing.Optional[LogStatus] + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + + source_datapoint_id : typing.Optional[str] + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + + trace_parent_id : typing.Optional[str] + The ID of the parent Log to nest this Log under in a Trace. + + user : typing.Optional[str] + End-user ID related to the Log. + + agents_call_stream_request_environment : typing.Optional[str] + The name of the Environment the Log is associated to. + + save : typing.Optional[bool] + Whether the request/response payloads will be stored on Humanloop. + + log_id : typing.Optional[str] + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + + provider_api_keys : typing.Optional[ProviderApiKeysParams] + API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. + + return_inputs : typing.Optional[bool] + Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
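+
+            For example, to keep streamed responses small (other arguments
+            omitted for brevity):
+
+                response = await client.agents.call_stream(return_inputs=False)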
+ + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Yields + ------ + typing.AsyncIterator[AgentCallStreamResponse] + + + Examples + -------- + import asyncio + + from humanloop import AsyncHumanloop + + client = AsyncHumanloop( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + response = await client.agents.call_stream() + async for chunk in response: + print(chunk) + + + asyncio.run(main()) + """ + async with self._client_wrapper.httpx_client.stream( + "agents/call", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + }, + json={ + "path": path, + "id": id, + "messages": convert_and_respect_annotation_metadata( + object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" + ), + "tool_choice": convert_and_respect_annotation_metadata( + object_=tool_choice, annotation=AgentsCallStreamRequestToolChoiceParams, direction="write" + ), + "agent": convert_and_respect_annotation_metadata( + object_=agent, annotation=AgentKernelRequestParams, direction="write" + ), + "inputs": inputs, + "source": source, + "metadata": metadata, + "start_time": start_time, + "end_time": end_time, + "log_status": log_status, + "source_datapoint_id": source_datapoint_id, + "trace_parent_id": trace_parent_id, + "user": user, + "environment": agents_call_stream_request_environment, + "save": save, + "log_id": log_id, + "provider_api_keys": convert_and_respect_annotation_metadata( + object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write" + ), + "return_inputs": return_inputs, + "stream": True, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) as _response: + try: + if 200 <= _response.status_code < 300: + _event_source = httpx_sse.EventSource(_response) + async for _sse in _event_source.aiter_sse(): + try: + yield typing.cast( + AgentCallStreamResponse, + construct_type( + type_=AgentCallStreamResponse, # type: ignore + object_=json.loads(_sse.data), + ), + ) + except: + pass + return + await _response.aread() + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def call( + self, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, + tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT, + agent: typing.Optional[AgentKernelRequestParams] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + source_datapoint_id: typing.Optional[str] = OMIT, + trace_parent_id: typing.Optional[str] = OMIT, + user: typing.Optional[str] = OMIT, + agents_call_request_environment: typing.Optional[str] = OMIT, + save: typing.Optional[bool] = OMIT, + log_id:
typing.Optional[str] = OMIT, + provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, + return_inputs: typing.Optional[bool] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AgentCallResponse: + """ + Call an Agent. + + Calling an Agent calls the model provider before logging + the request, responses and metadata to Humanloop. + + You can use the query parameters `version_id` or `environment` to target + an existing version of the Agent. Otherwise the default deployed version will be chosen. + + Instead of targeting an existing version explicitly, you can pass in + Agent details in the request body. In this case, we will check if the details correspond + to an existing version of the Agent. If they do not, we will create a new version. This is helpful + when you are storing or deriving your Agent details in code. + + Parameters + ---------- + version_id : typing.Optional[str] + A specific Version ID of the Agent to log to. + + environment : typing.Optional[str] + Name of the Environment identifying a deployed version to log to. + + path : typing.Optional[str] + Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Agent. + + messages : typing.Optional[typing.Sequence[ChatMessageParams]] + The messages passed to the provider chat endpoint. + + tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams] + Controls how the model uses tools. The following options are supported: + - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. + - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. + - `'required'` means the model must call one or more of the provided tools. + - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function. + + agent : typing.Optional[AgentKernelRequestParams] + Details of your Agent. A new Agent version will be created if the provided details are new. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the prompt template. + + source : typing.Optional[str] + Identifies where the model was called from. + + metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Any additional metadata to record. + + start_time : typing.Optional[dt.datetime] + When the logged event started. + + end_time : typing.Optional[dt.datetime] + When the logged event ended. + + log_status : typing.Optional[LogStatus] + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + + source_datapoint_id : typing.Optional[str] + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + + trace_parent_id : typing.Optional[str] + The ID of the parent Log to nest this Log under in a Trace.
+ + user : typing.Optional[str] + End-user ID related to the Log. + + agents_call_request_environment : typing.Optional[str] + The name of the Environment the Log is associated to. + + save : typing.Optional[bool] + Whether the request/response payloads will be stored on Humanloop. + + log_id : typing.Optional[str] + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + + provider_api_keys : typing.Optional[ProviderApiKeysParams] + API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. + + return_inputs : typing.Optional[bool] + Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AgentCallResponse + + + Examples + -------- + import asyncio + + from humanloop import AsyncHumanloop + + client = AsyncHumanloop( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.agents.call() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "agents/call", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + }, + json={ + "path": path, + "id": id, + "messages": convert_and_respect_annotation_metadata( + object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" + ), + "tool_choice": convert_and_respect_annotation_metadata( + object_=tool_choice, annotation=AgentsCallRequestToolChoiceParams, direction="write" + ), + "agent": convert_and_respect_annotation_metadata( + object_=agent, annotation=AgentKernelRequestParams, direction="write" + ), + "inputs": inputs, + "source": source, + "metadata": metadata, + "start_time": start_time, + "end_time": end_time, + "log_status": log_status, + "source_datapoint_id": source_datapoint_id, + "trace_parent_id": trace_parent_id, + "user": user, + "environment": agents_call_request_environment, + "save": save, + "log_id": log_id, + "provider_api_keys": convert_and_respect_annotation_metadata( + object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write" + ), + "return_inputs": return_inputs, + "stream": False, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + AgentCallResponse, + construct_type( + type_=AgentCallResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def list( + self, + *, + page: typing.Optional[int] = None, + size: typing.Optional[int] = None, + name: typing.Optional[str] = None, + user_filter: typing.Optional[str] = None, + sort_by: typing.Optional[ProjectSortBy] = None, + order: typing.Optional[SortOrder] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> PaginatedDataAgentResponse: + """ + Get a list of 
all Agents. + + Parameters + ---------- + page : typing.Optional[int] + Page number for pagination. + + size : typing.Optional[int] + Page size for pagination. Number of Agents to fetch. + + name : typing.Optional[str] + Case-insensitive filter for Agent name. + + user_filter : typing.Optional[str] + Case-insensitive filter for users in the Agent. This filter matches against both the email address and name of users. + + sort_by : typing.Optional[ProjectSortBy] + Field to sort Agents by. + + order : typing.Optional[SortOrder] + Direction to sort by. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + PaginatedDataAgentResponse + Successful Response + + Examples + -------- + import asyncio + + from humanloop import AsyncHumanloop + + client = AsyncHumanloop( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.agents.list() + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "agents", + method="GET", + params={ + "page": page, + "size": size, + "name": name, + "user_filter": user_filter, + "sort_by": sort_by, + "order": order, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + PaginatedDataAgentResponse, + construct_type( + type_=PaginatedDataAgentResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def upsert( + self, + *, + model: str, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + endpoint: typing.Optional[ModelEndpoints] = OMIT, + template: typing.Optional[AgentRequestTemplateParams] = OMIT, + template_language: typing.Optional[TemplateLanguage] = OMIT, + provider: typing.Optional[ModelProviders] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + temperature: typing.Optional[float] = OMIT, + top_p: typing.Optional[float] = OMIT, + stop: typing.Optional[AgentRequestStopParams] = OMIT, + presence_penalty: typing.Optional[float] = OMIT, + frequency_penalty: typing.Optional[float] = OMIT, + other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + seed: typing.Optional[int] = OMIT, + response_format: typing.Optional[ResponseFormatParams] = OMIT, + reasoning_effort: typing.Optional[ReasoningEffort] = OMIT, + tools: typing.Optional[typing.Sequence[AgentRequestToolsItemParams]] = OMIT, + attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + max_iterations: typing.Optional[int] = OMIT, + commit_message: typing.Optional[str] = OMIT, + version_name: typing.Optional[str] = OMIT, + version_description: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + tags: typing.Optional[typing.Sequence[str]] = OMIT, + readme: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AgentResponse: + """ + Create an Agent or update it with a new version if it already exists. + + Agents are identified by the `ID` or their `path`. The parameters (i.e. the prompt template, temperature, model etc.) determine the versions of the Agent.
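+ + For example, upserting twice with identical parameters resolves to the same existing version, while changing any parameter (such as `temperature`) creates a new version.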
+ + If you provide a commit message, then the new version will be committed; + otherwise it will be uncommitted. If you try to commit an already committed version, + an exception will be raised. + + Parameters + ---------- + model : str + The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models). + + path : typing.Optional[str] + Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Agent. + + endpoint : typing.Optional[ModelEndpoints] + The provider model endpoint used. + + template : typing.Optional[AgentRequestTemplateParams] + The template contains the main structure and instructions for the model, including input variables for dynamic values. + + For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable. + For completion models, provide a prompt template as a string. + + Input variables should be specified with double curly bracket syntax: `{{input_name}}`. + + template_language : typing.Optional[TemplateLanguage] + The template language to use for rendering the template. + + provider : typing.Optional[ModelProviders] + The company providing the underlying model service. + + max_tokens : typing.Optional[int] + The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt. + + temperature : typing.Optional[float] + What sampling temperature to use when making a generation. Higher values mean the model will be more creative. + + top_p : typing.Optional[float] + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. + + stop : typing.Optional[AgentRequestStopParams] + The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence. + + presence_penalty : typing.Optional[float] + Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far. + + frequency_penalty : typing.Optional[float] + Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far. + + other : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Other parameter values to be passed to the provider call. + + seed : typing.Optional[int] + If specified, the model will make a best effort to sample deterministically, but it is not guaranteed. + + response_format : typing.Optional[ResponseFormatParams] + The format of the response. Only `{"type": "json_object"}` is currently supported for chat. + + reasoning_effort : typing.Optional[ReasoningEffort] + Give the model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models. + + tools : typing.Optional[typing.Sequence[AgentRequestToolsItemParams]] + + attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Additional fields to describe the Agent. Helpful to separate Agent versions from each other with details on how they were created or used. + + max_iterations : typing.Optional[int] + The maximum number of iterations the Agent can run.
This is used to limit the number of times the Agent model is called. + + commit_message : typing.Optional[str] + Message describing the changes made. + + version_name : typing.Optional[str] + Unique name for the Agent version. Each Agent can only have one version with a given name. + + version_description : typing.Optional[str] + Description of the Version. + + description : typing.Optional[str] + Description of the Agent. + + tags : typing.Optional[typing.Sequence[str]] + List of tags associated with this Agent. + + readme : typing.Optional[str] + Long description of the Agent. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AgentResponse + Successful Response + + Examples + -------- + import asyncio + + from humanloop import AsyncHumanloop + + client = AsyncHumanloop( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.agents.upsert( + model="model", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "agents", + method="POST", + json={ + "path": path, + "id": id, + "model": model, + "endpoint": endpoint, + "template": convert_and_respect_annotation_metadata( + object_=template, annotation=AgentRequestTemplateParams, direction="write" + ), + "template_language": template_language, + "provider": provider, + "max_tokens": max_tokens, + "temperature": temperature, + "top_p": top_p, + "stop": convert_and_respect_annotation_metadata( + object_=stop, annotation=AgentRequestStopParams, direction="write" + ), + "presence_penalty": presence_penalty, + "frequency_penalty": frequency_penalty, + "other": other, + "seed": seed, + "response_format": convert_and_respect_annotation_metadata( + object_=response_format, annotation=ResponseFormatParams, direction="write" + ), + "reasoning_effort": reasoning_effort, + "tools": convert_and_respect_annotation_metadata( + object_=tools, annotation=typing.Sequence[AgentRequestToolsItemParams], direction="write" + ), + "attributes": attributes, + "max_iterations": max_iterations, + "commit_message": commit_message, + "version_name": version_name, + "version_description": version_description, + "description": description, + "tags": tags, + "readme": readme, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + AgentResponse, + construct_type( + type_=AgentResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AgentResponse: + """ + Retrieve the Agent with the given ID. + + By default, the deployed version of the Agent is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Agent. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + version_id : typing.Optional[str] + A specific Version ID of the Agent to retrieve.
+ + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AgentResponse + Successful Response + + Examples + -------- + import asyncio + + from humanloop import AsyncHumanloop + + client = AsyncHumanloop( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.agents.get( + id="id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}", + method="GET", + params={ + "version_id": version_id, + "environment": environment, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + AgentResponse, + construct_type( + type_=AgentResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: + """ + Delete the Agent with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + None + + Examples + -------- + import asyncio + + from humanloop import AsyncHumanloop + + client = AsyncHumanloop( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.agents.delete( + id="id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def move( + self, + id: str, + *, + path: typing.Optional[str] = OMIT, + name: typing.Optional[str] = OMIT, + directory_id: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AgentResponse: + """ + Move the Agent to a different path or change the name. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + path : typing.Optional[str] + Path of the Agent including the Agent name, which is used as a unique identifier. + + name : typing.Optional[str] + Name of the Agent. + + directory_id : typing.Optional[str] + Unique identifier for the Directory to move the Agent to. Starts with `dir_`. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration.
+ + Returns + ------- + AgentResponse + Successful Response + + Examples + -------- + import asyncio + + from humanloop import AsyncHumanloop + + client = AsyncHumanloop( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.agents.move( + id="id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}", + method="PATCH", + json={ + "path": path, + "name": name, + "directory_id": directory_id, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + AgentResponse, + construct_type( + type_=AgentResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def list_versions( + self, + id: str, + *, + status: typing.Optional[VersionStatus] = None, + evaluator_aggregates: typing.Optional[bool] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> ListAgents: + """ + Get a list of all the versions of an Agent. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + status : typing.Optional[VersionStatus] + Filter versions by status: 'uncommitted', 'committed'. If no status is provided, all versions are returned. + + evaluator_aggregates : typing.Optional[bool] + Whether to include Evaluator aggregate results for the versions in the response. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + ListAgents + Successful Response + + Examples + -------- + import asyncio + + from humanloop import AsyncHumanloop + + client = AsyncHumanloop( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.agents.list_versions( + id="id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}/versions", + method="GET", + params={ + "status": status, + "evaluator_aggregates": evaluator_aggregates, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + ListAgents, + construct_type( + type_=ListAgents, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def commit( + self, id: str, version_id: str, *, commit_message: str, request_options: typing.Optional[RequestOptions] = None + ) -> AgentResponse: + """ + Commit a version of the Agent with a commit message. + + If the version is already committed, an exception will be raised. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + version_id : str + Unique identifier for the specific version of the Agent.
+ + commit_message : str + Message describing the changes made. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AgentResponse + Successful Response + + Examples + -------- + import asyncio + + from humanloop import AsyncHumanloop + + client = AsyncHumanloop( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.agents.commit( + id="id", + version_id="version_id", + commit_message="commit_message", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}/commit", + method="POST", + json={ + "commit_message": commit_message, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + AgentResponse, + construct_type( + type_=AgentResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def delete_agent_version( + self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> None: + """ + Delete a version of the Agent. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + version_id : str + Unique identifier for the specific version of the Agent. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + None + + Examples + -------- + import asyncio + + from humanloop import AsyncHumanloop + + client = AsyncHumanloop( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.agents.delete_agent_version( + id="id", + version_id="version_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def set_deployment( + self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None + ) -> AgentResponse: + """ + Deploy Agent to an Environment. + + Set the deployed version for the specified Environment. This version + will be used for calls made to the Agent in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + environment_id : str + Unique identifier for the Environment to deploy the Version to. + + version_id : str + Unique identifier for the specific version of the Agent. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration.
+ + Returns + ------- + AgentResponse + Successful Response + + Examples + -------- + import asyncio + + from humanloop import AsyncHumanloop + + client = AsyncHumanloop( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.agents.set_deployment( + id="id", + environment_id="environment_id", + version_id="version_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="POST", + params={ + "version_id": version_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + AgentResponse, + construct_type( + type_=AgentResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def remove_deployment( + self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> None: + """ + Remove deployed Agent from the Environment. + + Remove the deployed version for the specified Environment. This version + will no longer be used for calls made to the Agent in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + environment_id : str + Unique identifier for the Environment to remove the deployment from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + None + + Examples + -------- + import asyncio + + from humanloop import AsyncHumanloop + + client = AsyncHumanloop( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.agents.remove_deployment( + id="id", + environment_id="environment_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def list_environments( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[FileEnvironmentResponse]: + """ + List all Environments and their deployed versions for the Agent. + + Parameters + ---------- + id : str + Unique identifier for Agent. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration.
+ + Returns + ------- + typing.List[FileEnvironmentResponse] + Successful Response + + Examples + -------- + import asyncio + + from humanloop import AsyncHumanloop + + client = AsyncHumanloop( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.agents.list_environments( + id="id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}/environments", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[FileEnvironmentResponse], + construct_type( + type_=typing.List[FileEnvironmentResponse], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def update_monitoring( + self, + id: str, + *, + activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, + deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AgentResponse: + """ + Activate and deactivate Evaluators for monitoring the Agent. + + An activated Evaluator will automatically be run on all new Logs + within the Agent for monitoring purposes. + + Parameters + ---------- + id : str + + activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] + Evaluators to activate for Monitoring. These will be automatically run on new Logs. + + deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] + Evaluators to deactivate. These will not be run on new Logs. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AgentResponse + Successful Response + + Examples + -------- + import asyncio + + from humanloop import AsyncHumanloop + + client = AsyncHumanloop( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.agents.update_monitoring( + id="id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"agents/{jsonable_encoder(id)}/evaluators", + method="POST", + json={ + "activate": convert_and_respect_annotation_metadata( + object_=activate, + annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams], + direction="write", + ), + "deactivate": convert_and_respect_annotation_metadata( + object_=deactivate, + annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams], + direction="write", + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + AgentResponse, + construct_type( + type_=AgentResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/humanloop/agents/requests/__init__.py b/src/humanloop/agents/requests/__init__.py new file mode 100644 index 00000000..f704e9cc --- /dev/null +++ b/src/humanloop/agents/requests/__init__.py @@ -0,0 +1,17 @@ +# This file was auto-generated by Fern from our API Definition. + +from .agent_log_request_tool_choice import AgentLogRequestToolChoiceParams +from .agent_request_stop import AgentRequestStopParams +from .agent_request_template import AgentRequestTemplateParams +from .agent_request_tools_item import AgentRequestToolsItemParams +from .agents_call_request_tool_choice import AgentsCallRequestToolChoiceParams +from .agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoiceParams + +__all__ = [ + "AgentLogRequestToolChoiceParams", + "AgentRequestStopParams", + "AgentRequestTemplateParams", + "AgentRequestToolsItemParams", + "AgentsCallRequestToolChoiceParams", + "AgentsCallStreamRequestToolChoiceParams", +] diff --git a/src/humanloop/agents/requests/agent_log_request_tool_choice.py b/src/humanloop/agents/requests/agent_log_request_tool_choice.py new file mode 100644 index 00000000..584112aa --- /dev/null +++ b/src/humanloop/agents/requests/agent_log_request_tool_choice.py @@ -0,0 +1,8 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ...requests.tool_choice import ToolChoiceParams + +AgentLogRequestToolChoiceParams = typing.Union[ + typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams +] diff --git a/src/humanloop/agents/requests/agent_request_stop.py b/src/humanloop/agents/requests/agent_request_stop.py new file mode 100644 index 00000000..3970451c --- /dev/null +++ b/src/humanloop/agents/requests/agent_request_stop.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
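+# A stop condition may be a single sequence or a list of sequences, e.g. +# stop="\n" or stop=["\n", "###"] (illustrative values only); generation halts +# once any sequence is produced, and the sequence itself is not returned.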
+ +import typing + +AgentRequestStopParams = typing.Union[str, typing.Sequence[str]] diff --git a/src/humanloop/agents/requests/agent_request_template.py b/src/humanloop/agents/requests/agent_request_template.py new file mode 100644 index 00000000..c251ce8e --- /dev/null +++ b/src/humanloop/agents/requests/agent_request_template.py @@ -0,0 +1,6 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ...requests.chat_message import ChatMessageParams + +AgentRequestTemplateParams = typing.Union[str, typing.Sequence[ChatMessageParams]] diff --git a/src/humanloop/agents/requests/agent_request_tools_item.py b/src/humanloop/agents/requests/agent_request_tools_item.py new file mode 100644 index 00000000..20cde136 --- /dev/null +++ b/src/humanloop/agents/requests/agent_request_tools_item.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ...requests.agent_linked_file_request import AgentLinkedFileRequestParams +from ...requests.agent_inline_tool import AgentInlineToolParams + +AgentRequestToolsItemParams = typing.Union[AgentLinkedFileRequestParams, AgentInlineToolParams] diff --git a/src/humanloop/agents/requests/agents_call_request_tool_choice.py b/src/humanloop/agents/requests/agents_call_request_tool_choice.py new file mode 100644 index 00000000..1e468fa0 --- /dev/null +++ b/src/humanloop/agents/requests/agents_call_request_tool_choice.py @@ -0,0 +1,8 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ...requests.tool_choice import ToolChoiceParams + +AgentsCallRequestToolChoiceParams = typing.Union[ + typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams +] diff --git a/src/humanloop/agents/requests/agents_call_stream_request_tool_choice.py b/src/humanloop/agents/requests/agents_call_stream_request_tool_choice.py new file mode 100644 index 00000000..bd068b6f --- /dev/null +++ b/src/humanloop/agents/requests/agents_call_stream_request_tool_choice.py @@ -0,0 +1,8 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ...requests.tool_choice import ToolChoiceParams + +AgentsCallStreamRequestToolChoiceParams = typing.Union[ + typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams +] diff --git a/src/humanloop/agents/types/__init__.py b/src/humanloop/agents/types/__init__.py new file mode 100644 index 00000000..14378369 --- /dev/null +++ b/src/humanloop/agents/types/__init__.py @@ -0,0 +1,17 @@ +# This file was auto-generated by Fern from our API Definition. 
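+# Type aliases used when reading Agent data; each mirrors a request-side +# `*Params` alias in humanloop.agents.requests, using List instead of Sequence.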
+ +from .agent_log_request_tool_choice import AgentLogRequestToolChoice +from .agent_request_stop import AgentRequestStop +from .agent_request_template import AgentRequestTemplate +from .agent_request_tools_item import AgentRequestToolsItem +from .agents_call_request_tool_choice import AgentsCallRequestToolChoice +from .agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoice + +__all__ = [ + "AgentLogRequestToolChoice", + "AgentRequestStop", + "AgentRequestTemplate", + "AgentRequestToolsItem", + "AgentsCallRequestToolChoice", + "AgentsCallStreamRequestToolChoice", +] diff --git a/src/humanloop/agents/types/agent_log_request_tool_choice.py b/src/humanloop/agents/types/agent_log_request_tool_choice.py new file mode 100644 index 00000000..bfb576c2 --- /dev/null +++ b/src/humanloop/agents/types/agent_log_request_tool_choice.py @@ -0,0 +1,8 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ...types.tool_choice import ToolChoice + +AgentLogRequestToolChoice = typing.Union[ + typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice +] diff --git a/src/humanloop/agents/types/agent_request_stop.py b/src/humanloop/agents/types/agent_request_stop.py new file mode 100644 index 00000000..325a6b2e --- /dev/null +++ b/src/humanloop/agents/types/agent_request_stop.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +AgentRequestStop = typing.Union[str, typing.List[str]] diff --git a/src/humanloop/agents/types/agent_request_template.py b/src/humanloop/agents/types/agent_request_template.py new file mode 100644 index 00000000..f6474824 --- /dev/null +++ b/src/humanloop/agents/types/agent_request_template.py @@ -0,0 +1,6 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ...types.chat_message import ChatMessage + +AgentRequestTemplate = typing.Union[str, typing.List[ChatMessage]] diff --git a/src/humanloop/agents/types/agent_request_tools_item.py b/src/humanloop/agents/types/agent_request_tools_item.py new file mode 100644 index 00000000..e6c54b88 --- /dev/null +++ b/src/humanloop/agents/types/agent_request_tools_item.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ...types.agent_linked_file_request import AgentLinkedFileRequest +from ...types.agent_inline_tool import AgentInlineTool + +AgentRequestToolsItem = typing.Union[AgentLinkedFileRequest, AgentInlineTool] diff --git a/src/humanloop/agents/types/agents_call_request_tool_choice.py b/src/humanloop/agents/types/agents_call_request_tool_choice.py new file mode 100644 index 00000000..6dee5a04 --- /dev/null +++ b/src/humanloop/agents/types/agents_call_request_tool_choice.py @@ -0,0 +1,8 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ...types.tool_choice import ToolChoice + +AgentsCallRequestToolChoice = typing.Union[ + typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice +] diff --git a/src/humanloop/agents/types/agents_call_stream_request_tool_choice.py b/src/humanloop/agents/types/agents_call_stream_request_tool_choice.py new file mode 100644 index 00000000..83d264f0 --- /dev/null +++ b/src/humanloop/agents/types/agents_call_stream_request_tool_choice.py @@ -0,0 +1,8 @@ +# This file was auto-generated by Fern from our API Definition. 
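+# tool_choice for streamed Agent calls: one of the literals 'none', 'auto' or +# 'required', or a ToolChoice object that forces a specific named function.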
+ +import typing +from ...types.tool_choice import ToolChoice + +AgentsCallStreamRequestToolChoice = typing.Union[ + typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice +] diff --git a/src/humanloop/base_client.py b/src/humanloop/base_client.py index 5a643570..28e647c6 100644 --- a/src/humanloop/base_client.py +++ b/src/humanloop/base_client.py @@ -11,6 +11,7 @@ from .datasets.client import DatasetsClient from .evaluators.client import EvaluatorsClient from .flows.client import FlowsClient +from .agents.client import AgentsClient from .directories.client import DirectoriesClient from .files.client import FilesClient from .evaluations.client import EvaluationsClient @@ -21,6 +22,7 @@ from .datasets.client import AsyncDatasetsClient from .evaluators.client import AsyncEvaluatorsClient from .flows.client import AsyncFlowsClient +from .agents.client import AsyncAgentsClient from .directories.client import AsyncDirectoriesClient from .files.client import AsyncFilesClient from .evaluations.client import AsyncEvaluationsClient @@ -94,6 +96,7 @@ def __init__( self.datasets = DatasetsClient(client_wrapper=self._client_wrapper) self.evaluators = EvaluatorsClient(client_wrapper=self._client_wrapper) self.flows = FlowsClient(client_wrapper=self._client_wrapper) + self.agents = AgentsClient(client_wrapper=self._client_wrapper) self.directories = DirectoriesClient(client_wrapper=self._client_wrapper) self.files = FilesClient(client_wrapper=self._client_wrapper) self.evaluations = EvaluationsClient(client_wrapper=self._client_wrapper) @@ -167,6 +170,7 @@ def __init__( self.datasets = AsyncDatasetsClient(client_wrapper=self._client_wrapper) self.evaluators = AsyncEvaluatorsClient(client_wrapper=self._client_wrapper) self.flows = AsyncFlowsClient(client_wrapper=self._client_wrapper) + self.agents = AsyncAgentsClient(client_wrapper=self._client_wrapper) self.directories = AsyncDirectoriesClient(client_wrapper=self._client_wrapper) self.files = AsyncFilesClient(client_wrapper=self._client_wrapper) self.evaluations = AsyncEvaluationsClient(client_wrapper=self._client_wrapper) diff --git a/src/humanloop/core/client_wrapper.py b/src/humanloop/core/client_wrapper.py index 01b7607d..837b5df3 100644 --- a/src/humanloop/core/client_wrapper.py +++ b/src/humanloop/core/client_wrapper.py @@ -16,7 +16,7 @@ def get_headers(self) -> typing.Dict[str, str]: headers: typing.Dict[str, str] = { "X-Fern-Language": "Python", "X-Fern-SDK-Name": "humanloop", - "X-Fern-SDK-Version": "0.8.29", + "X-Fern-SDK-Version": "0.8.29b", } headers["X-API-KEY"] = self.api_key return headers diff --git a/src/humanloop/files/client.py b/src/humanloop/files/client.py index 126d775b..bb388cdf 100644 --- a/src/humanloop/files/client.py +++ b/src/humanloop/files/client.py @@ -6,8 +6,8 @@ from ..types.project_sort_by import ProjectSortBy from ..types.sort_order import SortOrder from ..core.request_options import RequestOptions -from ..types.paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response import ( - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse, +from ..types.paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response import ( + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse, ) from ..core.unchecked_base_model import construct_type from ..errors.unprocessable_entity_error import UnprocessableEntityError @@ -37,7 
+37,7 @@ def list_files( sort_by: typing.Optional[ProjectSortBy] = None, order: typing.Optional[SortOrder] = None, request_options: typing.Optional[RequestOptions] = None, - ) -> PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse: + ) -> PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse: """ Get a paginated list of files. @@ -72,7 +72,7 @@ def list_files( Returns ------- - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse Successful Response Examples @@ -102,9 +102,9 @@ def list_files( try: if 200 <= _response.status_code < 300: return typing.cast( - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse, + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse, construct_type( - type_=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse, # type: ignore + type_=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse, # type: ignore object_=_response.json(), ), ) @@ -216,7 +216,7 @@ async def list_files( sort_by: typing.Optional[ProjectSortBy] = None, order: typing.Optional[SortOrder] = None, request_options: typing.Optional[RequestOptions] = None, - ) -> PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse: + ) -> PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse: """ Get a paginated list of files. @@ -251,7 +251,7 @@ async def list_files( Returns ------- - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse Successful Response Examples @@ -289,9 +289,9 @@ async def main() -> None: try: if 200 <= _response.status_code < 300: return typing.cast( - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse, + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse, construct_type( - type_=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse, # type: ignore + type_=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse, # type: ignore object_=_response.json(), ), ) diff --git a/src/humanloop/files/requests/retrieve_by_path_files_retrieve_by_path_post_response.py b/src/humanloop/files/requests/retrieve_by_path_files_retrieve_by_path_post_response.py index c1618edb..8c070ab3 100644 --- a/src/humanloop/files/requests/retrieve_by_path_files_retrieve_by_path_post_response.py +++ b/src/humanloop/files/requests/retrieve_by_path_files_retrieve_by_path_post_response.py @@ -6,7 +6,13 @@ from ...requests.dataset_response import DatasetResponseParams from ...requests.evaluator_response import EvaluatorResponseParams from ...requests.flow_response import FlowResponseParams +from ...requests.agent_response import AgentResponseParams RetrieveByPathFilesRetrieveByPathPostResponseParams = typing.Union[ - PromptResponseParams, ToolResponseParams, DatasetResponseParams, EvaluatorResponseParams, FlowResponseParams + PromptResponseParams, + ToolResponseParams, + DatasetResponseParams, + EvaluatorResponseParams, + FlowResponseParams, + 
AgentResponseParams, ] diff --git a/src/humanloop/files/types/retrieve_by_path_files_retrieve_by_path_post_response.py b/src/humanloop/files/types/retrieve_by_path_files_retrieve_by_path_post_response.py index 48415fc9..46ea271a 100644 --- a/src/humanloop/files/types/retrieve_by_path_files_retrieve_by_path_post_response.py +++ b/src/humanloop/files/types/retrieve_by_path_files_retrieve_by_path_post_response.py @@ -6,7 +6,8 @@ from ...types.dataset_response import DatasetResponse from ...types.evaluator_response import EvaluatorResponse from ...types.flow_response import FlowResponse +from ...types.agent_response import AgentResponse RetrieveByPathFilesRetrieveByPathPostResponse = typing.Union[ - PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse + PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse, AgentResponse ] diff --git a/src/humanloop/requests/__init__.py b/src/humanloop/requests/__init__.py index e0e9304e..5bde28d0 100644 --- a/src/humanloop/requests/__init__.py +++ b/src/humanloop/requests/__init__.py @@ -1,12 +1,32 @@ # This file was auto-generated by Fern from our API Definition. +from .agent_call_response import AgentCallResponseParams +from .agent_call_response_tool_choice import AgentCallResponseToolChoiceParams +from .agent_call_stream_response import AgentCallStreamResponseParams +from .agent_call_stream_response_payload import AgentCallStreamResponsePayloadParams from .agent_config_response import AgentConfigResponseParams +from .agent_inline_tool import AgentInlineToolParams +from .agent_kernel_request import AgentKernelRequestParams +from .agent_kernel_request_stop import AgentKernelRequestStopParams +from .agent_kernel_request_template import AgentKernelRequestTemplateParams +from .agent_kernel_request_tools_item import AgentKernelRequestToolsItemParams +from .agent_linked_file_request import AgentLinkedFileRequestParams +from .agent_linked_file_response import AgentLinkedFileResponseParams +from .agent_linked_file_response_file import AgentLinkedFileResponseFileParams +from .agent_log_response import AgentLogResponseParams +from .agent_log_response_tool_choice import AgentLogResponseToolChoiceParams +from .agent_log_stream_response import AgentLogStreamResponseParams +from .agent_response import AgentResponseParams +from .agent_response_stop import AgentResponseStopParams +from .agent_response_template import AgentResponseTemplateParams +from .agent_response_tools_item import AgentResponseToolsItemParams from .boolean_evaluator_stats_response import BooleanEvaluatorStatsResponseParams from .chat_message import ChatMessageParams from .chat_message_content import ChatMessageContentParams from .chat_message_content_item import ChatMessageContentItemParams from .code_evaluator_request import CodeEvaluatorRequestParams from .commit_request import CommitRequestParams +from .create_agent_log_response import CreateAgentLogResponseParams from .create_datapoint_request import CreateDatapointRequestParams from .create_datapoint_request_target_value import CreateDatapointRequestTargetValueParams from .create_evaluator_log_response import CreateEvaluatorLogResponseParams @@ -52,6 +72,7 @@ from .external_evaluator_request import ExternalEvaluatorRequestParams from .file_environment_response import FileEnvironmentResponseParams from .file_environment_response_file import FileEnvironmentResponseFileParams +from .file_environment_variable_request import FileEnvironmentVariableRequestParams from .file_id import FileIdParams from 
.file_path import FilePathParams from .file_request import FileRequestParams @@ -65,7 +86,9 @@ from .image_chat_content import ImageChatContentParams from .image_url import ImageUrlParams from .input_response import InputResponseParams +from .linked_file_request import LinkedFileRequestParams from .linked_tool_response import LinkedToolResponseParams +from .list_agents import ListAgentsParams from .list_datasets import ListDatasetsParams from .list_evaluators import ListEvaluatorsParams from .list_flows import ListFlowsParams @@ -73,22 +96,24 @@ from .list_tools import ListToolsParams from .llm_evaluator_request import LlmEvaluatorRequestParams from .log_response import LogResponseParams +from .log_stream_response import LogStreamResponseParams from .monitoring_evaluator_environment_request import MonitoringEvaluatorEnvironmentRequestParams from .monitoring_evaluator_response import MonitoringEvaluatorResponseParams from .monitoring_evaluator_version_request import MonitoringEvaluatorVersionRequestParams from .numeric_evaluator_stats_response import NumericEvaluatorStatsResponseParams from .overall_stats import OverallStatsParams +from .paginated_data_agent_response import PaginatedDataAgentResponseParams from .paginated_data_evaluation_log_response import PaginatedDataEvaluationLogResponseParams from .paginated_data_evaluator_response import PaginatedDataEvaluatorResponseParams from .paginated_data_flow_response import PaginatedDataFlowResponseParams from .paginated_data_log_response import PaginatedDataLogResponseParams from .paginated_data_prompt_response import PaginatedDataPromptResponseParams from .paginated_data_tool_response import PaginatedDataToolResponseParams -from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response import ( - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseParams, +from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response import ( + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseParams, ) -from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item import ( - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams, +from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item import ( + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams, ) from .paginated_datapoint_response import PaginatedDatapointResponseParams from .paginated_dataset_response import PaginatedDatasetResponseParams @@ -118,6 +143,7 @@ from .text_chat_content import TextChatContentParams from .text_evaluator_stats_response import TextEvaluatorStatsResponseParams from .tool_call import ToolCallParams +from .tool_call_response import ToolCallResponseParams from .tool_choice import ToolChoiceParams from .tool_function import ToolFunctionParams from .tool_kernel_request import ToolKernelRequestParams @@ -135,13 +161,33 @@ from .version_stats_response_evaluator_version_stats_item import VersionStatsResponseEvaluatorVersionStatsItemParams __all__ = [ + "AgentCallResponseParams", + "AgentCallResponseToolChoiceParams", + "AgentCallStreamResponseParams", + "AgentCallStreamResponsePayloadParams", "AgentConfigResponseParams", + "AgentInlineToolParams", 
+ "AgentKernelRequestParams", + "AgentKernelRequestStopParams", + "AgentKernelRequestTemplateParams", + "AgentKernelRequestToolsItemParams", + "AgentLinkedFileRequestParams", + "AgentLinkedFileResponseFileParams", + "AgentLinkedFileResponseParams", + "AgentLogResponseParams", + "AgentLogResponseToolChoiceParams", + "AgentLogStreamResponseParams", + "AgentResponseParams", + "AgentResponseStopParams", + "AgentResponseTemplateParams", + "AgentResponseToolsItemParams", "BooleanEvaluatorStatsResponseParams", "ChatMessageContentItemParams", "ChatMessageContentParams", "ChatMessageParams", "CodeEvaluatorRequestParams", "CommitRequestParams", + "CreateAgentLogResponseParams", "CreateDatapointRequestParams", "CreateDatapointRequestTargetValueParams", "CreateEvaluatorLogResponseParams", @@ -181,6 +227,7 @@ "ExternalEvaluatorRequestParams", "FileEnvironmentResponseFileParams", "FileEnvironmentResponseParams", + "FileEnvironmentVariableRequestParams", "FileIdParams", "FilePathParams", "FileRequestParams", @@ -194,7 +241,9 @@ "ImageChatContentParams", "ImageUrlParams", "InputResponseParams", + "LinkedFileRequestParams", "LinkedToolResponseParams", + "ListAgentsParams", "ListDatasetsParams", "ListEvaluatorsParams", "ListFlowsParams", @@ -202,19 +251,21 @@ "ListToolsParams", "LlmEvaluatorRequestParams", "LogResponseParams", + "LogStreamResponseParams", "MonitoringEvaluatorEnvironmentRequestParams", "MonitoringEvaluatorResponseParams", "MonitoringEvaluatorVersionRequestParams", "NumericEvaluatorStatsResponseParams", "OverallStatsParams", + "PaginatedDataAgentResponseParams", "PaginatedDataEvaluationLogResponseParams", "PaginatedDataEvaluatorResponseParams", "PaginatedDataFlowResponseParams", "PaginatedDataLogResponseParams", "PaginatedDataPromptResponseParams", "PaginatedDataToolResponseParams", - "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseParams", - "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams", + "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseParams", + "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams", "PaginatedDatapointResponseParams", "PaginatedDatasetResponseParams", "PaginatedEvaluationResponseParams", @@ -243,6 +294,7 @@ "TextChatContentParams", "TextEvaluatorStatsResponseParams", "ToolCallParams", + "ToolCallResponseParams", "ToolChoiceParams", "ToolFunctionParams", "ToolKernelRequestParams", diff --git a/src/humanloop/requests/agent_call_response.py b/src/humanloop/requests/agent_call_response.py new file mode 100644 index 00000000..2a1615b6 --- /dev/null +++ b/src/humanloop/requests/agent_call_response.py @@ -0,0 +1,111 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions +import typing_extensions +import datetime as dt +import typing +from .chat_message import ChatMessageParams +from .agent_call_response_tool_choice import AgentCallResponseToolChoiceParams +from .prompt_response import PromptResponseParams +from ..types.log_status import LogStatus +from .prompt_call_log_response import PromptCallLogResponseParams + + +class AgentCallResponseParams(typing_extensions.TypedDict): + """ + Response model for a Prompt call with potentially multiple log samples. + """ + + start_time: typing_extensions.NotRequired[dt.datetime] + """ + When the logged event started. 
+ """ + + end_time: typing_extensions.NotRequired[dt.datetime] + """ + When the logged event ended. + """ + + messages: typing_extensions.NotRequired[typing.Sequence[ChatMessageParams]] + """ + The messages passed to the to provider chat endpoint. + """ + + tool_choice: typing_extensions.NotRequired[AgentCallResponseToolChoiceParams] + """ + Controls how the model uses tools. The following options are supported: + - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. + - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. + - `'required'` means the model can decide to call one or more of the provided tools. + - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. + """ + + prompt: PromptResponseParams + """ + Prompt used to generate the Log. + """ + + inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + The inputs passed to the prompt template. + """ + + source: typing_extensions.NotRequired[str] + """ + Identifies where the model was called from. + """ + + metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Any additional metadata to record. + """ + + log_status: typing_extensions.NotRequired[LogStatus] + """ + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + """ + + source_datapoint_id: typing_extensions.NotRequired[str] + """ + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + """ + + trace_parent_id: typing_extensions.NotRequired[str] + """ + The ID of the parent Log to nest this Log under in a Trace. + """ + + user: typing_extensions.NotRequired[str] + """ + End-user ID related to the Log. + """ + + environment: typing_extensions.NotRequired[str] + """ + The name of the Environment the Log is associated to. + """ + + save: typing_extensions.NotRequired[bool] + """ + Whether the request/response payloads will be stored on Humanloop. + """ + + log_id: typing_extensions.NotRequired[str] + """ + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + """ + + id: str + """ + ID of the log. + """ + + trace_id: typing_extensions.NotRequired[str] + """ + ID of the Trace containing the Prompt Call Log. + """ + + logs: typing.Sequence[PromptCallLogResponseParams] + """ + The logs generated by the Prompt call. + """ diff --git a/src/humanloop/requests/agent_call_response_tool_choice.py b/src/humanloop/requests/agent_call_response_tool_choice.py new file mode 100644 index 00000000..6cc9f9c4 --- /dev/null +++ b/src/humanloop/requests/agent_call_response_tool_choice.py @@ -0,0 +1,8 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing +from .tool_choice import ToolChoiceParams + +AgentCallResponseToolChoiceParams = typing.Union[ + typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams +] diff --git a/src/humanloop/requests/agent_call_stream_response.py b/src/humanloop/requests/agent_call_stream_response.py new file mode 100644 index 00000000..082d6265 --- /dev/null +++ b/src/humanloop/requests/agent_call_stream_response.py @@ -0,0 +1,17 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions +import typing_extensions +from .agent_call_stream_response_payload import AgentCallStreamResponsePayloadParams +from ..types.event_type import EventType + + +class AgentCallStreamResponseParams(typing_extensions.TypedDict): + """ + Response model for calling Agent in streaming mode. + """ + + log_id: str + message: str + payload: typing_extensions.NotRequired[AgentCallStreamResponsePayloadParams] + type: EventType diff --git a/src/humanloop/requests/agent_call_stream_response_payload.py b/src/humanloop/requests/agent_call_stream_response_payload.py new file mode 100644 index 00000000..0e08a6f3 --- /dev/null +++ b/src/humanloop/requests/agent_call_stream_response_payload.py @@ -0,0 +1,8 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from .log_stream_response import LogStreamResponseParams +from .log_response import LogResponseParams +from .tool_call import ToolCallParams + +AgentCallStreamResponsePayloadParams = typing.Union[LogStreamResponseParams, LogResponseParams, ToolCallParams] diff --git a/src/humanloop/requests/agent_inline_tool.py b/src/humanloop/requests/agent_inline_tool.py new file mode 100644 index 00000000..028c3d5c --- /dev/null +++ b/src/humanloop/requests/agent_inline_tool.py @@ -0,0 +1,10 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions +import typing +from .tool_function import ToolFunctionParams + + +class AgentInlineToolParams(typing_extensions.TypedDict): + type: typing.Literal["inline"] + json_schema: ToolFunctionParams diff --git a/src/humanloop/requests/agent_kernel_request.py b/src/humanloop/requests/agent_kernel_request.py new file mode 100644 index 00000000..c3c33a42 --- /dev/null +++ b/src/humanloop/requests/agent_kernel_request.py @@ -0,0 +1,112 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions +import typing_extensions +from ..types.model_endpoints import ModelEndpoints +from .agent_kernel_request_template import AgentKernelRequestTemplateParams +from ..types.template_language import TemplateLanguage +from ..types.model_providers import ModelProviders +from .agent_kernel_request_stop import AgentKernelRequestStopParams +import typing +from .response_format import ResponseFormatParams +from ..types.reasoning_effort import ReasoningEffort +from .agent_kernel_request_tools_item import AgentKernelRequestToolsItemParams + + +class AgentKernelRequestParams(typing_extensions.TypedDict): + """ + Base class used by both PromptKernelRequest and AgentKernelRequest. + + Contains the consistent Prompt-related fields. + """ + + model: str + """ + The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models) + """ + + endpoint: typing_extensions.NotRequired[ModelEndpoints] + """ + The provider model endpoint used. 
+ """ + + template: typing_extensions.NotRequired[AgentKernelRequestTemplateParams] + """ + The template contains the main structure and instructions for the model, including input variables for dynamic values. + + For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable. + For completion models, provide a prompt template as a string. + + Input variables should be specified with double curly bracket syntax: `{{input_name}}`. + """ + + template_language: typing_extensions.NotRequired[TemplateLanguage] + """ + The template language to use for rendering the template. + """ + + provider: typing_extensions.NotRequired[ModelProviders] + """ + The company providing the underlying model service. + """ + + max_tokens: typing_extensions.NotRequired[int] + """ + The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt + """ + + temperature: typing_extensions.NotRequired[float] + """ + What sampling temperature to use when making a generation. Higher values means the model will be more creative. + """ + + top_p: typing_extensions.NotRequired[float] + """ + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. + """ + + stop: typing_extensions.NotRequired[AgentKernelRequestStopParams] + """ + The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence. + """ + + presence_penalty: typing_extensions.NotRequired[float] + """ + Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far. + """ + + frequency_penalty: typing_extensions.NotRequired[float] + """ + Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far. + """ + + other: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Other parameter values to be passed to the provider call. + """ + + seed: typing_extensions.NotRequired[int] + """ + If specified, model will make a best effort to sample deterministically, but it is not guaranteed. + """ + + response_format: typing_extensions.NotRequired[ResponseFormatParams] + """ + The format of the response. Only `{"type": "json_object"}` is currently supported for chat. + """ + + reasoning_effort: typing_extensions.NotRequired[ReasoningEffort] + """ + Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models. + """ + + tools: typing_extensions.NotRequired[typing.Sequence[AgentKernelRequestToolsItemParams]] + attributes: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used. + """ + + max_iterations: typing_extensions.NotRequired[int] + """ + The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called. 
+ """ diff --git a/src/humanloop/requests/agent_kernel_request_stop.py b/src/humanloop/requests/agent_kernel_request_stop.py new file mode 100644 index 00000000..eae95d35 --- /dev/null +++ b/src/humanloop/requests/agent_kernel_request_stop.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +AgentKernelRequestStopParams = typing.Union[str, typing.Sequence[str]] diff --git a/src/humanloop/requests/agent_kernel_request_template.py b/src/humanloop/requests/agent_kernel_request_template.py new file mode 100644 index 00000000..7261667d --- /dev/null +++ b/src/humanloop/requests/agent_kernel_request_template.py @@ -0,0 +1,6 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from .chat_message import ChatMessageParams + +AgentKernelRequestTemplateParams = typing.Union[str, typing.Sequence[ChatMessageParams]] diff --git a/src/humanloop/requests/agent_kernel_request_tools_item.py b/src/humanloop/requests/agent_kernel_request_tools_item.py new file mode 100644 index 00000000..27b63984 --- /dev/null +++ b/src/humanloop/requests/agent_kernel_request_tools_item.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from .agent_linked_file_request import AgentLinkedFileRequestParams +from .agent_inline_tool import AgentInlineToolParams + +AgentKernelRequestToolsItemParams = typing.Union[AgentLinkedFileRequestParams, AgentInlineToolParams] diff --git a/src/humanloop/requests/agent_linked_file_request.py b/src/humanloop/requests/agent_linked_file_request.py new file mode 100644 index 00000000..9addfe2a --- /dev/null +++ b/src/humanloop/requests/agent_linked_file_request.py @@ -0,0 +1,10 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions +import typing +from .linked_file_request import LinkedFileRequestParams + + +class AgentLinkedFileRequestParams(typing_extensions.TypedDict): + type: typing.Literal["file"] + link: LinkedFileRequestParams diff --git a/src/humanloop/requests/agent_linked_file_response.py b/src/humanloop/requests/agent_linked_file_response.py new file mode 100644 index 00000000..f04afcaf --- /dev/null +++ b/src/humanloop/requests/agent_linked_file_response.py @@ -0,0 +1,17 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations +import typing_extensions +import typing +from .linked_file_request import LinkedFileRequestParams +import typing_extensions +import typing + +if typing.TYPE_CHECKING: + from .agent_linked_file_response_file import AgentLinkedFileResponseFileParams + + +class AgentLinkedFileResponseParams(typing_extensions.TypedDict): + type: typing.Literal["file"] + link: LinkedFileRequestParams + file: typing_extensions.NotRequired["AgentLinkedFileResponseFileParams"] diff --git a/src/humanloop/requests/agent_linked_file_response_file.py b/src/humanloop/requests/agent_linked_file_response_file.py new file mode 100644 index 00000000..bb328de2 --- /dev/null +++ b/src/humanloop/requests/agent_linked_file_response_file.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. 
+
+from __future__ import annotations
+import typing_extensions
+import typing_extensions
+from .chat_message import ChatMessageParams
+import typing
+from .agent_log_response_tool_choice import AgentLogResponseToolChoiceParams
+from .agent_response import AgentResponseParams
+import datetime as dt
+from ..types.log_status import LogStatus
+import typing
+
+if typing.TYPE_CHECKING:
+    from .evaluator_log_response import EvaluatorLogResponseParams
+    from .log_response import LogResponseParams
+
+
+class AgentLogResponseParams(typing_extensions.TypedDict):
+    """
+    General request for creating a Log
+    """
+
+    output_message: typing_extensions.NotRequired[ChatMessageParams]
+    """
+    The message returned by the provider.
+    """
+
+    prompt_tokens: typing_extensions.NotRequired[int]
+    """
+    Number of tokens in the prompt used to generate the output.
+    """
+
+    reasoning_tokens: typing_extensions.NotRequired[int]
+    """
+    Number of reasoning tokens used to generate the output.
+    """
+
+    output_tokens: typing_extensions.NotRequired[int]
+    """
+    Number of tokens in the output generated by the model.
+    """
+
+    prompt_cost: typing_extensions.NotRequired[float]
+    """
+    Cost in dollars associated to the tokens in the prompt.
+    """
+
+    output_cost: typing_extensions.NotRequired[float]
+    """
+    Cost in dollars associated to the tokens in the output.
+    """
+
+    finish_reason: typing_extensions.NotRequired[str]
+    """
+    Reason the generation finished.
+    """
+
+    messages: typing_extensions.NotRequired[typing.Sequence[ChatMessageParams]]
+    """
+    The messages passed to the provider chat endpoint.
+    """
+
+    tool_choice: typing_extensions.NotRequired[AgentLogResponseToolChoiceParams]
+    """
+    Controls how the model uses tools. The following options are supported:
+    - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+    - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+    - `'required'` means the model can decide to call one or more of the provided tools.
+    - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+    """
+
+    agent: AgentResponseParams
+    """
+    Agent that generated the Log.
+    """
+
+    start_time: typing_extensions.NotRequired[dt.datetime]
+    """
+    When the logged event started.
+    """
+
+    end_time: typing_extensions.NotRequired[dt.datetime]
+    """
+    When the logged event ended.
+ """ + + output: typing_extensions.NotRequired[str] + """ + Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + """ + + created_at: typing_extensions.NotRequired[dt.datetime] + """ + User defined timestamp for when the log was created. + """ + + error: typing_extensions.NotRequired[str] + """ + Error message if the log is an error. + """ + + provider_latency: typing_extensions.NotRequired[float] + """ + Duration of the logged event in seconds. + """ + + stdout: typing_extensions.NotRequired[str] + """ + Captured log and debug statements. + """ + + provider_request: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Raw request sent to provider. + """ + + provider_response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Raw response received the provider. + """ + + inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + The inputs passed to the prompt template. + """ + + source: typing_extensions.NotRequired[str] + """ + Identifies where the model was called from. + """ + + metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Any additional metadata to record. + """ + + log_status: typing_extensions.NotRequired[LogStatus] + """ + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + """ + + source_datapoint_id: typing_extensions.NotRequired[str] + """ + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + """ + + trace_parent_id: typing_extensions.NotRequired[str] + """ + The ID of the parent Log to nest this Log under in a Trace. + """ + + batches: typing_extensions.NotRequired[typing.Sequence[str]] + """ + Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations + """ + + user: typing_extensions.NotRequired[str] + """ + End-user ID related to the Log. + """ + + environment: typing_extensions.NotRequired[str] + """ + The name of the Environment the Log is associated to. + """ + + save: typing_extensions.NotRequired[bool] + """ + Whether the request/response payloads will be stored on Humanloop. + """ + + log_id: typing_extensions.NotRequired[str] + """ + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + """ + + id: str + """ + Unique identifier for the Log. + """ + + evaluator_logs: typing.Sequence["EvaluatorLogResponseParams"] + """ + List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log. + """ + + trace_flow_id: typing_extensions.NotRequired[str] + """ + Identifier for the Flow that the Trace belongs to. + """ + + trace_id: typing_extensions.NotRequired[str] + """ + Identifier for the Trace that the Log belongs to. + """ + + trace_children: typing_extensions.NotRequired[typing.Sequence["LogResponseParams"]] + """ + Logs nested under this Log in the Trace. 
+ """ diff --git a/src/humanloop/requests/agent_log_response_tool_choice.py b/src/humanloop/requests/agent_log_response_tool_choice.py new file mode 100644 index 00000000..e239a69c --- /dev/null +++ b/src/humanloop/requests/agent_log_response_tool_choice.py @@ -0,0 +1,8 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from .tool_choice import ToolChoiceParams + +AgentLogResponseToolChoiceParams = typing.Union[ + typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams +] diff --git a/src/humanloop/requests/agent_log_stream_response.py b/src/humanloop/requests/agent_log_stream_response.py new file mode 100644 index 00000000..710d55cf --- /dev/null +++ b/src/humanloop/requests/agent_log_stream_response.py @@ -0,0 +1,87 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions +import typing_extensions +import datetime as dt +from .chat_message import ChatMessageParams + + +class AgentLogStreamResponseParams(typing_extensions.TypedDict): + """ + Prompt specific log output shared between PromptLogRequest and PromptCallLogResponse. + """ + + output: typing_extensions.NotRequired[str] + """ + Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + """ + + created_at: typing_extensions.NotRequired[dt.datetime] + """ + User defined timestamp for when the log was created. + """ + + error: typing_extensions.NotRequired[str] + """ + Error message if the log is an error. + """ + + provider_latency: typing_extensions.NotRequired[float] + """ + Duration of the logged event in seconds. + """ + + stdout: typing_extensions.NotRequired[str] + """ + Captured log and debug statements. + """ + + output_message: typing_extensions.NotRequired[ChatMessageParams] + """ + The message returned by the provider. + """ + + prompt_tokens: typing_extensions.NotRequired[int] + """ + Number of tokens in the prompt used to generate the output. + """ + + reasoning_tokens: typing_extensions.NotRequired[int] + """ + Number of reasoning tokens used to generate the output. + """ + + output_tokens: typing_extensions.NotRequired[int] + """ + Number of tokens in the output generated by the model. + """ + + prompt_cost: typing_extensions.NotRequired[float] + """ + Cost in dollars associated to the tokens in the prompt. + """ + + output_cost: typing_extensions.NotRequired[float] + """ + Cost in dollars associated to the tokens in the output. + """ + + finish_reason: typing_extensions.NotRequired[str] + """ + Reason the generation finished. + """ + + id: str + """ + ID of the log. + """ + + agent_id: str + """ + ID of the Agent the log belongs to. + """ + + version_id: str + """ + ID of the specific version of the Agent. + """ diff --git a/src/humanloop/requests/agent_response.py b/src/humanloop/requests/agent_response.py new file mode 100644 index 00000000..07e6413e --- /dev/null +++ b/src/humanloop/requests/agent_response.py @@ -0,0 +1,242 @@ +# This file was auto-generated by Fern from our API Definition. 
+
+from __future__ import annotations
+import typing_extensions
+import typing_extensions
+from ..types.model_endpoints import ModelEndpoints
+from .agent_response_template import AgentResponseTemplateParams
+from ..types.template_language import TemplateLanguage
+from ..types.model_providers import ModelProviders
+from .agent_response_stop import AgentResponseStopParams
+import typing
+from .response_format import ResponseFormatParams
+from ..types.reasoning_effort import ReasoningEffort
+from .environment_response import EnvironmentResponseParams
+import datetime as dt
+from ..types.user_response import UserResponse
+from ..types.version_status import VersionStatus
+from .input_response import InputResponseParams
+from .evaluator_aggregate import EvaluatorAggregateParams
+import typing
+
+if typing.TYPE_CHECKING:
+    from .agent_response_tools_item import AgentResponseToolsItemParams
+    from .monitoring_evaluator_response import MonitoringEvaluatorResponseParams
+
+
+class AgentResponseParams(typing_extensions.TypedDict):
+    """
+    Base type that all File Responses should inherit from.
+
+    Attributes defined here are common to all File Responses and should be overridden
+    in the inheriting classes with documentation and appropriate Field definitions.
+    """
+
+    path: str
+    """
+    Path of the Agent, including the name, which is used as a unique identifier.
+    """
+
+    id: str
+    """
+    Unique identifier for the Agent.
+    """
+
+    directory_id: typing_extensions.NotRequired[str]
+    """
+    ID of the directory that the file is in on Humanloop.
+    """
+
+    model: str
+    """
+    The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+    """
+
+    endpoint: typing_extensions.NotRequired[ModelEndpoints]
+    """
+    The provider model endpoint used.
+    """
+
+    template: typing_extensions.NotRequired[AgentResponseTemplateParams]
+    """
+    The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+    For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+    For completion models, provide a prompt template as a string.
+
+    Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+    """
+
+    template_language: typing_extensions.NotRequired[TemplateLanguage]
+    """
+    The template language to use for rendering the template.
+    """
+
+    provider: typing_extensions.NotRequired[ModelProviders]
+    """
+    The company providing the underlying model service.
+    """
+
+    max_tokens: typing_extensions.NotRequired[int]
+    """
+    The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt.
+    """
+
+    temperature: typing_extensions.NotRequired[float]
+    """
+    What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+    """
+
+    top_p: typing_extensions.NotRequired[float]
+    """
+    An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+    """
+
+    stop: typing_extensions.NotRequired[AgentResponseStopParams]
+    """
+    The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+    """
+
+    presence_penalty: typing_extensions.NotRequired[float]
+    """
+    Number between -2.0 and 2.0.
Positive values penalize new tokens based on whether they appear in the generation so far. + """ + + frequency_penalty: typing_extensions.NotRequired[float] + """ + Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far. + """ + + other: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Other parameter values to be passed to the provider call. + """ + + seed: typing_extensions.NotRequired[int] + """ + If specified, model will make a best effort to sample deterministically, but it is not guaranteed. + """ + + response_format: typing_extensions.NotRequired[ResponseFormatParams] + """ + The format of the response. Only `{"type": "json_object"}` is currently supported for chat. + """ + + reasoning_effort: typing_extensions.NotRequired[ReasoningEffort] + """ + Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models. + """ + + tools: typing.Sequence["AgentResponseToolsItemParams"] + """ + List of tools that the Agent can call. These can be linked files or inline tools. + """ + + attributes: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used. + """ + + max_iterations: typing_extensions.NotRequired[int] + """ + The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called. + """ + + commit_message: typing_extensions.NotRequired[str] + """ + Message describing the changes made. + """ + + version_name: typing_extensions.NotRequired[str] + """ + Unique name for the Prompt version. Each Prompt can only have one version with a given name. + """ + + version_description: typing_extensions.NotRequired[str] + """ + Description of the Version. + """ + + description: typing_extensions.NotRequired[str] + """ + Description of the Agent. + """ + + tags: typing_extensions.NotRequired[typing.Sequence[str]] + """ + List of tags associated with the file. + """ + + readme: typing_extensions.NotRequired[str] + """ + Long description of the file. + """ + + name: str + """ + Name of the Agent. + """ + + schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + The JSON schema for the File. + """ + + version_id: str + """ + Unique identifier for the specific Agent Version. If no query params provided, the default deployed Agent Version is returned. + """ + + type: typing_extensions.NotRequired[typing.Literal["agent"]] + environments: typing_extensions.NotRequired[typing.Sequence[EnvironmentResponseParams]] + """ + The list of environments the Agent Version is deployed to. + """ + + created_at: dt.datetime + updated_at: dt.datetime + created_by: typing_extensions.NotRequired[UserResponse] + """ + The user who created the Agent. + """ + + committed_by: typing_extensions.NotRequired[UserResponse] + """ + The user who committed the Agent Version. + """ + + committed_at: typing_extensions.NotRequired[dt.datetime] + """ + The date and time the Agent Version was committed. + """ + + status: VersionStatus + """ + The status of the Agent Version. 
+ """ + + last_used_at: dt.datetime + version_logs_count: int + """ + The number of logs that have been generated for this Agent Version + """ + + total_logs_count: int + """ + The number of logs that have been generated across all Agent Versions + """ + + inputs: typing.Sequence[InputResponseParams] + """ + Inputs associated to the Agent. Inputs correspond to any of the variables used within the Agent template. + """ + + evaluators: typing_extensions.NotRequired[typing.Sequence["MonitoringEvaluatorResponseParams"]] + """ + Evaluators that have been attached to this Agent that are used for monitoring logs. + """ + + evaluator_aggregates: typing_extensions.NotRequired[typing.Sequence[EvaluatorAggregateParams]] + """ + Aggregation of Evaluator results for the Agent Version. + """ diff --git a/src/humanloop/requests/agent_response_stop.py b/src/humanloop/requests/agent_response_stop.py new file mode 100644 index 00000000..a395ee73 --- /dev/null +++ b/src/humanloop/requests/agent_response_stop.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +AgentResponseStopParams = typing.Union[str, typing.Sequence[str]] diff --git a/src/humanloop/requests/agent_response_template.py b/src/humanloop/requests/agent_response_template.py new file mode 100644 index 00000000..94be65f1 --- /dev/null +++ b/src/humanloop/requests/agent_response_template.py @@ -0,0 +1,6 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from .chat_message import ChatMessageParams + +AgentResponseTemplateParams = typing.Union[str, typing.Sequence[ChatMessageParams]] diff --git a/src/humanloop/requests/agent_response_tools_item.py b/src/humanloop/requests/agent_response_tools_item.py new file mode 100644 index 00000000..5181579b --- /dev/null +++ b/src/humanloop/requests/agent_response_tools_item.py @@ -0,0 +1,10 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations +import typing +from .agent_inline_tool import AgentInlineToolParams +import typing + +if typing.TYPE_CHECKING: + from .agent_linked_file_response import AgentLinkedFileResponseParams +AgentResponseToolsItemParams = typing.Union["AgentLinkedFileResponseParams", AgentInlineToolParams] diff --git a/src/humanloop/requests/create_agent_log_response.py b/src/humanloop/requests/create_agent_log_response.py new file mode 100644 index 00000000..203aa387 --- /dev/null +++ b/src/humanloop/requests/create_agent_log_response.py @@ -0,0 +1,31 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions +import typing_extensions +from ..types.log_status import LogStatus + + +class CreateAgentLogResponseParams(typing_extensions.TypedDict): + """ + Response for an Agent Log. + """ + + id: str + """ + Unique identifier for the Log. + """ + + agent_id: str + """ + Unique identifier for the Agent. + """ + + version_id: str + """ + Unique identifier for the Flow Version. + """ + + log_status: typing_extensions.NotRequired[LogStatus] + """ + Status of the Flow Log. When a Flow Log is marked as `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs. 
+ """ diff --git a/src/humanloop/requests/dataset_response.py b/src/humanloop/requests/dataset_response.py index 88f95068..413e188c 100644 --- a/src/humanloop/requests/dataset_response.py +++ b/src/humanloop/requests/dataset_response.py @@ -43,6 +43,11 @@ class DatasetResponseParams(typing_extensions.TypedDict): Description of the Dataset. """ + schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + The JSON schema for the File. + """ + readme: typing_extensions.NotRequired[str] """ Long description of the file. diff --git a/src/humanloop/requests/directory_with_parents_and_children_response_files_item.py b/src/humanloop/requests/directory_with_parents_and_children_response_files_item.py index f101bf15..db9370b9 100644 --- a/src/humanloop/requests/directory_with_parents_and_children_response_files_item.py +++ b/src/humanloop/requests/directory_with_parents_and_children_response_files_item.py @@ -6,7 +6,13 @@ from .evaluator_response import EvaluatorResponseParams from .dataset_response import DatasetResponseParams from .flow_response import FlowResponseParams +from .agent_response import AgentResponseParams DirectoryWithParentsAndChildrenResponseFilesItemParams = typing.Union[ - PromptResponseParams, ToolResponseParams, EvaluatorResponseParams, DatasetResponseParams, FlowResponseParams + PromptResponseParams, + ToolResponseParams, + EvaluatorResponseParams, + DatasetResponseParams, + FlowResponseParams, + AgentResponseParams, ] diff --git a/src/humanloop/requests/evaluator_response.py b/src/humanloop/requests/evaluator_response.py index fe1230b6..3037a82a 100644 --- a/src/humanloop/requests/evaluator_response.py +++ b/src/humanloop/requests/evaluator_response.py @@ -53,6 +53,11 @@ class EvaluatorResponseParams(typing_extensions.TypedDict): Description of the Evaluator. """ + schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + The JSON schema for the File. + """ + readme: typing_extensions.NotRequired[str] """ Long description of the file. diff --git a/src/humanloop/requests/file_environment_response_file.py b/src/humanloop/requests/file_environment_response_file.py index 4ac6b0c3..04c0b51d 100644 --- a/src/humanloop/requests/file_environment_response_file.py +++ b/src/humanloop/requests/file_environment_response_file.py @@ -6,7 +6,13 @@ from .dataset_response import DatasetResponseParams from .evaluator_response import EvaluatorResponseParams from .flow_response import FlowResponseParams +from .agent_response import AgentResponseParams FileEnvironmentResponseFileParams = typing.Union[ - PromptResponseParams, ToolResponseParams, DatasetResponseParams, EvaluatorResponseParams, FlowResponseParams + PromptResponseParams, + ToolResponseParams, + DatasetResponseParams, + EvaluatorResponseParams, + FlowResponseParams, + AgentResponseParams, ] diff --git a/src/humanloop/requests/file_environment_variable_request.py b/src/humanloop/requests/file_environment_variable_request.py new file mode 100644 index 00000000..bb70bda4 --- /dev/null +++ b/src/humanloop/requests/file_environment_variable_request.py @@ -0,0 +1,15 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions + + +class FileEnvironmentVariableRequestParams(typing_extensions.TypedDict): + name: str + """ + Name of the environment variable. + """ + + value: str + """ + Value of the environment variable. 
+ """ diff --git a/src/humanloop/requests/flow_response.py b/src/humanloop/requests/flow_response.py index 0adbc54c..05228d62 100644 --- a/src/humanloop/requests/flow_response.py +++ b/src/humanloop/requests/flow_response.py @@ -55,6 +55,11 @@ class FlowResponseParams(typing_extensions.TypedDict): Description of the Flow. """ + schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + The JSON schema for the File. + """ + readme: typing_extensions.NotRequired[str] """ Long description of the file. diff --git a/src/humanloop/requests/linked_file_request.py b/src/humanloop/requests/linked_file_request.py new file mode 100644 index 00000000..2bbba19c --- /dev/null +++ b/src/humanloop/requests/linked_file_request.py @@ -0,0 +1,10 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions +import typing_extensions + + +class LinkedFileRequestParams(typing_extensions.TypedDict): + file_id: str + environment_id: typing_extensions.NotRequired[str] + version_id: typing_extensions.NotRequired[str] diff --git a/src/humanloop/requests/list_agents.py b/src/humanloop/requests/list_agents.py new file mode 100644 index 00000000..4a72f1db --- /dev/null +++ b/src/humanloop/requests/list_agents.py @@ -0,0 +1,12 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing_extensions +import typing +from .agent_response import AgentResponseParams + + +class ListAgentsParams(typing_extensions.TypedDict): + records: typing.Sequence[AgentResponseParams] + """ + The list of Agents. + """ diff --git a/src/humanloop/requests/log_response.py b/src/humanloop/requests/log_response.py index 15a4cff6..cb3ce212 100644 --- a/src/humanloop/requests/log_response.py +++ b/src/humanloop/requests/log_response.py @@ -9,6 +9,11 @@ from .tool_log_response import ToolLogResponseParams from .evaluator_log_response import EvaluatorLogResponseParams from .flow_log_response import FlowLogResponseParams + from .agent_log_response import AgentLogResponseParams LogResponseParams = typing.Union[ - "PromptLogResponseParams", "ToolLogResponseParams", "EvaluatorLogResponseParams", "FlowLogResponseParams" + "PromptLogResponseParams", + "ToolLogResponseParams", + "EvaluatorLogResponseParams", + "FlowLogResponseParams", + "AgentLogResponseParams", ] diff --git a/src/humanloop/requests/log_stream_response.py b/src/humanloop/requests/log_stream_response.py new file mode 100644 index 00000000..e142e7fb --- /dev/null +++ b/src/humanloop/requests/log_stream_response.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from .prompt_call_stream_response import PromptCallStreamResponseParams +from .agent_log_stream_response import AgentLogStreamResponseParams + +LogStreamResponseParams = typing.Union[PromptCallStreamResponseParams, AgentLogStreamResponseParams] diff --git a/src/humanloop/requests/paginated_data_agent_response.py b/src/humanloop/requests/paginated_data_agent_response.py new file mode 100644 index 00000000..c8d67533 --- /dev/null +++ b/src/humanloop/requests/paginated_data_agent_response.py @@ -0,0 +1,12 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing_extensions +import typing +from .agent_response import AgentResponseParams + + +class PaginatedDataAgentResponseParams(typing_extensions.TypedDict): + records: typing.Sequence[AgentResponseParams] + page: int + size: int + total: int diff --git a/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py b/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py similarity index 65% rename from src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py rename to src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py index cf8bc4bf..0e7adb64 100644 --- a/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py +++ b/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py @@ -2,16 +2,16 @@ import typing_extensions import typing -from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item import ( - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams, +from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item import ( + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams, ) -class PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseParams( +class PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseParams( typing_extensions.TypedDict ): records: typing.Sequence[ - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams ] page: int size: int diff --git a/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py b/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py similarity index 58% rename from src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py rename to src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py index 1ba74108..b43a5521 100644 --- a/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py +++ b/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py @@ -6,9 +6,13 @@ from .dataset_response import DatasetResponseParams from .evaluator_response import EvaluatorResponseParams from .flow_response import FlowResponseParams +from .agent_response import AgentResponseParams 
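+# With AgentResponseParams joining the union below, a hedged narrowing sketch
+# using the discriminating literal `type` field (the `record` variable is
+# hypothetical):
+#
+#     if record.get("type") == "agent":
+#         ...  # handle the record as an AgentResponseParams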
-PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItemParams = ( - typing.Union[ - PromptResponseParams, ToolResponseParams, DatasetResponseParams, EvaluatorResponseParams, FlowResponseParams - ] -) +PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams = typing.Union[ + PromptResponseParams, + ToolResponseParams, + DatasetResponseParams, + EvaluatorResponseParams, + FlowResponseParams, + AgentResponseParams, +] diff --git a/src/humanloop/requests/populate_template_response.py b/src/humanloop/requests/populate_template_response.py index 9faac4f7..be6efc8d 100644 --- a/src/humanloop/requests/populate_template_response.py +++ b/src/humanloop/requests/populate_template_response.py @@ -165,6 +165,11 @@ class PopulateTemplateResponseParams(typing_extensions.TypedDict): Name of the Prompt. """ + schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + The JSON schema for the Prompt. + """ + version_id: str """ Unique identifier for the specific Prompt Version. If no query params provided, the default deployed Prompt Version is returned. diff --git a/src/humanloop/requests/prompt_kernel_request.py b/src/humanloop/requests/prompt_kernel_request.py index 61355166..e6bdce36 100644 --- a/src/humanloop/requests/prompt_kernel_request.py +++ b/src/humanloop/requests/prompt_kernel_request.py @@ -14,6 +14,12 @@ class PromptKernelRequestParams(typing_extensions.TypedDict): + """ + Base class used by both PromptKernelRequest and AgentKernelRequest. + + Contains the consistent Prompt-related fields. + """ + model: str """ The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models) diff --git a/src/humanloop/requests/prompt_response.py b/src/humanloop/requests/prompt_response.py index 15c447b4..45b1db22 100644 --- a/src/humanloop/requests/prompt_response.py +++ b/src/humanloop/requests/prompt_response.py @@ -168,6 +168,11 @@ class PromptResponseParams(typing_extensions.TypedDict): Name of the Prompt. """ + schema: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]] + """ + The JSON schema for the Prompt. + """ + version_id: str """ Unique identifier for the specific Prompt Version. If no query params provided, the default deployed Prompt Version is returned. diff --git a/src/humanloop/requests/run_version_response.py b/src/humanloop/requests/run_version_response.py index 879ea25c..569d0d76 100644 --- a/src/humanloop/requests/run_version_response.py +++ b/src/humanloop/requests/run_version_response.py @@ -5,7 +5,8 @@ from .tool_response import ToolResponseParams from .evaluator_response import EvaluatorResponseParams from .flow_response import FlowResponseParams +from .agent_response import AgentResponseParams RunVersionResponseParams = typing.Union[ - PromptResponseParams, ToolResponseParams, EvaluatorResponseParams, FlowResponseParams + PromptResponseParams, ToolResponseParams, EvaluatorResponseParams, FlowResponseParams, AgentResponseParams ] diff --git a/src/humanloop/requests/tool_call_response.py b/src/humanloop/requests/tool_call_response.py new file mode 100644 index 00000000..1c92b28f --- /dev/null +++ b/src/humanloop/requests/tool_call_response.py @@ -0,0 +1,146 @@ +# This file was auto-generated by Fern from our API Definition. 
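+# ToolCallResponseParams below is the TypedDict counterpart of the
+# ToolCallResponse returned by the new `client.tools.call(...)` method added
+# later in this patch. A hedged usage sketch (the path and inputs are
+# hypothetical):
+#
+#     response = client.tools.call(path="my-tool", inputs={"x": 1})
+#     print(response.id, response.output)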
+
+import typing_extensions
+import typing_extensions
+import datetime as dt
+from .tool_response import ToolResponseParams
+import typing
+from ..types.log_status import LogStatus
+from .evaluator_log_response import EvaluatorLogResponseParams
+from .log_response import LogResponseParams
+
+
+class ToolCallResponseParams(typing_extensions.TypedDict):
+    """
+    Response model for a Tool call.
+    """
+
+    start_time: typing_extensions.NotRequired[dt.datetime]
+    """
+    When the logged event started.
+    """
+
+    end_time: typing_extensions.NotRequired[dt.datetime]
+    """
+    When the logged event ended.
+    """
+
+    tool: ToolResponseParams
+    """
+    Tool used to generate the Log.
+    """
+
+    output: typing_extensions.NotRequired[str]
+    """
+    Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+    """
+
+    created_at: typing_extensions.NotRequired[dt.datetime]
+    """
+    User defined timestamp for when the log was created.
+    """
+
+    error: typing_extensions.NotRequired[str]
+    """
+    Error message if the log is an error.
+    """
+
+    provider_latency: typing_extensions.NotRequired[float]
+    """
+    Duration of the logged event in seconds.
+    """
+
+    stdout: typing_extensions.NotRequired[str]
+    """
+    Captured log and debug statements.
+    """
+
+    provider_request: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+    """
+    Raw request sent to the provider.
+    """
+
+    provider_response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+    """
+    Raw response received from the provider.
+    """
+
+    inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+    """
+    The inputs passed to the prompt template.
+    """
+
+    source: typing_extensions.NotRequired[str]
+    """
+    Identifies where the model was called from.
+    """
+
+    metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
+    """
+    Any additional metadata to record.
+    """
+
+    log_status: typing_extensions.NotRequired[LogStatus]
+    """
+    Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+    """
+
+    source_datapoint_id: typing_extensions.NotRequired[str]
+    """
+    Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+    """
+
+    trace_parent_id: typing_extensions.NotRequired[str]
+    """
+    The ID of the parent Log to nest this Log under in a Trace.
+    """
+
+    batches: typing_extensions.NotRequired[typing.Sequence[str]]
+    """
+    Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations
+    """
+
+    user: typing_extensions.NotRequired[str]
+    """
+    End-user ID related to the Log.
+    """
+
+    environment: typing_extensions.NotRequired[str]
+    """
+    The name of the Environment the Log is associated to.
+    """
+
+    save: typing_extensions.NotRequired[bool]
+    """
+    Whether the request/response payloads will be stored on Humanloop.
+    """
+
+    log_id: typing_extensions.NotRequired[str]
+    """
+    This will identify a Log.
If you don't provide a Log ID, Humanloop will generate one for you. + """ + + id: str + """ + ID of the log. + """ + + evaluator_logs: typing.Sequence[EvaluatorLogResponseParams] + """ + List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log. + """ + + trace_flow_id: typing_extensions.NotRequired[str] + """ + Identifier for the Flow that the Trace belongs to. + """ + + trace_id: typing_extensions.NotRequired[str] + """ + ID of the Trace containing the Tool Call Log. + """ + + trace_children: typing_extensions.NotRequired[typing.Sequence[LogResponseParams]] + """ + Logs nested under this Log in the Trace. + """ diff --git a/src/humanloop/requests/version_deployment_response_file.py b/src/humanloop/requests/version_deployment_response_file.py index 8a16af00..9659cb49 100644 --- a/src/humanloop/requests/version_deployment_response_file.py +++ b/src/humanloop/requests/version_deployment_response_file.py @@ -10,6 +10,12 @@ from .tool_response import ToolResponseParams from .evaluator_response import EvaluatorResponseParams from .flow_response import FlowResponseParams + from .agent_response import AgentResponseParams VersionDeploymentResponseFileParams = typing.Union[ - "PromptResponseParams", "ToolResponseParams", DatasetResponseParams, "EvaluatorResponseParams", "FlowResponseParams" + "PromptResponseParams", + "ToolResponseParams", + DatasetResponseParams, + "EvaluatorResponseParams", + "FlowResponseParams", + "AgentResponseParams", ] diff --git a/src/humanloop/requests/version_id_response_version.py b/src/humanloop/requests/version_id_response_version.py index 50ecf7bc..9c317679 100644 --- a/src/humanloop/requests/version_id_response_version.py +++ b/src/humanloop/requests/version_id_response_version.py @@ -10,6 +10,12 @@ from .tool_response import ToolResponseParams from .evaluator_response import EvaluatorResponseParams from .flow_response import FlowResponseParams + from .agent_response import AgentResponseParams VersionIdResponseVersionParams = typing.Union[ - "PromptResponseParams", "ToolResponseParams", DatasetResponseParams, "EvaluatorResponseParams", "FlowResponseParams" + "PromptResponseParams", + "ToolResponseParams", + DatasetResponseParams, + "EvaluatorResponseParams", + "FlowResponseParams", + "AgentResponseParams", ] diff --git a/src/humanloop/tools/client.py b/src/humanloop/tools/client.py index a3e54db4..d004b586 100644 --- a/src/humanloop/tools/client.py +++ b/src/humanloop/tools/client.py @@ -2,17 +2,18 @@ import typing from ..core.client_wrapper import SyncClientWrapper +from ..requests.tool_kernel_request import ToolKernelRequestParams import datetime as dt from ..types.log_status import LogStatus -from ..requests.tool_kernel_request import ToolKernelRequestParams from ..core.request_options import RequestOptions -from ..types.create_tool_log_response import CreateToolLogResponse +from ..types.tool_call_response import ToolCallResponse from ..core.serialization import convert_and_respect_annotation_metadata from ..core.unchecked_base_model import construct_type from ..errors.unprocessable_entity_error import UnprocessableEntityError from ..types.http_validation_error import HttpValidationError from json.decoder import JSONDecodeError from ..core.api_error import ApiError +from ..types.create_tool_log_response import CreateToolLogResponse from ..types.log_response import LogResponse from ..core.jsonable_encoder import jsonable_encoder from ..types.project_sort_by import ProjectSortBy @@ -31,6 +32,8 @@ from 
 ..requests.evaluator_activation_deactivation_request_deactivate_item import (
     EvaluatorActivationDeactivationRequestDeactivateItemParams,
 )
+from ..types.file_environment_variable_request import FileEnvironmentVariableRequest
+from ..requests.file_environment_variable_request import FileEnvironmentVariableRequestParams
 from ..core.client_wrapper import AsyncClientWrapper
 from ..core.pagination import AsyncPager
@@ -42,6 +45,167 @@ class ToolsClient:
     def __init__(self, *, client_wrapper: SyncClientWrapper):
         self._client_wrapper = client_wrapper
 
+    def call(
+        self,
+        *,
+        version_id: typing.Optional[str] = None,
+        environment: typing.Optional[str] = None,
+        path: typing.Optional[str] = OMIT,
+        id: typing.Optional[str] = OMIT,
+        tool: typing.Optional[ToolKernelRequestParams] = OMIT,
+        inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        source: typing.Optional[str] = OMIT,
+        metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        start_time: typing.Optional[dt.datetime] = OMIT,
+        end_time: typing.Optional[dt.datetime] = OMIT,
+        log_status: typing.Optional[LogStatus] = OMIT,
+        source_datapoint_id: typing.Optional[str] = OMIT,
+        trace_parent_id: typing.Optional[str] = OMIT,
+        user: typing.Optional[str] = OMIT,
+        tool_call_request_environment: typing.Optional[str] = OMIT,
+        save: typing.Optional[bool] = OMIT,
+        log_id: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> ToolCallResponse:
+        """
+        Call a Tool.
+
+        Calling a Tool with inputs runs the tool's source code and logs the result and metadata to Humanloop.
+
+        You can use the query parameters `version_id` or `environment` to target
+        an existing version of the Tool. Otherwise, the default deployed version will be chosen.
+
+        Instead of targeting an existing version explicitly, you can pass in
+        Tool details in the request body. In this case, we will check if the details correspond
+        to an existing version of the Tool. If they do not, we will create a new version. This is helpful
+        in the case where you are storing or deriving your Tool details in code.
+
+        Parameters
+        ----------
+        version_id : typing.Optional[str]
+            A specific Version ID of the Tool to call.
+
+        environment : typing.Optional[str]
+            Name of the Environment identifying a deployed version to call.
+
+        path : typing.Optional[str]
+            Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Tool.
+
+        tool : typing.Optional[ToolKernelRequestParams]
+            Details of your Tool. A new Tool version will be created if the provided details are new.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the prompt template.
+
+        source : typing.Optional[str]
+            Identifies where the model was called from.
+
+        metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Any additional metadata to record.
+
+        start_time : typing.Optional[dt.datetime]
+            When the logged event started.
+
+        end_time : typing.Optional[dt.datetime]
+            When the logged event ended.
+
+        log_status : typing.Optional[LogStatus]
+            Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible.
Updating this from specified to unspecified is undefined behavior. + + source_datapoint_id : typing.Optional[str] + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + + trace_parent_id : typing.Optional[str] + The ID of the parent Log to nest this Log under in a Trace. + + user : typing.Optional[str] + End-user ID related to the Log. + + tool_call_request_environment : typing.Optional[str] + The name of the Environment the Log is associated to. + + save : typing.Optional[bool] + Whether the request/response payloads will be stored on Humanloop. + + log_id : typing.Optional[str] + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + ToolCallResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + + client = Humanloop( + api_key="YOUR_API_KEY", + ) + client.tools.call() + """ + _response = self._client_wrapper.httpx_client.request( + "tools/call", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + }, + json={ + "path": path, + "id": id, + "tool": convert_and_respect_annotation_metadata( + object_=tool, annotation=ToolKernelRequestParams, direction="write" + ), + "inputs": inputs, + "source": source, + "metadata": metadata, + "start_time": start_time, + "end_time": end_time, + "log_status": log_status, + "source_datapoint_id": source_datapoint_id, + "trace_parent_id": trace_parent_id, + "user": user, + "environment": tool_call_request_environment, + "save": save, + "log_id": log_id, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + ToolCallResponse, + construct_type( + type_=ToolCallResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + def log( self, *, @@ -49,6 +213,7 @@ def log( environment: typing.Optional[str] = None, path: typing.Optional[str] = OMIT, id: typing.Optional[str] = OMIT, + tool: typing.Optional[ToolKernelRequestParams] = OMIT, start_time: typing.Optional[dt.datetime] = OMIT, end_time: typing.Optional[dt.datetime] = OMIT, output: typing.Optional[str] = OMIT, @@ -68,7 +233,6 @@ def log( tool_log_request_environment: typing.Optional[str] = OMIT, save: typing.Optional[bool] = OMIT, log_id: typing.Optional[str] = OMIT, - tool: typing.Optional[ToolKernelRequestParams] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> CreateToolLogResponse: """ @@ -96,6 +260,9 @@ def log( id : typing.Optional[str] ID for an existing Tool. + tool : typing.Optional[ToolKernelRequestParams] + Details of your Tool. A new Tool version will be created if the provided details are new. + start_time : typing.Optional[dt.datetime] When the logged event started. 
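For illustration, here is a minimal usage sketch of the `call` endpoint documented above. The Tool path, inputs, and inline kernel details are hypothetical placeholders, not values from this patch; `tool` takes the shape of `ToolKernelRequestParams`, and the response exposes the `id` and `output` fields of `ToolCallResponseParams` shown earlier:

    from humanloop import Humanloop

    client = Humanloop(api_key="YOUR_API_KEY")

    # Target the version deployed to a named Environment...
    response = client.tools.call(
        environment="production",
        path="utils/calculator",  # hypothetical Tool path
        inputs={"a": 1, "b": 2},
    )
    print(response.id, response.output)

    # ...or pass inline Tool details; a new version is created if they are new.
    response = client.tools.call(
        tool={"function": {"name": "calculator", "parameters": {"type": "object"}}},  # illustrative kernel
        inputs={"a": 1, "b": 2},
    )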
@@ -153,9 +320,6 @@ def log( log_id : typing.Optional[str] This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - tool : typing.Optional[ToolKernelRequestParams] - Details of your Tool. A new Tool version will be created if the provided details are new. - request_options : typing.Optional[RequestOptions] Request-specific configuration. @@ -201,6 +365,9 @@ def log( json={ "path": path, "id": id, + "tool": convert_and_respect_annotation_metadata( + object_=tool, annotation=ToolKernelRequestParams, direction="write" + ), "start_time": start_time, "end_time": end_time, "output": output, @@ -220,9 +387,6 @@ def log( "environment": tool_log_request_environment, "save": save, "log_id": log_id, - "tool": convert_and_respect_annotation_metadata( - object_=tool, annotation=ToolKernelRequestParams, direction="write" - ), }, headers={ "content-type": "application/json", @@ -1246,43 +1410,405 @@ def update_monitoring( activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] Evaluators to activate for Monitoring. These will be automatically run on new Logs. - deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] - Evaluators to deactivate. These will not be run on new Logs. + deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] + Evaluators to deactivate. These will not be run on new Logs. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + ToolResponse + Successful Response + + Examples + -------- + from humanloop import Humanloop + + client = Humanloop( + api_key="YOUR_API_KEY", + ) + client.tools.update_monitoring( + id="tl_789ghi", + activate=[{"evaluator_version_id": "evv_1abc4308abd"}], + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}/evaluators", + method="POST", + json={ + "activate": convert_and_respect_annotation_metadata( + object_=activate, + annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams], + direction="write", + ), + "deactivate": convert_and_respect_annotation_metadata( + object_=deactivate, + annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams], + direction="write", + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + ToolResponse, + construct_type( + type_=ToolResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_environment_variables( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[FileEnvironmentVariableRequest]: + """ + Parameters + ---------- + id : str + Unique identifier for File. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[FileEnvironmentVariableRequest] + Successful Response + + Examples + -------- + from humanloop import Humanloop + + client = Humanloop( + api_key="YOUR_API_KEY", + ) + client.tools.get_environment_variables( + id="id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}/environment-variables", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[FileEnvironmentVariableRequest], + construct_type( + type_=typing.List[FileEnvironmentVariableRequest], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def add_environment_variable( + self, + id: str, + *, + request: typing.Sequence[FileEnvironmentVariableRequestParams], + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.List[FileEnvironmentVariableRequest]: + """ + Add an environment variable to a Tool. + + Parameters + ---------- + id : str + Unique identifier for Tool. + + request : typing.Sequence[FileEnvironmentVariableRequestParams] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[FileEnvironmentVariableRequest] + Successful Response + + Examples + -------- + from humanloop import Humanloop + + client = Humanloop( + api_key="YOUR_API_KEY", + ) + client.tools.add_environment_variable( + id="id", + request=[{"name": "name", "value": "value"}], + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}/environment-variables", + method="POST", + json=convert_and_respect_annotation_metadata( + object_=request, annotation=typing.Sequence[FileEnvironmentVariableRequestParams], direction="write" + ), + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[FileEnvironmentVariableRequest], + construct_type( + type_=typing.List[FileEnvironmentVariableRequest], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def delete_environment_variable( + self, id: str, name: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[FileEnvironmentVariableRequest]: + """ + Parameters + ---------- + id : str + Unique identifier for File. + + name : str + Name of the Environment Variable to delete. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+
+        Returns
+        -------
+        typing.List[FileEnvironmentVariableRequest]
+            Successful Response
+
+        Examples
+        --------
+        from humanloop import Humanloop
+
+        client = Humanloop(
+            api_key="YOUR_API_KEY",
+        )
+        client.tools.delete_environment_variable(
+            id="id",
+            name="name",
+        )
+        """
+        _response = self._client_wrapper.httpx_client.request(
+            f"tools/{jsonable_encoder(id)}/environment-variables/{jsonable_encoder(name)}",
+            method="DELETE",
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(
+                    typing.List[FileEnvironmentVariableRequest],
+                    construct_type(
+                        type_=typing.List[FileEnvironmentVariableRequest],  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncToolsClient:
+    def __init__(self, *, client_wrapper: AsyncClientWrapper):
+        self._client_wrapper = client_wrapper
+
+    async def call(
+        self,
+        *,
+        version_id: typing.Optional[str] = None,
+        environment: typing.Optional[str] = None,
+        path: typing.Optional[str] = OMIT,
+        id: typing.Optional[str] = OMIT,
+        tool: typing.Optional[ToolKernelRequestParams] = OMIT,
+        inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        source: typing.Optional[str] = OMIT,
+        metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+        start_time: typing.Optional[dt.datetime] = OMIT,
+        end_time: typing.Optional[dt.datetime] = OMIT,
+        log_status: typing.Optional[LogStatus] = OMIT,
+        source_datapoint_id: typing.Optional[str] = OMIT,
+        trace_parent_id: typing.Optional[str] = OMIT,
+        user: typing.Optional[str] = OMIT,
+        tool_call_request_environment: typing.Optional[str] = OMIT,
+        save: typing.Optional[bool] = OMIT,
+        log_id: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> ToolCallResponse:
+        """
+        Call a Tool.
+
+        Calling a Tool with inputs runs the tool's source code and logs the result and metadata to Humanloop.
+
+        You can use the query parameters `version_id` or `environment` to target
+        an existing version of the Tool. Otherwise, the default deployed version will be chosen.
+
+        Instead of targeting an existing version explicitly, you can pass in
+        Tool details in the request body. In this case, we will check if the details correspond
+        to an existing version of the Tool. If they do not, we will create a new version. This is helpful
+        in the case where you are storing or deriving your Tool details in code.
+
+        Parameters
+        ----------
+        version_id : typing.Optional[str]
+            A specific Version ID of the Tool to call.
+
+        environment : typing.Optional[str]
+            Name of the Environment identifying a deployed version to call.
+
+        path : typing.Optional[str]
+            Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Tool.
+
+        tool : typing.Optional[ToolKernelRequestParams]
+            Details of your Tool. A new Tool version will be created if the provided details are new.
+ + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the prompt template. + + source : typing.Optional[str] + Identifies where the model was called from. + + metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Any additional metadata to record. + + start_time : typing.Optional[dt.datetime] + When the logged event started. + + end_time : typing.Optional[dt.datetime] + When the logged event ended. + + log_status : typing.Optional[LogStatus] + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + + source_datapoint_id : typing.Optional[str] + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + + trace_parent_id : typing.Optional[str] + The ID of the parent Log to nest this Log under in a Trace. + + user : typing.Optional[str] + End-user ID related to the Log. + + tool_call_request_environment : typing.Optional[str] + The name of the Environment the Log is associated to. + + save : typing.Optional[bool] + Whether the request/response payloads will be stored on Humanloop. + + log_id : typing.Optional[str] + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - ToolResponse + ToolCallResponse Successful Response Examples -------- - from humanloop import Humanloop + import asyncio - client = Humanloop( + from humanloop import AsyncHumanloop + + client = AsyncHumanloop( api_key="YOUR_API_KEY", ) - client.tools.update_monitoring( - id="tl_789ghi", - activate=[{"evaluator_version_id": "evv_1abc4308abd"}], - ) + + + async def main() -> None: + await client.tools.call() + + + asyncio.run(main()) """ - _response = self._client_wrapper.httpx_client.request( - f"tools/{jsonable_encoder(id)}/evaluators", + _response = await self._client_wrapper.httpx_client.request( + "tools/call", method="POST", + params={ + "version_id": version_id, + "environment": environment, + }, json={ - "activate": convert_and_respect_annotation_metadata( - object_=activate, - annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams], - direction="write", - ), - "deactivate": convert_and_respect_annotation_metadata( - object_=deactivate, - annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams], - direction="write", + "path": path, + "id": id, + "tool": convert_and_respect_annotation_metadata( + object_=tool, annotation=ToolKernelRequestParams, direction="write" ), + "inputs": inputs, + "source": source, + "metadata": metadata, + "start_time": start_time, + "end_time": end_time, + "log_status": log_status, + "source_datapoint_id": source_datapoint_id, + "trace_parent_id": trace_parent_id, + "user": user, + "environment": tool_call_request_environment, + "save": save, + "log_id": log_id, + }, + headers={ + "content-type": "application/json", }, request_options=request_options, omit=OMIT, @@ -1290,9 +1816,9 @@ def update_monitoring( try: if 200 <= _response.status_code < 300: return 
typing.cast( - ToolResponse, + ToolCallResponse, construct_type( - type_=ToolResponse, # type: ignore + type_=ToolCallResponse, # type: ignore object_=_response.json(), ), ) @@ -1311,11 +1837,6 @@ def update_monitoring( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - -class AsyncToolsClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - async def log( self, *, @@ -1323,6 +1844,7 @@ async def log( environment: typing.Optional[str] = None, path: typing.Optional[str] = OMIT, id: typing.Optional[str] = OMIT, + tool: typing.Optional[ToolKernelRequestParams] = OMIT, start_time: typing.Optional[dt.datetime] = OMIT, end_time: typing.Optional[dt.datetime] = OMIT, output: typing.Optional[str] = OMIT, @@ -1342,7 +1864,6 @@ async def log( tool_log_request_environment: typing.Optional[str] = OMIT, save: typing.Optional[bool] = OMIT, log_id: typing.Optional[str] = OMIT, - tool: typing.Optional[ToolKernelRequestParams] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> CreateToolLogResponse: """ @@ -1370,6 +1891,9 @@ async def log( id : typing.Optional[str] ID for an existing Tool. + tool : typing.Optional[ToolKernelRequestParams] + Details of your Tool. A new Tool version will be created if the provided details are new. + start_time : typing.Optional[dt.datetime] When the logged event started. @@ -1427,9 +1951,6 @@ async def log( log_id : typing.Optional[str] This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. - tool : typing.Optional[ToolKernelRequestParams] - Details of your Tool. A new Tool version will be created if the provided details are new. - request_options : typing.Optional[RequestOptions] Request-specific configuration. @@ -1483,6 +2004,9 @@ async def main() -> None: json={ "path": path, "id": id, + "tool": convert_and_respect_annotation_metadata( + object_=tool, annotation=ToolKernelRequestParams, direction="write" + ), "start_time": start_time, "end_time": end_time, "output": output, @@ -1502,9 +2026,6 @@ async def main() -> None: "environment": tool_log_request_environment, "save": save, "log_id": log_id, - "tool": convert_and_respect_annotation_metadata( - object_=tool, annotation=ToolKernelRequestParams, direction="write" - ), }, headers={ "content-type": "application/json", @@ -2699,3 +3220,215 @@ async def main() -> None: except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_environment_variables( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[FileEnvironmentVariableRequest]: + """ + Parameters + ---------- + id : str + Unique identifier for File. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[FileEnvironmentVariableRequest] + Successful Response + + Examples + -------- + import asyncio + + from humanloop import AsyncHumanloop + + client = AsyncHumanloop( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.tools.get_environment_variables( + id="id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}/environment-variables", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[FileEnvironmentVariableRequest], + construct_type( + type_=typing.List[FileEnvironmentVariableRequest], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def add_environment_variable( + self, + id: str, + *, + request: typing.Sequence[FileEnvironmentVariableRequestParams], + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.List[FileEnvironmentVariableRequest]: + """ + Add an environment variable to a Tool. + + Parameters + ---------- + id : str + Unique identifier for Tool. + + request : typing.Sequence[FileEnvironmentVariableRequestParams] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.List[FileEnvironmentVariableRequest] + Successful Response + + Examples + -------- + import asyncio + + from humanloop import AsyncHumanloop + + client = AsyncHumanloop( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.tools.add_environment_variable( + id="id", + request=[{"name": "name", "value": "value"}], + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}/environment-variables", + method="POST", + json=convert_and_respect_annotation_metadata( + object_=request, annotation=typing.Sequence[FileEnvironmentVariableRequestParams], direction="write" + ), + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[FileEnvironmentVariableRequest], + construct_type( + type_=typing.List[FileEnvironmentVariableRequest], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def delete_environment_variable( + self, id: str, name: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[FileEnvironmentVariableRequest]: + """ + Parameters + ---------- + id : str + Unique identifier for File. + + name : str + Name of the Environment Variable to delete. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + typing.List[FileEnvironmentVariableRequest] + Successful Response + + Examples + -------- + import asyncio + + from humanloop import AsyncHumanloop + + client = AsyncHumanloop( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.tools.delete_environment_variable( + id="id", + name="name", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}/environment-variables/{jsonable_encoder(name)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.List[FileEnvironmentVariableRequest], + construct_type( + type_=typing.List[FileEnvironmentVariableRequest], # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/humanloop/types/__init__.py b/src/humanloop/types/__init__.py index 99475cde..3e286ac6 100644 --- a/src/humanloop/types/__init__.py +++ b/src/humanloop/types/__init__.py @@ -1,6 +1,25 @@ # This file was auto-generated by Fern from our API Definition. +from .agent_call_response import AgentCallResponse +from .agent_call_response_tool_choice import AgentCallResponseToolChoice +from .agent_call_stream_response import AgentCallStreamResponse +from .agent_call_stream_response_payload import AgentCallStreamResponsePayload from .agent_config_response import AgentConfigResponse +from .agent_inline_tool import AgentInlineTool +from .agent_kernel_request import AgentKernelRequest +from .agent_kernel_request_stop import AgentKernelRequestStop +from .agent_kernel_request_template import AgentKernelRequestTemplate +from .agent_kernel_request_tools_item import AgentKernelRequestToolsItem +from .agent_linked_file_request import AgentLinkedFileRequest +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_linked_file_response_file import AgentLinkedFileResponseFile +from .agent_log_response import AgentLogResponse +from .agent_log_response_tool_choice import AgentLogResponseToolChoice +from .agent_log_stream_response import AgentLogStreamResponse +from .agent_response import AgentResponse +from .agent_response_stop import AgentResponseStop +from .agent_response_template import AgentResponseTemplate +from .agent_response_tools_item import AgentResponseToolsItem from .base_models_user_response import BaseModelsUserResponse from .boolean_evaluator_stats_response import BooleanEvaluatorStatsResponse from .chat_message import ChatMessage @@ -11,6 +30,7 @@ from .code_evaluator_request import CodeEvaluatorRequest from .commit_request import CommitRequest from .config_tool_response import ConfigToolResponse +from .create_agent_log_response import CreateAgentLogResponse from .create_datapoint_request import CreateDatapointRequest from .create_datapoint_request_target_value import CreateDatapointRequestTargetValue from .create_evaluator_log_response import CreateEvaluatorLogResponse @@ -57,10 +77,12 @@ from .evaluator_return_type_enum import EvaluatorReturnTypeEnum from .evaluator_version_id import EvaluatorVersionId from .evaluators_request import 
EvaluatorsRequest +from .event_type import EventType from .external_evaluator_request import ExternalEvaluatorRequest from .feedback_type import FeedbackType from .file_environment_response import FileEnvironmentResponse from .file_environment_response_file import FileEnvironmentResponseFile +from .file_environment_variable_request import FileEnvironmentVariableRequest from .file_id import FileId from .file_path import FilePath from .file_request import FileRequest @@ -78,7 +100,9 @@ from .image_url import ImageUrl from .image_url_detail import ImageUrlDetail from .input_response import InputResponse +from .linked_file_request import LinkedFileRequest from .linked_tool_response import LinkedToolResponse +from .list_agents import ListAgents from .list_datasets import ListDatasets from .list_evaluators import ListEvaluators from .list_flows import ListFlows @@ -87,6 +111,7 @@ from .llm_evaluator_request import LlmEvaluatorRequest from .log_response import LogResponse from .log_status import LogStatus +from .log_stream_response import LogStreamResponse from .model_endpoints import ModelEndpoints from .model_providers import ModelProviders from .monitoring_evaluator_environment_request import MonitoringEvaluatorEnvironmentRequest @@ -96,17 +121,18 @@ from .numeric_evaluator_stats_response import NumericEvaluatorStatsResponse from .observability_status import ObservabilityStatus from .overall_stats import OverallStats +from .paginated_data_agent_response import PaginatedDataAgentResponse from .paginated_data_evaluation_log_response import PaginatedDataEvaluationLogResponse from .paginated_data_evaluator_response import PaginatedDataEvaluatorResponse from .paginated_data_flow_response import PaginatedDataFlowResponse from .paginated_data_log_response import PaginatedDataLogResponse from .paginated_data_prompt_response import PaginatedDataPromptResponse from .paginated_data_tool_response import PaginatedDataToolResponse -from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response import ( - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse, +from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response import ( + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse, ) -from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item import ( - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem, +from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item import ( + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem, ) from .paginated_datapoint_response import PaginatedDatapointResponse from .paginated_dataset_response import PaginatedDatasetResponse @@ -145,6 +171,7 @@ from .text_evaluator_stats_response import TextEvaluatorStatsResponse from .time_unit import TimeUnit from .tool_call import ToolCall +from .tool_call_response import ToolCallResponse from .tool_choice import ToolChoice from .tool_function import ToolFunction from .tool_kernel_request import ToolKernelRequest @@ -167,7 +194,26 @@ from .version_status import VersionStatus __all__ = [ + "AgentCallResponse", + "AgentCallResponseToolChoice", + "AgentCallStreamResponse", + "AgentCallStreamResponsePayload", 
"AgentConfigResponse", + "AgentInlineTool", + "AgentKernelRequest", + "AgentKernelRequestStop", + "AgentKernelRequestTemplate", + "AgentKernelRequestToolsItem", + "AgentLinkedFileRequest", + "AgentLinkedFileResponse", + "AgentLinkedFileResponseFile", + "AgentLogResponse", + "AgentLogResponseToolChoice", + "AgentLogStreamResponse", + "AgentResponse", + "AgentResponseStop", + "AgentResponseTemplate", + "AgentResponseToolsItem", "BaseModelsUserResponse", "BooleanEvaluatorStatsResponse", "ChatMessage", @@ -178,6 +224,7 @@ "CodeEvaluatorRequest", "CommitRequest", "ConfigToolResponse", + "CreateAgentLogResponse", "CreateDatapointRequest", "CreateDatapointRequestTargetValue", "CreateEvaluatorLogResponse", @@ -222,10 +269,12 @@ "EvaluatorReturnTypeEnum", "EvaluatorVersionId", "EvaluatorsRequest", + "EventType", "ExternalEvaluatorRequest", "FeedbackType", "FileEnvironmentResponse", "FileEnvironmentResponseFile", + "FileEnvironmentVariableRequest", "FileId", "FilePath", "FileRequest", @@ -243,7 +292,9 @@ "ImageUrl", "ImageUrlDetail", "InputResponse", + "LinkedFileRequest", "LinkedToolResponse", + "ListAgents", "ListDatasets", "ListEvaluators", "ListFlows", @@ -252,6 +303,7 @@ "LlmEvaluatorRequest", "LogResponse", "LogStatus", + "LogStreamResponse", "ModelEndpoints", "ModelProviders", "MonitoringEvaluatorEnvironmentRequest", @@ -261,14 +313,15 @@ "NumericEvaluatorStatsResponse", "ObservabilityStatus", "OverallStats", + "PaginatedDataAgentResponse", "PaginatedDataEvaluationLogResponse", "PaginatedDataEvaluatorResponse", "PaginatedDataFlowResponse", "PaginatedDataLogResponse", "PaginatedDataPromptResponse", "PaginatedDataToolResponse", - "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse", - "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem", + "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse", + "PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem", "PaginatedDatapointResponse", "PaginatedDatasetResponse", "PaginatedEvaluationResponse", @@ -306,6 +359,7 @@ "TextEvaluatorStatsResponse", "TimeUnit", "ToolCall", + "ToolCallResponse", "ToolChoice", "ToolFunction", "ToolKernelRequest", diff --git a/src/humanloop/types/agent_call_response.py b/src/humanloop/types/agent_call_response.py new file mode 100644 index 00000000..43d31edf --- /dev/null +++ b/src/humanloop/types/agent_call_response.py @@ -0,0 +1,142 @@ +# This file was auto-generated by Fern from our API Definition. 
+
+from __future__ import annotations
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
+from .evaluator_response import EvaluatorResponse
+from .flow_response import FlowResponse
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse
+from .prompt_response import PromptResponse
+from .tool_response import ToolResponse
+from .version_deployment_response import VersionDeploymentResponse
+from .version_id_response import VersionIdResponse
+import typing
+import datetime as dt
+import pydantic
+from .chat_message import ChatMessage
+from .agent_call_response_tool_choice import AgentCallResponseToolChoice
+from .log_status import LogStatus
+from .prompt_call_log_response import PromptCallLogResponse
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.pydantic_utilities import update_forward_refs
+
+
+class AgentCallResponse(UncheckedBaseModel):
+    """
+    Response model for an Agent call with potentially multiple log samples.
+    """
+
+    start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+    """
+    When the logged event started.
+    """
+
+    end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+    """
+    When the logged event ended.
+    """
+
+    messages: typing.Optional[typing.List[ChatMessage]] = pydantic.Field(default=None)
+    """
+    The messages passed to the provider chat endpoint.
+    """
+
+    tool_choice: typing.Optional[AgentCallResponseToolChoice] = pydantic.Field(default=None)
+    """
+    Controls how the model uses tools. The following options are supported:
+    - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+    - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+    - `'required'` means the model must call one or more of the provided tools.
+    - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
+    """
+
+    prompt: PromptResponse = pydantic.Field()
+    """
+    Prompt used to generate the Log.
+    """
+
+    inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+    """
+    The inputs passed to the prompt template.
+    """
+
+    source: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Identifies where the model was called from.
+    """
+
+    metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+    """
+    Any additional metadata to record.
+    """
+
+    log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
+    """
+    Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+    """
+
+    source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ """ + + trace_parent_id: typing.Optional[str] = pydantic.Field(default=None) + """ + The ID of the parent Log to nest this Log under in a Trace. + """ + + user: typing.Optional[str] = pydantic.Field(default=None) + """ + End-user ID related to the Log. + """ + + environment: typing.Optional[str] = pydantic.Field(default=None) + """ + The name of the Environment the Log is associated to. + """ + + save: typing.Optional[bool] = pydantic.Field(default=None) + """ + Whether the request/response payloads will be stored on Humanloop. + """ + + log_id: typing.Optional[str] = pydantic.Field(default=None) + """ + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + """ + + id: str = pydantic.Field() + """ + ID of the log. + """ + + trace_id: typing.Optional[str] = pydantic.Field(default=None) + """ + ID of the Trace containing the Prompt Call Log. + """ + + logs: typing.List[PromptCallLogResponse] = pydantic.Field() + """ + The logs generated by the Prompt call. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +update_forward_refs(AgentLinkedFileResponse, AgentCallResponse=AgentCallResponse) +update_forward_refs(AgentResponse, AgentCallResponse=AgentCallResponse) +update_forward_refs(EvaluatorResponse, AgentCallResponse=AgentCallResponse) +update_forward_refs(FlowResponse, AgentCallResponse=AgentCallResponse) +update_forward_refs(MonitoringEvaluatorResponse, AgentCallResponse=AgentCallResponse) +update_forward_refs(PromptResponse, AgentCallResponse=AgentCallResponse) +update_forward_refs(ToolResponse, AgentCallResponse=AgentCallResponse) +update_forward_refs(VersionDeploymentResponse, AgentCallResponse=AgentCallResponse) +update_forward_refs(VersionIdResponse, AgentCallResponse=AgentCallResponse) diff --git a/src/humanloop/types/agent_call_response_tool_choice.py b/src/humanloop/types/agent_call_response_tool_choice.py new file mode 100644 index 00000000..95eca73e --- /dev/null +++ b/src/humanloop/types/agent_call_response_tool_choice.py @@ -0,0 +1,8 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from .tool_choice import ToolChoice + +AgentCallResponseToolChoice = typing.Union[ + typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice +] diff --git a/src/humanloop/types/agent_call_stream_response.py b/src/humanloop/types/agent_call_stream_response.py new file mode 100644 index 00000000..9d0304e8 --- /dev/null +++ b/src/humanloop/types/agent_call_stream_response.py @@ -0,0 +1,60 @@ +# This file was auto-generated by Fern from our API Definition. 
+
+from __future__ import annotations
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_log_response import AgentLogResponse
+from .agent_response import AgentResponse
+from .evaluator_log_response import EvaluatorLogResponse
+from .evaluator_response import EvaluatorResponse
+from .flow_log_response import FlowLogResponse
+from .flow_response import FlowResponse
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse
+from .prompt_log_response import PromptLogResponse
+from .prompt_response import PromptResponse
+from .tool_log_response import ToolLogResponse
+from .tool_response import ToolResponse
+from .version_deployment_response import VersionDeploymentResponse
+from .version_id_response import VersionIdResponse
+import typing
+from .agent_call_stream_response_payload import AgentCallStreamResponsePayload
+from .event_type import EventType
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+from ..core.pydantic_utilities import update_forward_refs
+
+
+class AgentCallStreamResponse(UncheckedBaseModel):
+    """
+    Response model for calling an Agent in streaming mode.
+    """
+
+    log_id: str
+    message: str
+    payload: typing.Optional[AgentCallStreamResponsePayload] = None
+    type: EventType
+
+    if IS_PYDANTIC_V2:
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
+    else:
+
+        class Config:
+            frozen = True
+            smart_union = True
+            extra = pydantic.Extra.allow
+
+
+update_forward_refs(AgentLinkedFileResponse, AgentCallStreamResponse=AgentCallStreamResponse)
+update_forward_refs(AgentLogResponse, AgentCallStreamResponse=AgentCallStreamResponse)
+update_forward_refs(AgentResponse, AgentCallStreamResponse=AgentCallStreamResponse)
+update_forward_refs(EvaluatorLogResponse, AgentCallStreamResponse=AgentCallStreamResponse)
+update_forward_refs(EvaluatorResponse, AgentCallStreamResponse=AgentCallStreamResponse)
+update_forward_refs(FlowLogResponse, AgentCallStreamResponse=AgentCallStreamResponse)
+update_forward_refs(FlowResponse, AgentCallStreamResponse=AgentCallStreamResponse)
+update_forward_refs(MonitoringEvaluatorResponse, AgentCallStreamResponse=AgentCallStreamResponse)
+update_forward_refs(PromptLogResponse, AgentCallStreamResponse=AgentCallStreamResponse)
+update_forward_refs(PromptResponse, AgentCallStreamResponse=AgentCallStreamResponse)
+update_forward_refs(ToolLogResponse, AgentCallStreamResponse=AgentCallStreamResponse)
+update_forward_refs(ToolResponse, AgentCallStreamResponse=AgentCallStreamResponse)
+update_forward_refs(VersionDeploymentResponse, AgentCallStreamResponse=AgentCallStreamResponse)
+update_forward_refs(VersionIdResponse, AgentCallStreamResponse=AgentCallStreamResponse)
diff --git a/src/humanloop/types/agent_call_stream_response_payload.py b/src/humanloop/types/agent_call_stream_response_payload.py
new file mode 100644
index 00000000..85422047
--- /dev/null
+++ b/src/humanloop/types/agent_call_stream_response_payload.py
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
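+# The payload union defined below can be narrowed with isinstance checks,
+# e.g. (illustrative only):
+#
+#     if isinstance(payload, ToolCall):
+#         ...  # the Agent requested a tool invocation
+#     elif isinstance(payload, LogResponse):
+#         ...  # a completed Log
+#     else:
+#         ...  # an incremental LogStreamResponse chunk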
+
+import typing
+from .log_stream_response import LogStreamResponse
+from .log_response import LogResponse
+from .tool_call import ToolCall
+
+AgentCallStreamResponsePayload = typing.Union[LogStreamResponse, LogResponse, ToolCall]
diff --git a/src/humanloop/types/agent_inline_tool.py b/src/humanloop/types/agent_inline_tool.py
new file mode 100644
index 00000000..19daddb8
--- /dev/null
+++ b/src/humanloop/types/agent_inline_tool.py
@@ -0,0 +1,21 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .tool_function import ToolFunction
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class AgentInlineTool(UncheckedBaseModel):
+    type: typing.Literal["inline"] = "inline"
+    json_schema: ToolFunction
+
+    if IS_PYDANTIC_V2:
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
+    else:
+
+        class Config:
+            frozen = True
+            smart_union = True
+            extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/agent_kernel_request.py b/src/humanloop/types/agent_kernel_request.py
new file mode 100644
index 00000000..77d3d042
--- /dev/null
+++ b/src/humanloop/types/agent_kernel_request.py
@@ -0,0 +1,122 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import pydantic
+import typing
+from .model_endpoints import ModelEndpoints
+from .agent_kernel_request_template import AgentKernelRequestTemplate
+from .template_language import TemplateLanguage
+from .model_providers import ModelProviders
+from .agent_kernel_request_stop import AgentKernelRequestStop
+from .response_format import ResponseFormat
+from .reasoning_effort import ReasoningEffort
+from .agent_kernel_request_tools_item import AgentKernelRequestToolsItem
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class AgentKernelRequest(UncheckedBaseModel):
+    """
+    Fields shared by both PromptKernelRequest and AgentKernelRequest.
+
+    Contains the consistent Prompt-related fields, along with Agent-specific fields such as `max_iterations`.
+    """
+
+    model: str = pydantic.Field()
+    """
+    The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models)
+    """
+
+    endpoint: typing.Optional[ModelEndpoints] = pydantic.Field(default=None)
+    """
+    The provider model endpoint used.
+    """
+
+    template: typing.Optional[AgentKernelRequestTemplate] = pydantic.Field(default=None)
+    """
+    The template contains the main structure and instructions for the model, including input variables for dynamic values.
+
+    For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable.
+    For completion models, provide a prompt template as a string.
+
+    Input variables should be specified with double curly bracket syntax: `{{input_name}}`.
+    """
+
+    template_language: typing.Optional[TemplateLanguage] = pydantic.Field(default=None)
+    """
+    The template language to use for rendering the template.
+    """
+
+    provider: typing.Optional[ModelProviders] = pydantic.Field(default=None)
+    """
+    The company providing the underlying model service.
+    """
+
+    max_tokens: typing.Optional[int] = pydantic.Field(default=None)
+    """
+    The maximum number of tokens to generate.
Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt.
+    """
+
+    temperature: typing.Optional[float] = pydantic.Field(default=None)
+    """
+    What sampling temperature to use when making a generation. Higher values mean the model will be more creative.
+    """
+
+    top_p: typing.Optional[float] = pydantic.Field(default=None)
+    """
+    An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
+    """
+
+    stop: typing.Optional[AgentKernelRequestStop] = pydantic.Field(default=None)
+    """
+    The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence.
+    """
+
+    presence_penalty: typing.Optional[float] = pydantic.Field(default=None)
+    """
+    Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far.
+    """
+
+    frequency_penalty: typing.Optional[float] = pydantic.Field(default=None)
+    """
+    Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far.
+    """
+
+    other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+    """
+    Other parameter values to be passed to the provider call.
+    """
+
+    seed: typing.Optional[int] = pydantic.Field(default=None)
+    """
+    If specified, the model will make a best effort to sample deterministically, but it is not guaranteed.
+    """
+
+    response_format: typing.Optional[ResponseFormat] = pydantic.Field(default=None)
+    """
+    The format of the response. Only `{"type": "json_object"}` is currently supported for chat.
+    """
+
+    reasoning_effort: typing.Optional[ReasoningEffort] = pydantic.Field(default=None)
+    """
+    Give the model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models.
+    """
+
+    tools: typing.Optional[typing.List[AgentKernelRequestToolsItem]] = None
+    attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+    """
+    Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used.
+    """
+
+    max_iterations: typing.Optional[int] = pydantic.Field(default=None)
+    """
+    The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called.
+    """
+
+    if IS_PYDANTIC_V2:
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
+    else:
+
+        class Config:
+            frozen = True
+            smart_union = True
+            extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/agent_kernel_request_stop.py b/src/humanloop/types/agent_kernel_request_stop.py
new file mode 100644
index 00000000..e38c12e2
--- /dev/null
+++ b/src/humanloop/types/agent_kernel_request_stop.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AgentKernelRequestStop = typing.Union[str, typing.List[str]]
diff --git a/src/humanloop/types/agent_kernel_request_template.py b/src/humanloop/types/agent_kernel_request_template.py
new file mode 100644
index 00000000..31a351f2
--- /dev/null
+++ b/src/humanloop/types/agent_kernel_request_template.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
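+# The template union defined below accepts either a completion-style string or
+# a chat-style list of messages; input variables use double curly bracket
+# syntax. Illustrative values:
+#
+#     template = "Answer the question: {{question}}"
+#     template = [
+#         {"role": "system", "content": "You are a helpful agent."},
+#         {"role": "user", "content": "{{question}}"},
+#     ]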
+ +import typing +from .chat_message import ChatMessage + +AgentKernelRequestTemplate = typing.Union[str, typing.List[ChatMessage]] diff --git a/src/humanloop/types/agent_kernel_request_tools_item.py b/src/humanloop/types/agent_kernel_request_tools_item.py new file mode 100644 index 00000000..82c2fecf --- /dev/null +++ b/src/humanloop/types/agent_kernel_request_tools_item.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from .agent_linked_file_request import AgentLinkedFileRequest +from .agent_inline_tool import AgentInlineTool + +AgentKernelRequestToolsItem = typing.Union[AgentLinkedFileRequest, AgentInlineTool] diff --git a/src/humanloop/types/agent_linked_file_request.py b/src/humanloop/types/agent_linked_file_request.py new file mode 100644 index 00000000..e014464d --- /dev/null +++ b/src/humanloop/types/agent_linked_file_request.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from .linked_file_request import LinkedFileRequest +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class AgentLinkedFileRequest(UncheckedBaseModel): + type: typing.Literal["file"] = "file" + link: LinkedFileRequest + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/agent_linked_file_response.py b/src/humanloop/types/agent_linked_file_response.py new file mode 100644 index 00000000..6e7c71ac --- /dev/null +++ b/src/humanloop/types/agent_linked_file_response.py @@ -0,0 +1,45 @@ +# This file was auto-generated by Fern from our API Definition. 
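+# An Agent tool (AgentKernelRequestToolsItem, defined above) is either a link
+# to an existing File or an inline tool definition. Illustrative shapes:
+#
+#     {"type": "file", "link": {...}}                           # AgentLinkedFileRequest
+#     {"type": "inline", "json_schema": {"name": "...", ...}}   # AgentInlineTool
+#
+# The response model below additionally carries the resolved `file`, when
+# available.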
+
+from __future__ import annotations
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from .linked_file_request import LinkedFileRequest
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+from ..core.pydantic_utilities import update_forward_refs
+
+
+class AgentLinkedFileResponse(UncheckedBaseModel):
+    type: typing.Literal["file"] = "file"
+    link: LinkedFileRequest
+    file: typing.Optional["AgentLinkedFileResponseFile"] = None
+
+    if IS_PYDANTIC_V2:
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
+    else:
+
+        class Config:
+            frozen = True
+            smart_union = True
+            extra = pydantic.Extra.allow
+
+
+from .agent_response import AgentResponse  # noqa: E402
+from .evaluator_response import EvaluatorResponse  # noqa: E402
+from .flow_response import FlowResponse  # noqa: E402
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse  # noqa: E402
+from .prompt_response import PromptResponse  # noqa: E402
+from .tool_response import ToolResponse  # noqa: E402
+from .version_deployment_response import VersionDeploymentResponse  # noqa: E402
+from .version_id_response import VersionIdResponse  # noqa: E402
+from .agent_linked_file_response_file import AgentLinkedFileResponseFile  # noqa: E402
+
+update_forward_refs(AgentResponse, AgentLinkedFileResponse=AgentLinkedFileResponse)
+update_forward_refs(EvaluatorResponse, AgentLinkedFileResponse=AgentLinkedFileResponse)
+update_forward_refs(FlowResponse, AgentLinkedFileResponse=AgentLinkedFileResponse)
+update_forward_refs(MonitoringEvaluatorResponse, AgentLinkedFileResponse=AgentLinkedFileResponse)
+update_forward_refs(PromptResponse, AgentLinkedFileResponse=AgentLinkedFileResponse)
+update_forward_refs(ToolResponse, AgentLinkedFileResponse=AgentLinkedFileResponse)
+update_forward_refs(VersionDeploymentResponse, AgentLinkedFileResponse=AgentLinkedFileResponse)
+update_forward_refs(VersionIdResponse, AgentLinkedFileResponse=AgentLinkedFileResponse)
+update_forward_refs(AgentLinkedFileResponse)
diff --git a/src/humanloop/types/agent_linked_file_response_file.py b/src/humanloop/types/agent_linked_file_response_file.py
new file mode 100644
index 00000000..42d38fe4
--- /dev/null
+++ b/src/humanloop/types/agent_linked_file_response_file.py
@@ -0,0 +1,15 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+import typing
+from .dataset_response import DatasetResponse
+
+if typing.TYPE_CHECKING:
+    from .prompt_response import PromptResponse
+    from .tool_response import ToolResponse
+    from .evaluator_response import EvaluatorResponse
+    from .flow_response import FlowResponse
+    from .agent_response import AgentResponse
+AgentLinkedFileResponseFile = typing.Union[
+    "PromptResponse", "ToolResponse", DatasetResponse, "EvaluatorResponse", "FlowResponse", "AgentResponse"
+]
diff --git a/src/humanloop/types/agent_log_response.py b/src/humanloop/types/agent_log_response.py
new file mode 100644
index 00000000..63f0868d
--- /dev/null
+++ b/src/humanloop/types/agent_log_response.py
@@ -0,0 +1,237 @@
+# This file was auto-generated by Fern from our API Definition.
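+# Illustrative read of an AgentLogResponse `log` (fields defined below; the
+# token and cost fields are optional and may be None):
+#
+#     total_tokens = (log.prompt_tokens or 0) + (log.output_tokens or 0)
+#     total_cost = (log.prompt_cost or 0.0) + (log.output_cost or 0.0)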
+ +from __future__ import annotations +from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_response import AgentResponse +from .evaluator_response import EvaluatorResponse +from .flow_response import FlowResponse +from .monitoring_evaluator_response import MonitoringEvaluatorResponse +from .prompt_response import PromptResponse +from .tool_response import ToolResponse +from .version_deployment_response import VersionDeploymentResponse +from .version_id_response import VersionIdResponse +import typing +from .chat_message import ChatMessage +import pydantic +from .agent_log_response_tool_choice import AgentLogResponseToolChoice +import datetime as dt +from .log_status import LogStatus +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import update_forward_refs + + +class AgentLogResponse(UncheckedBaseModel): + """ + Response model for an Agent Log. + """ + + output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None) + """ + The message returned by the provider. + """ + + prompt_tokens: typing.Optional[int] = pydantic.Field(default=None) + """ + Number of tokens in the prompt used to generate the output. + """ + + reasoning_tokens: typing.Optional[int] = pydantic.Field(default=None) + """ + Number of reasoning tokens used to generate the output. + """ + + output_tokens: typing.Optional[int] = pydantic.Field(default=None) + """ + Number of tokens in the output generated by the model. + """ + + prompt_cost: typing.Optional[float] = pydantic.Field(default=None) + """ + Cost in dollars associated with the tokens in the prompt. + """ + + output_cost: typing.Optional[float] = pydantic.Field(default=None) + """ + Cost in dollars associated with the tokens in the output. + """ + + finish_reason: typing.Optional[str] = pydantic.Field(default=None) + """ + Reason the generation finished. + """ + + messages: typing.Optional[typing.List[ChatMessage]] = pydantic.Field(default=None) + """ + The messages passed to the provider chat endpoint. + """ + + tool_choice: typing.Optional[AgentLogResponseToolChoice] = pydantic.Field(default=None) + """ + Controls how the model uses tools. The following options are supported: + - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Agent. + - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Agent. + - `'required'` means the model must call one or more of the provided tools. + - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function. + """ + + agent: AgentResponse = pydantic.Field() + """ + Agent that generated the Log. + """ + + start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None) + """ + When the logged event started. + """ + + end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None) + """ + When the logged event ended. + """ + + output: typing.Optional[str] = pydantic.Field(default=None) + """ + Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + """ + + created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None) + """ + User-defined timestamp for when the log was created.
+ """ + + error: typing.Optional[str] = pydantic.Field(default=None) + """ + Error message if the log is an error. + """ + + provider_latency: typing.Optional[float] = pydantic.Field(default=None) + """ + Duration of the logged event in seconds. + """ + + stdout: typing.Optional[str] = pydantic.Field(default=None) + """ + Captured log and debug statements. + """ + + provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Raw request sent to provider. + """ + + provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Raw response received the provider. + """ + + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + The inputs passed to the prompt template. + """ + + source: typing.Optional[str] = pydantic.Field(default=None) + """ + Identifies where the model was called from. + """ + + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Any additional metadata to record. + """ + + log_status: typing.Optional[LogStatus] = pydantic.Field(default=None) + """ + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + """ + + source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None) + """ + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + """ + + trace_parent_id: typing.Optional[str] = pydantic.Field(default=None) + """ + The ID of the parent Log to nest this Log under in a Trace. + """ + + batches: typing.Optional[typing.List[str]] = pydantic.Field(default=None) + """ + Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations + """ + + user: typing.Optional[str] = pydantic.Field(default=None) + """ + End-user ID related to the Log. + """ + + environment: typing.Optional[str] = pydantic.Field(default=None) + """ + The name of the Environment the Log is associated to. + """ + + save: typing.Optional[bool] = pydantic.Field(default=None) + """ + Whether the request/response payloads will be stored on Humanloop. + """ + + log_id: typing.Optional[str] = pydantic.Field(default=None) + """ + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + """ + + id: str = pydantic.Field() + """ + Unique identifier for the Log. + """ + + evaluator_logs: typing.List["EvaluatorLogResponse"] = pydantic.Field() + """ + List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log. + """ + + trace_flow_id: typing.Optional[str] = pydantic.Field(default=None) + """ + Identifier for the Flow that the Trace belongs to. + """ + + trace_id: typing.Optional[str] = pydantic.Field(default=None) + """ + Identifier for the Trace that the Log belongs to. + """ + + trace_children: typing.Optional[typing.List["LogResponse"]] = pydantic.Field(default=None) + """ + Logs nested under this Log in the Trace. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .evaluator_log_response import EvaluatorLogResponse # noqa: E402 +from .flow_log_response import FlowLogResponse # noqa: E402 +from .prompt_log_response import PromptLogResponse # noqa: E402 +from .tool_log_response import ToolLogResponse # noqa: E402 +from .log_response import LogResponse # noqa: E402 + +update_forward_refs(AgentLinkedFileResponse, AgentLogResponse=AgentLogResponse) +update_forward_refs(AgentResponse, AgentLogResponse=AgentLogResponse) +update_forward_refs(EvaluatorResponse, AgentLogResponse=AgentLogResponse) +update_forward_refs(FlowResponse, AgentLogResponse=AgentLogResponse) +update_forward_refs(MonitoringEvaluatorResponse, AgentLogResponse=AgentLogResponse) +update_forward_refs(PromptResponse, AgentLogResponse=AgentLogResponse) +update_forward_refs(ToolResponse, AgentLogResponse=AgentLogResponse) +update_forward_refs(VersionDeploymentResponse, AgentLogResponse=AgentLogResponse) +update_forward_refs(VersionIdResponse, AgentLogResponse=AgentLogResponse) +update_forward_refs(EvaluatorLogResponse, AgentLogResponse=AgentLogResponse) +update_forward_refs(FlowLogResponse, AgentLogResponse=AgentLogResponse) +update_forward_refs(PromptLogResponse, AgentLogResponse=AgentLogResponse) +update_forward_refs(ToolLogResponse, AgentLogResponse=AgentLogResponse) +update_forward_refs(AgentLogResponse) diff --git a/src/humanloop/types/agent_log_response_tool_choice.py b/src/humanloop/types/agent_log_response_tool_choice.py new file mode 100644 index 00000000..5cb07628 --- /dev/null +++ b/src/humanloop/types/agent_log_response_tool_choice.py @@ -0,0 +1,8 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from .tool_choice import ToolChoice + +AgentLogResponseToolChoice = typing.Union[ + typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice +] diff --git a/src/humanloop/types/agent_log_stream_response.py b/src/humanloop/types/agent_log_stream_response.py new file mode 100644 index 00000000..91547189 --- /dev/null +++ b/src/humanloop/types/agent_log_stream_response.py @@ -0,0 +1,98 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +import pydantic +import datetime as dt +from .chat_message import ChatMessage +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class AgentLogStreamResponse(UncheckedBaseModel): + """ + Prompt specific log output shared between PromptLogRequest and PromptCallLogResponse. + """ + + output: typing.Optional[str] = pydantic.Field(default=None) + """ + Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + """ + + created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None) + """ + User defined timestamp for when the log was created. + """ + + error: typing.Optional[str] = pydantic.Field(default=None) + """ + Error message if the log is an error. + """ + + provider_latency: typing.Optional[float] = pydantic.Field(default=None) + """ + Duration of the logged event in seconds. + """ + + stdout: typing.Optional[str] = pydantic.Field(default=None) + """ + Captured log and debug statements. 
+ """ + + output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None) + """ + The message returned by the provider. + """ + + prompt_tokens: typing.Optional[int] = pydantic.Field(default=None) + """ + Number of tokens in the prompt used to generate the output. + """ + + reasoning_tokens: typing.Optional[int] = pydantic.Field(default=None) + """ + Number of reasoning tokens used to generate the output. + """ + + output_tokens: typing.Optional[int] = pydantic.Field(default=None) + """ + Number of tokens in the output generated by the model. + """ + + prompt_cost: typing.Optional[float] = pydantic.Field(default=None) + """ + Cost in dollars associated to the tokens in the prompt. + """ + + output_cost: typing.Optional[float] = pydantic.Field(default=None) + """ + Cost in dollars associated to the tokens in the output. + """ + + finish_reason: typing.Optional[str] = pydantic.Field(default=None) + """ + Reason the generation finished. + """ + + id: str = pydantic.Field() + """ + ID of the log. + """ + + agent_id: str = pydantic.Field() + """ + ID of the Agent the log belongs to. + """ + + version_id: str = pydantic.Field() + """ + ID of the specific version of the Agent. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/agent_response.py b/src/humanloop/types/agent_response.py new file mode 100644 index 00000000..0b97a8e2 --- /dev/null +++ b/src/humanloop/types/agent_response.py @@ -0,0 +1,273 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations +from ..core.unchecked_base_model import UncheckedBaseModel +import pydantic +import typing +from .model_endpoints import ModelEndpoints +from .agent_response_template import AgentResponseTemplate +from .template_language import TemplateLanguage +from .model_providers import ModelProviders +from .agent_response_stop import AgentResponseStop +from .response_format import ResponseFormat +from .reasoning_effort import ReasoningEffort +import typing_extensions +from ..core.serialization import FieldMetadata +from .environment_response import EnvironmentResponse +import datetime as dt +from .user_response import UserResponse +from .version_status import VersionStatus +from .input_response import InputResponse +from .evaluator_aggregate import EvaluatorAggregate +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import update_forward_refs + + +class AgentResponse(UncheckedBaseModel): + """ + Base type that all File Responses should inherit from. + + Attributes defined here are common to all File Responses and should be overridden + in the inheriting classes with documentation and appropriate Field definitions. + """ + + path: str = pydantic.Field() + """ + Path of the Agent, including the name, which is used as a unique identifier. + """ + + id: str = pydantic.Field() + """ + Unique identifier for the Agent. + """ + + directory_id: typing.Optional[str] = pydantic.Field(default=None) + """ + ID of the directory that the file is in on Humanloop. + """ + + model: str = pydantic.Field() + """ + The model instance used, e.g. `gpt-4`. 
See [supported models](https://humanloop.com/docs/reference/supported-models). + """ + + endpoint: typing.Optional[ModelEndpoints] = pydantic.Field(default=None) + """ + The provider model endpoint used. + """ + + template: typing.Optional[AgentResponseTemplate] = pydantic.Field(default=None) + """ + The template contains the main structure and instructions for the model, including input variables for dynamic values. + + For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable. + For completion models, provide a prompt template as a string. + + Input variables should be specified with double curly bracket syntax: `{{input_name}}`. + """ + + template_language: typing.Optional[TemplateLanguage] = pydantic.Field(default=None) + """ + The template language to use for rendering the template. + """ + + provider: typing.Optional[ModelProviders] = pydantic.Field(default=None) + """ + The company providing the underlying model service. + """ + + max_tokens: typing.Optional[int] = pydantic.Field(default=None) + """ + The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt. + """ + + temperature: typing.Optional[float] = pydantic.Field(default=None) + """ + What sampling temperature to use when making a generation. Higher values mean the model will be more creative. + """ + + top_p: typing.Optional[float] = pydantic.Field(default=None) + """ + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. + """ + + stop: typing.Optional[AgentResponseStop] = pydantic.Field(default=None) + """ + The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence. + """ + + presence_penalty: typing.Optional[float] = pydantic.Field(default=None) + """ + Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far. + """ + + frequency_penalty: typing.Optional[float] = pydantic.Field(default=None) + """ + Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far. + """ + + other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Other parameter values to be passed to the provider call. + """ + + seed: typing.Optional[int] = pydantic.Field(default=None) + """ + If specified, the model will make a best effort to sample deterministically, but it is not guaranteed. + """ + + response_format: typing.Optional[ResponseFormat] = pydantic.Field(default=None) + """ + The format of the response. Only `{"type": "json_object"}` is currently supported for chat. + """ + + reasoning_effort: typing.Optional[ReasoningEffort] = pydantic.Field(default=None) + """ + Give the model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models. + """ + + tools: typing.List["AgentResponseToolsItem"] = pydantic.Field() + """ + List of tools that the Agent can call. These can be linked files or inline tools. + """ + + attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Additional fields to describe the Agent.
Helpful to separate Agent versions from each other with details on how they were created or used. + """ + + max_iterations: typing.Optional[int] = pydantic.Field(default=None) + """ + The maximum number of iterations the Agent can run. This is used to limit the number of times the Agent model is called. + """ + + commit_message: typing.Optional[str] = pydantic.Field(default=None) + """ + Message describing the changes made. + """ + + version_name: typing.Optional[str] = pydantic.Field(default=None) + """ + Unique name for the Agent version. Each Agent can only have one version with a given name. + """ + + version_description: typing.Optional[str] = pydantic.Field(default=None) + """ + Description of the Version. + """ + + description: typing.Optional[str] = pydantic.Field(default=None) + """ + Description of the Agent. + """ + + tags: typing.Optional[typing.List[str]] = pydantic.Field(default=None) + """ + List of tags associated with the file. + """ + + readme: typing.Optional[str] = pydantic.Field(default=None) + """ + Long description of the file. + """ + + name: str = pydantic.Field() + """ + Name of the Agent. + """ + + schema_: typing_extensions.Annotated[ + typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema") + ] = pydantic.Field(default=None) + """ + The JSON schema for the File. + """ + + version_id: str = pydantic.Field() + """ + Unique identifier for the specific Agent Version. If no query params are provided, the default deployed Agent Version is returned. + """ + + type: typing.Optional[typing.Literal["agent"]] = None + environments: typing.Optional[typing.List[EnvironmentResponse]] = pydantic.Field(default=None) + """ + The list of environments the Agent Version is deployed to. + """ + + created_at: dt.datetime + updated_at: dt.datetime + created_by: typing.Optional[UserResponse] = pydantic.Field(default=None) + """ + The user who created the Agent. + """ + + committed_by: typing.Optional[UserResponse] = pydantic.Field(default=None) + """ + The user who committed the Agent Version. + """ + + committed_at: typing.Optional[dt.datetime] = pydantic.Field(default=None) + """ + The date and time the Agent Version was committed. + """ + + status: VersionStatus = pydantic.Field() + """ + The status of the Agent Version. + """ + + last_used_at: dt.datetime + version_logs_count: int = pydantic.Field() + """ + The number of logs that have been generated for this Agent Version. + """ + + total_logs_count: int = pydantic.Field() + """ + The number of logs that have been generated across all Agent Versions. + """ + + inputs: typing.List[InputResponse] = pydantic.Field() + """ + Inputs associated with the Agent. Inputs correspond to any of the variables used within the Agent template. + """ + + evaluators: typing.Optional[typing.List["MonitoringEvaluatorResponse"]] = pydantic.Field(default=None) + """ + Evaluators that have been attached to this Agent that are used for monitoring logs. + """ + + evaluator_aggregates: typing.Optional[typing.List[EvaluatorAggregate]] = pydantic.Field(default=None) + """ + Aggregation of Evaluator results for the Agent Version.
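A hedged sketch of a read-side helper over the AgentResponse fields defined above; it uses only fields that appear in this patch:

def summarize_agent(agent) -> str:
    # `agent` is an AgentResponse as defined above.
    return (
        f"{agent.path} (id={agent.id}) model={agent.model} "
        f"version={agent.version_id} status={agent.status} "
        f"logs={agent.total_logs_count}"
    )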
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402 +from .evaluator_response import EvaluatorResponse # noqa: E402 +from .flow_response import FlowResponse # noqa: E402 +from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402 +from .prompt_response import PromptResponse # noqa: E402 +from .tool_response import ToolResponse # noqa: E402 +from .version_deployment_response import VersionDeploymentResponse # noqa: E402 +from .version_id_response import VersionIdResponse # noqa: E402 +from .agent_response_tools_item import AgentResponseToolsItem # noqa: E402 + +update_forward_refs(AgentLinkedFileResponse, AgentResponse=AgentResponse) +update_forward_refs(EvaluatorResponse, AgentResponse=AgentResponse) +update_forward_refs(FlowResponse, AgentResponse=AgentResponse) +update_forward_refs(MonitoringEvaluatorResponse, AgentResponse=AgentResponse) +update_forward_refs(PromptResponse, AgentResponse=AgentResponse) +update_forward_refs(ToolResponse, AgentResponse=AgentResponse) +update_forward_refs(VersionDeploymentResponse, AgentResponse=AgentResponse) +update_forward_refs(VersionIdResponse, AgentResponse=AgentResponse) +update_forward_refs(AgentResponse) diff --git a/src/humanloop/types/agent_response_stop.py b/src/humanloop/types/agent_response_stop.py new file mode 100644 index 00000000..5c3b6a48 --- /dev/null +++ b/src/humanloop/types/agent_response_stop.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +AgentResponseStop = typing.Union[str, typing.List[str]] diff --git a/src/humanloop/types/agent_response_template.py b/src/humanloop/types/agent_response_template.py new file mode 100644 index 00000000..4c084dc8 --- /dev/null +++ b/src/humanloop/types/agent_response_template.py @@ -0,0 +1,6 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from .chat_message import ChatMessage + +AgentResponseTemplate = typing.Union[str, typing.List[ChatMessage]] diff --git a/src/humanloop/types/agent_response_tools_item.py b/src/humanloop/types/agent_response_tools_item.py new file mode 100644 index 00000000..8095608f --- /dev/null +++ b/src/humanloop/types/agent_response_tools_item.py @@ -0,0 +1,10 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations +import typing +from .agent_inline_tool import AgentInlineTool +import typing + +if typing.TYPE_CHECKING: + from .agent_linked_file_response import AgentLinkedFileResponse +AgentResponseToolsItem = typing.Union["AgentLinkedFileResponse", AgentInlineTool] diff --git a/src/humanloop/types/create_agent_log_response.py b/src/humanloop/types/create_agent_log_response.py new file mode 100644 index 00000000..e9651a80 --- /dev/null +++ b/src/humanloop/types/create_agent_log_response.py @@ -0,0 +1,42 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import pydantic +import typing +from .log_status import LogStatus +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class CreateAgentLogResponse(UncheckedBaseModel): + """ + Response for an Agent Log. + """ + + id: str = pydantic.Field() + """ + Unique identifier for the Log. 
+ """ + + agent_id: str = pydantic.Field() + """ + Unique identifier for the Agent. + """ + + version_id: str = pydantic.Field() + """ + Unique identifier for the Flow Version. + """ + + log_status: typing.Optional[LogStatus] = pydantic.Field(default=None) + """ + Status of the Flow Log. When a Flow Log is marked as `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/dataset_response.py b/src/humanloop/types/dataset_response.py index 41b44cba..b4583d94 100644 --- a/src/humanloop/types/dataset_response.py +++ b/src/humanloop/types/dataset_response.py @@ -3,6 +3,8 @@ from ..core.unchecked_base_model import UncheckedBaseModel import pydantic import typing +import typing_extensions +from ..core.serialization import FieldMetadata from .environment_response import EnvironmentResponse import datetime as dt from .user_response import UserResponse @@ -44,6 +46,13 @@ class DatasetResponse(UncheckedBaseModel): Description of the Dataset. """ + schema_: typing_extensions.Annotated[ + typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema") + ] = pydantic.Field(default=None) + """ + The JSON schema for the File. + """ + readme: typing.Optional[str] = pydantic.Field(default=None) """ Long description of the file. diff --git a/src/humanloop/types/directory_with_parents_and_children_response.py b/src/humanloop/types/directory_with_parents_and_children_response.py index 090b5c98..abce1204 100644 --- a/src/humanloop/types/directory_with_parents_and_children_response.py +++ b/src/humanloop/types/directory_with_parents_and_children_response.py @@ -2,6 +2,8 @@ from __future__ import annotations from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_response import AgentResponse from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse from .monitoring_evaluator_response import MonitoringEvaluatorResponse @@ -81,6 +83,10 @@ class Config: extra = pydantic.Extra.allow +update_forward_refs( + AgentLinkedFileResponse, DirectoryWithParentsAndChildrenResponse=DirectoryWithParentsAndChildrenResponse +) +update_forward_refs(AgentResponse, DirectoryWithParentsAndChildrenResponse=DirectoryWithParentsAndChildrenResponse) update_forward_refs(EvaluatorResponse, DirectoryWithParentsAndChildrenResponse=DirectoryWithParentsAndChildrenResponse) update_forward_refs(FlowResponse, DirectoryWithParentsAndChildrenResponse=DirectoryWithParentsAndChildrenResponse) update_forward_refs( diff --git a/src/humanloop/types/directory_with_parents_and_children_response_files_item.py b/src/humanloop/types/directory_with_parents_and_children_response_files_item.py index 0bfeebf7..9d0d5fc4 100644 --- a/src/humanloop/types/directory_with_parents_and_children_response_files_item.py +++ b/src/humanloop/types/directory_with_parents_and_children_response_files_item.py @@ -6,7 +6,8 @@ from .evaluator_response import EvaluatorResponse from .dataset_response import DatasetResponse from .flow_response import FlowResponse +from .agent_response import AgentResponse DirectoryWithParentsAndChildrenResponseFilesItem = typing.Union[ - PromptResponse, ToolResponse, 
EvaluatorResponse, DatasetResponse, FlowResponse + PromptResponse, ToolResponse, EvaluatorResponse, DatasetResponse, FlowResponse, AgentResponse ] diff --git a/src/humanloop/types/evaluatee_response.py b/src/humanloop/types/evaluatee_response.py index 97007485..f424caa2 100644 --- a/src/humanloop/types/evaluatee_response.py +++ b/src/humanloop/types/evaluatee_response.py @@ -2,6 +2,8 @@ from __future__ import annotations from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_response import AgentResponse from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse from .monitoring_evaluator_response import MonitoringEvaluatorResponse @@ -53,6 +55,8 @@ class Config: extra = pydantic.Extra.allow +update_forward_refs(AgentLinkedFileResponse, EvaluateeResponse=EvaluateeResponse) +update_forward_refs(AgentResponse, EvaluateeResponse=EvaluateeResponse) update_forward_refs(EvaluatorResponse, EvaluateeResponse=EvaluateeResponse) update_forward_refs(FlowResponse, EvaluateeResponse=EvaluateeResponse) update_forward_refs(MonitoringEvaluatorResponse, EvaluateeResponse=EvaluateeResponse) diff --git a/src/humanloop/types/evaluation_evaluator_response.py b/src/humanloop/types/evaluation_evaluator_response.py index 84b3c746..ca3ad3f9 100644 --- a/src/humanloop/types/evaluation_evaluator_response.py +++ b/src/humanloop/types/evaluation_evaluator_response.py @@ -2,6 +2,8 @@ from __future__ import annotations from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_response import AgentResponse from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse from .monitoring_evaluator_response import MonitoringEvaluatorResponse @@ -38,6 +40,8 @@ class Config: extra = pydantic.Extra.allow +update_forward_refs(AgentLinkedFileResponse, EvaluationEvaluatorResponse=EvaluationEvaluatorResponse) +update_forward_refs(AgentResponse, EvaluationEvaluatorResponse=EvaluationEvaluatorResponse) update_forward_refs(EvaluatorResponse, EvaluationEvaluatorResponse=EvaluationEvaluatorResponse) update_forward_refs(FlowResponse, EvaluationEvaluatorResponse=EvaluationEvaluatorResponse) update_forward_refs(MonitoringEvaluatorResponse, EvaluationEvaluatorResponse=EvaluationEvaluatorResponse) diff --git a/src/humanloop/types/evaluation_log_response.py b/src/humanloop/types/evaluation_log_response.py index 32ff5b40..0f1699ad 100644 --- a/src/humanloop/types/evaluation_log_response.py +++ b/src/humanloop/types/evaluation_log_response.py @@ -2,6 +2,9 @@ from __future__ import annotations from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_log_response import AgentLogResponse +from .agent_response import AgentResponse from .evaluator_log_response import EvaluatorLogResponse from .evaluator_response import EvaluatorResponse from .flow_log_response import FlowLogResponse @@ -52,6 +55,9 @@ class Config: extra = pydantic.Extra.allow +update_forward_refs(AgentLinkedFileResponse, EvaluationLogResponse=EvaluationLogResponse) +update_forward_refs(AgentLogResponse, EvaluationLogResponse=EvaluationLogResponse) +update_forward_refs(AgentResponse, EvaluationLogResponse=EvaluationLogResponse) update_forward_refs(EvaluatorLogResponse, EvaluationLogResponse=EvaluationLogResponse) update_forward_refs(EvaluatorResponse, 
EvaluationLogResponse=EvaluationLogResponse) update_forward_refs(FlowLogResponse, EvaluationLogResponse=EvaluationLogResponse) diff --git a/src/humanloop/types/evaluation_response.py b/src/humanloop/types/evaluation_response.py index a4c2336a..6f189a7d 100644 --- a/src/humanloop/types/evaluation_response.py +++ b/src/humanloop/types/evaluation_response.py @@ -2,6 +2,8 @@ from __future__ import annotations from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_response import AgentResponse from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse from .monitoring_evaluator_response import MonitoringEvaluatorResponse @@ -62,6 +64,8 @@ class Config: extra = pydantic.Extra.allow +update_forward_refs(AgentLinkedFileResponse, EvaluationResponse=EvaluationResponse) +update_forward_refs(AgentResponse, EvaluationResponse=EvaluationResponse) update_forward_refs(EvaluatorResponse, EvaluationResponse=EvaluationResponse) update_forward_refs(FlowResponse, EvaluationResponse=EvaluationResponse) update_forward_refs(MonitoringEvaluatorResponse, EvaluationResponse=EvaluationResponse) diff --git a/src/humanloop/types/evaluation_run_response.py b/src/humanloop/types/evaluation_run_response.py index 46f9308d..64a63ea1 100644 --- a/src/humanloop/types/evaluation_run_response.py +++ b/src/humanloop/types/evaluation_run_response.py @@ -2,6 +2,8 @@ from __future__ import annotations from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_response import AgentResponse from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse from .monitoring_evaluator_response import MonitoringEvaluatorResponse @@ -76,6 +78,8 @@ class Config: extra = pydantic.Extra.allow +update_forward_refs(AgentLinkedFileResponse, EvaluationRunResponse=EvaluationRunResponse) +update_forward_refs(AgentResponse, EvaluationRunResponse=EvaluationRunResponse) update_forward_refs(EvaluatorResponse, EvaluationRunResponse=EvaluationRunResponse) update_forward_refs(FlowResponse, EvaluationRunResponse=EvaluationRunResponse) update_forward_refs(MonitoringEvaluatorResponse, EvaluationRunResponse=EvaluationRunResponse) diff --git a/src/humanloop/types/evaluation_runs_response.py b/src/humanloop/types/evaluation_runs_response.py index 208a7529..abc39351 100644 --- a/src/humanloop/types/evaluation_runs_response.py +++ b/src/humanloop/types/evaluation_runs_response.py @@ -2,6 +2,8 @@ from __future__ import annotations from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_response import AgentResponse from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse from .monitoring_evaluator_response import MonitoringEvaluatorResponse @@ -32,6 +34,8 @@ class Config: extra = pydantic.Extra.allow +update_forward_refs(AgentLinkedFileResponse, EvaluationRunsResponse=EvaluationRunsResponse) +update_forward_refs(AgentResponse, EvaluationRunsResponse=EvaluationRunsResponse) update_forward_refs(EvaluatorResponse, EvaluationRunsResponse=EvaluationRunsResponse) update_forward_refs(FlowResponse, EvaluationRunsResponse=EvaluationRunsResponse) update_forward_refs(MonitoringEvaluatorResponse, EvaluationRunsResponse=EvaluationRunsResponse) diff --git a/src/humanloop/types/evaluator_log_response.py b/src/humanloop/types/evaluator_log_response.py 
index 967217c5..9378254f 100644 --- a/src/humanloop/types/evaluator_log_response.py +++ b/src/humanloop/types/evaluator_log_response.py @@ -2,6 +2,8 @@ from __future__ import annotations from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_response import AgentResponse from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse from .monitoring_evaluator_response import MonitoringEvaluatorResponse @@ -189,11 +191,15 @@ class Config: extra = pydantic.Extra.allow +from .agent_log_response import AgentLogResponse # noqa: E402 from .flow_log_response import FlowLogResponse # noqa: E402 from .prompt_log_response import PromptLogResponse # noqa: E402 from .tool_log_response import ToolLogResponse # noqa: E402 from .log_response import LogResponse # noqa: E402 +update_forward_refs(AgentLinkedFileResponse, EvaluatorLogResponse=EvaluatorLogResponse) +update_forward_refs(AgentLogResponse, EvaluatorLogResponse=EvaluatorLogResponse) +update_forward_refs(AgentResponse, EvaluatorLogResponse=EvaluatorLogResponse) update_forward_refs(EvaluatorResponse, EvaluatorLogResponse=EvaluatorLogResponse) update_forward_refs(FlowLogResponse, EvaluatorLogResponse=EvaluatorLogResponse) update_forward_refs(FlowResponse, EvaluatorLogResponse=EvaluatorLogResponse) diff --git a/src/humanloop/types/evaluator_response.py b/src/humanloop/types/evaluator_response.py index 0743086c..f0cdce57 100644 --- a/src/humanloop/types/evaluator_response.py +++ b/src/humanloop/types/evaluator_response.py @@ -5,6 +5,8 @@ import pydantic import typing from .evaluator_response_spec import EvaluatorResponseSpec +import typing_extensions +from ..core.serialization import FieldMetadata from .environment_response import EnvironmentResponse import datetime as dt from .user_response import UserResponse @@ -51,6 +53,13 @@ class EvaluatorResponse(UncheckedBaseModel): Description of the Evaluator. """ + schema_: typing_extensions.Annotated[ + typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema") + ] = pydantic.Field(default=None) + """ + The JSON schema for the File. + """ + readme: typing.Optional[str] = pydantic.Field(default=None) """ Long description of the file. @@ -131,6 +140,8 @@ class Config: extra = pydantic.Extra.allow +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402 +from .agent_response import AgentResponse # noqa: E402 from .flow_response import FlowResponse # noqa: E402 from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402 from .prompt_response import PromptResponse # noqa: E402 @@ -138,6 +149,8 @@ class Config: from .version_deployment_response import VersionDeploymentResponse # noqa: E402 from .version_id_response import VersionIdResponse # noqa: E402 +update_forward_refs(AgentLinkedFileResponse, EvaluatorResponse=EvaluatorResponse) +update_forward_refs(AgentResponse, EvaluatorResponse=EvaluatorResponse) update_forward_refs(FlowResponse, EvaluatorResponse=EvaluatorResponse) update_forward_refs(MonitoringEvaluatorResponse, EvaluatorResponse=EvaluatorResponse) update_forward_refs(PromptResponse, EvaluatorResponse=EvaluatorResponse) diff --git a/src/humanloop/types/event_type.py b/src/humanloop/types/event_type.py new file mode 100644 index 00000000..920dacdf --- /dev/null +++ b/src/humanloop/types/event_type.py @@ -0,0 +1,19 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +EventType = typing.Union[ + typing.Literal[ + "__begin__", + "__wait__", + "__complete__", + "agent_start", + "agent_update", + "agent_end", + "tool_start", + "tool_update", + "tool_end", + "error", + ], + typing.Any, +] diff --git a/src/humanloop/types/file_environment_response.py b/src/humanloop/types/file_environment_response.py index 8f0dc1a8..c3611f35 100644 --- a/src/humanloop/types/file_environment_response.py +++ b/src/humanloop/types/file_environment_response.py @@ -2,6 +2,8 @@ from __future__ import annotations from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_response import AgentResponse from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse from .monitoring_evaluator_response import MonitoringEvaluatorResponse @@ -44,6 +46,8 @@ class Config: extra = pydantic.Extra.allow +update_forward_refs(AgentLinkedFileResponse, FileEnvironmentResponse=FileEnvironmentResponse) +update_forward_refs(AgentResponse, FileEnvironmentResponse=FileEnvironmentResponse) update_forward_refs(EvaluatorResponse, FileEnvironmentResponse=FileEnvironmentResponse) update_forward_refs(FlowResponse, FileEnvironmentResponse=FileEnvironmentResponse) update_forward_refs(MonitoringEvaluatorResponse, FileEnvironmentResponse=FileEnvironmentResponse) diff --git a/src/humanloop/types/file_environment_response_file.py b/src/humanloop/types/file_environment_response_file.py index 2a105c9d..0254c2b8 100644 --- a/src/humanloop/types/file_environment_response_file.py +++ b/src/humanloop/types/file_environment_response_file.py @@ -6,7 +6,8 @@ from .dataset_response import DatasetResponse from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse +from .agent_response import AgentResponse FileEnvironmentResponseFile = typing.Union[ - PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse + PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse, AgentResponse ] diff --git a/src/humanloop/types/file_environment_variable_request.py b/src/humanloop/types/file_environment_variable_request.py new file mode 100644 index 00000000..8108245b --- /dev/null +++ b/src/humanloop/types/file_environment_variable_request.py @@ -0,0 +1,27 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class FileEnvironmentVariableRequest(UncheckedBaseModel): + name: str = pydantic.Field() + """ + Name of the environment variable. + """ + + value: str = pydantic.Field() + """ + Value of the environment variable. 
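The new EventType union above enumerates lifecycle markers for streamed agent runs. A heavily hedged sketch of filtering for terminal events; which markers count as terminal is an editorial assumption, not something this patch specifies:

TERMINAL_EVENTS = {"__complete__", "agent_end", "error"}  # assumed semantics

def is_terminal(event_type: str) -> bool:
    # True once a streamed agent run is assumed to have finished.
    return event_type in TERMINAL_EVENTS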
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/file_type.py b/src/humanloop/types/file_type.py index 7a870b84..f235825b 100644 --- a/src/humanloop/types/file_type.py +++ b/src/humanloop/types/file_type.py @@ -2,4 +2,4 @@ import typing -FileType = typing.Union[typing.Literal["prompt", "tool", "dataset", "evaluator", "flow"], typing.Any] +FileType = typing.Union[typing.Literal["prompt", "tool", "dataset", "evaluator", "flow", "agent"], typing.Any] diff --git a/src/humanloop/types/files_tool_type.py b/src/humanloop/types/files_tool_type.py index c32b9755..753d9ba2 100644 --- a/src/humanloop/types/files_tool_type.py +++ b/src/humanloop/types/files_tool_type.py @@ -3,5 +3,5 @@ import typing FilesToolType = typing.Union[ - typing.Literal["pinecone_search", "google", "mock", "snippet", "json_schema", "get_api_call"], typing.Any + typing.Literal["pinecone_search", "google", "mock", "snippet", "json_schema", "get_api_call", "python"], typing.Any ] diff --git a/src/humanloop/types/flow_log_response.py b/src/humanloop/types/flow_log_response.py index 01ae2cb0..68260a35 100644 --- a/src/humanloop/types/flow_log_response.py +++ b/src/humanloop/types/flow_log_response.py @@ -2,6 +2,8 @@ from __future__ import annotations from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_response import AgentResponse from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse from .monitoring_evaluator_response import MonitoringEvaluatorResponse @@ -173,11 +175,15 @@ class Config: extra = pydantic.Extra.allow +from .agent_log_response import AgentLogResponse # noqa: E402 from .evaluator_log_response import EvaluatorLogResponse # noqa: E402 from .prompt_log_response import PromptLogResponse # noqa: E402 from .tool_log_response import ToolLogResponse # noqa: E402 from .log_response import LogResponse # noqa: E402 +update_forward_refs(AgentLinkedFileResponse, FlowLogResponse=FlowLogResponse) +update_forward_refs(AgentLogResponse, FlowLogResponse=FlowLogResponse) +update_forward_refs(AgentResponse, FlowLogResponse=FlowLogResponse) update_forward_refs(EvaluatorLogResponse, FlowLogResponse=FlowLogResponse) update_forward_refs(EvaluatorResponse, FlowLogResponse=FlowLogResponse) update_forward_refs(FlowResponse, FlowLogResponse=FlowLogResponse) diff --git a/src/humanloop/types/flow_response.py b/src/humanloop/types/flow_response.py index f90dcca2..0647ee45 100644 --- a/src/humanloop/types/flow_response.py +++ b/src/humanloop/types/flow_response.py @@ -4,6 +4,8 @@ from ..core.unchecked_base_model import UncheckedBaseModel import pydantic import typing +import typing_extensions +from ..core.serialization import FieldMetadata from .environment_response import EnvironmentResponse import datetime as dt from .user_response import UserResponse @@ -53,6 +55,13 @@ class FlowResponse(UncheckedBaseModel): Description of the Flow. """ + schema_: typing_extensions.Annotated[ + typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema") + ] = pydantic.Field(default=None) + """ + The JSON schema for the File. + """ + readme: typing.Optional[str] = pydantic.Field(default=None) """ Long description of the file. 
@@ -122,6 +131,8 @@ class Config: extra = pydantic.Extra.allow +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402 +from .agent_response import AgentResponse # noqa: E402 from .evaluator_response import EvaluatorResponse # noqa: E402 from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402 from .prompt_response import PromptResponse # noqa: E402 @@ -129,6 +140,8 @@ class Config: from .version_deployment_response import VersionDeploymentResponse # noqa: E402 from .version_id_response import VersionIdResponse # noqa: E402 +update_forward_refs(AgentLinkedFileResponse, FlowResponse=FlowResponse) +update_forward_refs(AgentResponse, FlowResponse=FlowResponse) update_forward_refs(EvaluatorResponse, FlowResponse=FlowResponse) update_forward_refs(MonitoringEvaluatorResponse, FlowResponse=FlowResponse) update_forward_refs(PromptResponse, FlowResponse=FlowResponse) diff --git a/src/humanloop/types/linked_file_request.py b/src/humanloop/types/linked_file_request.py new file mode 100644 index 00000000..ee45ffdf --- /dev/null +++ b/src/humanloop/types/linked_file_request.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class LinkedFileRequest(UncheckedBaseModel): + file_id: str + environment_id: typing.Optional[str] = None + version_id: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/humanloop/types/list_agents.py b/src/humanloop/types/list_agents.py new file mode 100644 index 00000000..220ac4e3 --- /dev/null +++ b/src/humanloop/types/list_agents.py @@ -0,0 +1,44 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations +from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_response import AgentResponse +from .evaluator_response import EvaluatorResponse +from .flow_response import FlowResponse +from .monitoring_evaluator_response import MonitoringEvaluatorResponse +from .prompt_response import PromptResponse +from .tool_response import ToolResponse +from .version_deployment_response import VersionDeploymentResponse +from .version_id_response import VersionIdResponse +import typing +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import update_forward_refs + + +class ListAgents(UncheckedBaseModel): + records: typing.List[AgentResponse] = pydantic.Field() + """ + The list of Agents. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +update_forward_refs(AgentLinkedFileResponse, ListAgents=ListAgents) +update_forward_refs(AgentResponse, ListAgents=ListAgents) +update_forward_refs(EvaluatorResponse, ListAgents=ListAgents) +update_forward_refs(FlowResponse, ListAgents=ListAgents) +update_forward_refs(MonitoringEvaluatorResponse, ListAgents=ListAgents) +update_forward_refs(PromptResponse, ListAgents=ListAgents) +update_forward_refs(ToolResponse, ListAgents=ListAgents) +update_forward_refs(VersionDeploymentResponse, ListAgents=ListAgents) +update_forward_refs(VersionIdResponse, ListAgents=ListAgents) diff --git a/src/humanloop/types/list_evaluators.py b/src/humanloop/types/list_evaluators.py index 341ec7ba..c288eb98 100644 --- a/src/humanloop/types/list_evaluators.py +++ b/src/humanloop/types/list_evaluators.py @@ -2,6 +2,8 @@ from __future__ import annotations from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_response import AgentResponse from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse from .monitoring_evaluator_response import MonitoringEvaluatorResponse @@ -31,6 +33,8 @@ class Config: extra = pydantic.Extra.allow +update_forward_refs(AgentLinkedFileResponse, ListEvaluators=ListEvaluators) +update_forward_refs(AgentResponse, ListEvaluators=ListEvaluators) update_forward_refs(EvaluatorResponse, ListEvaluators=ListEvaluators) update_forward_refs(FlowResponse, ListEvaluators=ListEvaluators) update_forward_refs(MonitoringEvaluatorResponse, ListEvaluators=ListEvaluators) diff --git a/src/humanloop/types/list_flows.py b/src/humanloop/types/list_flows.py index bf593240..d349dafe 100644 --- a/src/humanloop/types/list_flows.py +++ b/src/humanloop/types/list_flows.py @@ -2,6 +2,8 @@ from __future__ import annotations from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_response import AgentResponse from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse from .monitoring_evaluator_response import MonitoringEvaluatorResponse @@ -31,6 +33,8 @@ class Config: extra = pydantic.Extra.allow +update_forward_refs(AgentLinkedFileResponse, ListFlows=ListFlows) +update_forward_refs(AgentResponse, ListFlows=ListFlows) update_forward_refs(EvaluatorResponse, ListFlows=ListFlows) update_forward_refs(FlowResponse, ListFlows=ListFlows) update_forward_refs(MonitoringEvaluatorResponse, ListFlows=ListFlows) diff --git a/src/humanloop/types/list_prompts.py b/src/humanloop/types/list_prompts.py index 34253841..9abdf7a4 100644 --- a/src/humanloop/types/list_prompts.py +++ b/src/humanloop/types/list_prompts.py @@ -2,6 +2,8 @@ from __future__ import annotations from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_response import AgentResponse from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse from .monitoring_evaluator_response import MonitoringEvaluatorResponse @@ -31,6 +33,8 @@ class Config: extra = pydantic.Extra.allow +update_forward_refs(AgentLinkedFileResponse, ListPrompts=ListPrompts) +update_forward_refs(AgentResponse, 
ListPrompts=ListPrompts) update_forward_refs(EvaluatorResponse, ListPrompts=ListPrompts) update_forward_refs(FlowResponse, ListPrompts=ListPrompts) update_forward_refs(MonitoringEvaluatorResponse, ListPrompts=ListPrompts) diff --git a/src/humanloop/types/list_tools.py b/src/humanloop/types/list_tools.py index bf170eb9..673557eb 100644 --- a/src/humanloop/types/list_tools.py +++ b/src/humanloop/types/list_tools.py @@ -2,6 +2,8 @@ from __future__ import annotations from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_response import AgentResponse from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse from .monitoring_evaluator_response import MonitoringEvaluatorResponse @@ -31,6 +33,8 @@ class Config: extra = pydantic.Extra.allow +update_forward_refs(AgentLinkedFileResponse, ListTools=ListTools) +update_forward_refs(AgentResponse, ListTools=ListTools) update_forward_refs(EvaluatorResponse, ListTools=ListTools) update_forward_refs(FlowResponse, ListTools=ListTools) update_forward_refs(MonitoringEvaluatorResponse, ListTools=ListTools) diff --git a/src/humanloop/types/log_response.py b/src/humanloop/types/log_response.py index 0ba81dd3..cd7a0a26 100644 --- a/src/humanloop/types/log_response.py +++ b/src/humanloop/types/log_response.py @@ -9,4 +9,7 @@ from .tool_log_response import ToolLogResponse from .evaluator_log_response import EvaluatorLogResponse from .flow_log_response import FlowLogResponse -LogResponse = typing.Union["PromptLogResponse", "ToolLogResponse", "EvaluatorLogResponse", "FlowLogResponse"] + from .agent_log_response import AgentLogResponse +LogResponse = typing.Union[ + "PromptLogResponse", "ToolLogResponse", "EvaluatorLogResponse", "FlowLogResponse", "AgentLogResponse" +] diff --git a/src/humanloop/types/log_stream_response.py b/src/humanloop/types/log_stream_response.py new file mode 100644 index 00000000..69ffacf4 --- /dev/null +++ b/src/humanloop/types/log_stream_response.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. 
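LogResponse is widened above to include AgentLogResponse. A sketch of narrowing the union at runtime; it assumes, as AgentLogResponse shows, that each member exposes an `id` (the other members' fields are defined elsewhere in the SDK):

def describe_log(log) -> str:
    from humanloop.types.agent_log_response import AgentLogResponse

    # AgentLogResponse is the newly added member of the LogResponse union.
    if isinstance(log, AgentLogResponse):
        return f"agent log {log.id} for agent {log.agent.id}"
    return f"{type(log).__name__} {log.id}"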
+ +import typing +from .prompt_call_stream_response import PromptCallStreamResponse +from .agent_log_stream_response import AgentLogStreamResponse + +LogStreamResponse = typing.Union[PromptCallStreamResponse, AgentLogStreamResponse] diff --git a/src/humanloop/types/model_providers.py b/src/humanloop/types/model_providers.py index 8473d2ae..3f2c99fb 100644 --- a/src/humanloop/types/model_providers.py +++ b/src/humanloop/types/model_providers.py @@ -4,7 +4,7 @@ ModelProviders = typing.Union[ typing.Literal[ - "openai", "openai_azure", "mock", "anthropic", "bedrock", "cohere", "replicate", "google", "groq", "deepseek" + "anthropic", "bedrock", "cohere", "deepseek", "google", "groq", "mock", "openai", "openai_azure", "replicate" ], typing.Any, ] diff --git a/src/humanloop/types/monitoring_evaluator_response.py b/src/humanloop/types/monitoring_evaluator_response.py index 4d79fcdc..34ddaabe 100644 --- a/src/humanloop/types/monitoring_evaluator_response.py +++ b/src/humanloop/types/monitoring_evaluator_response.py @@ -39,6 +39,8 @@ class Config: extra = pydantic.Extra.allow +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402 +from .agent_response import AgentResponse # noqa: E402 from .evaluator_response import EvaluatorResponse # noqa: E402 from .flow_response import FlowResponse # noqa: E402 from .prompt_response import PromptResponse # noqa: E402 @@ -47,6 +49,8 @@ class Config: from .version_id_response import VersionIdResponse # noqa: E402 from .version_reference_response import VersionReferenceResponse # noqa: E402 +update_forward_refs(AgentLinkedFileResponse, MonitoringEvaluatorResponse=MonitoringEvaluatorResponse) +update_forward_refs(AgentResponse, MonitoringEvaluatorResponse=MonitoringEvaluatorResponse) update_forward_refs(EvaluatorResponse, MonitoringEvaluatorResponse=MonitoringEvaluatorResponse) update_forward_refs(FlowResponse, MonitoringEvaluatorResponse=MonitoringEvaluatorResponse) update_forward_refs(PromptResponse, MonitoringEvaluatorResponse=MonitoringEvaluatorResponse) diff --git a/src/humanloop/types/paginated_data_agent_response.py b/src/humanloop/types/paginated_data_agent_response.py new file mode 100644 index 00000000..9de6c71d --- /dev/null +++ b/src/humanloop/types/paginated_data_agent_response.py @@ -0,0 +1,44 @@ +# This file was auto-generated by Fern from our API Definition. 
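The file that follows defines PaginatedDataAgentResponse with records, page, size, and total. A hedged sketch of a client-side pager over any fetcher returning that shape; fetch_page is a hypothetical callable, not an SDK function:

import typing

def iter_agents(fetch_page: typing.Callable[[int], typing.Any]) -> typing.Iterator[typing.Any]:
    # fetch_page(page) returns an object with records, page, size, total.
    page = 1
    while True:
        resp = fetch_page(page)
        yield from resp.records
        if resp.page * resp.size >= resp.total:
            return
        page += 1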
+ +from __future__ import annotations +from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_response import AgentResponse +from .evaluator_response import EvaluatorResponse +from .flow_response import FlowResponse +from .monitoring_evaluator_response import MonitoringEvaluatorResponse +from .prompt_response import PromptResponse +from .tool_response import ToolResponse +from .version_deployment_response import VersionDeploymentResponse +from .version_id_response import VersionIdResponse +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic +from ..core.pydantic_utilities import update_forward_refs + + +class PaginatedDataAgentResponse(UncheckedBaseModel): + records: typing.List[AgentResponse] + page: int + size: int + total: int + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +update_forward_refs(AgentLinkedFileResponse, PaginatedDataAgentResponse=PaginatedDataAgentResponse) +update_forward_refs(AgentResponse, PaginatedDataAgentResponse=PaginatedDataAgentResponse) +update_forward_refs(EvaluatorResponse, PaginatedDataAgentResponse=PaginatedDataAgentResponse) +update_forward_refs(FlowResponse, PaginatedDataAgentResponse=PaginatedDataAgentResponse) +update_forward_refs(MonitoringEvaluatorResponse, PaginatedDataAgentResponse=PaginatedDataAgentResponse) +update_forward_refs(PromptResponse, PaginatedDataAgentResponse=PaginatedDataAgentResponse) +update_forward_refs(ToolResponse, PaginatedDataAgentResponse=PaginatedDataAgentResponse) +update_forward_refs(VersionDeploymentResponse, PaginatedDataAgentResponse=PaginatedDataAgentResponse) +update_forward_refs(VersionIdResponse, PaginatedDataAgentResponse=PaginatedDataAgentResponse) diff --git a/src/humanloop/types/paginated_data_evaluation_log_response.py b/src/humanloop/types/paginated_data_evaluation_log_response.py index c6e19791..fb5371a0 100644 --- a/src/humanloop/types/paginated_data_evaluation_log_response.py +++ b/src/humanloop/types/paginated_data_evaluation_log_response.py @@ -2,6 +2,9 @@ from __future__ import annotations from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_log_response import AgentLogResponse +from .agent_response import AgentResponse from .evaluator_log_response import EvaluatorLogResponse from .evaluator_response import EvaluatorResponse from .flow_log_response import FlowLogResponse @@ -36,6 +39,9 @@ class Config: extra = pydantic.Extra.allow +update_forward_refs(AgentLinkedFileResponse, PaginatedDataEvaluationLogResponse=PaginatedDataEvaluationLogResponse) +update_forward_refs(AgentLogResponse, PaginatedDataEvaluationLogResponse=PaginatedDataEvaluationLogResponse) +update_forward_refs(AgentResponse, PaginatedDataEvaluationLogResponse=PaginatedDataEvaluationLogResponse) update_forward_refs(EvaluatorLogResponse, PaginatedDataEvaluationLogResponse=PaginatedDataEvaluationLogResponse) update_forward_refs(EvaluatorResponse, PaginatedDataEvaluationLogResponse=PaginatedDataEvaluationLogResponse) update_forward_refs(FlowLogResponse, PaginatedDataEvaluationLogResponse=PaginatedDataEvaluationLogResponse) diff --git a/src/humanloop/types/paginated_data_evaluator_response.py b/src/humanloop/types/paginated_data_evaluator_response.py 
diff --git a/src/humanloop/types/paginated_data_evaluator_response.py b/src/humanloop/types/paginated_data_evaluator_response.py
index 86ee982a..db7a895d 100644
--- a/src/humanloop/types/paginated_data_evaluator_response.py
+++ b/src/humanloop/types/paginated_data_evaluator_response.py
@@ -2,6 +2,8 @@
 from __future__ import annotations
 from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
 from .evaluator_response import EvaluatorResponse
 from .flow_response import FlowResponse
 from .monitoring_evaluator_response import MonitoringEvaluatorResponse
@@ -31,6 +33,8 @@ class Config:
             extra = pydantic.Extra.allow
 
 
+update_forward_refs(AgentLinkedFileResponse, PaginatedDataEvaluatorResponse=PaginatedDataEvaluatorResponse)
+update_forward_refs(AgentResponse, PaginatedDataEvaluatorResponse=PaginatedDataEvaluatorResponse)
 update_forward_refs(EvaluatorResponse, PaginatedDataEvaluatorResponse=PaginatedDataEvaluatorResponse)
 update_forward_refs(FlowResponse, PaginatedDataEvaluatorResponse=PaginatedDataEvaluatorResponse)
 update_forward_refs(MonitoringEvaluatorResponse, PaginatedDataEvaluatorResponse=PaginatedDataEvaluatorResponse)
diff --git a/src/humanloop/types/paginated_data_flow_response.py b/src/humanloop/types/paginated_data_flow_response.py
index 9ffc2eb6..b7b38f56 100644
--- a/src/humanloop/types/paginated_data_flow_response.py
+++ b/src/humanloop/types/paginated_data_flow_response.py
@@ -2,6 +2,8 @@
 from __future__ import annotations
 from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
 from .evaluator_response import EvaluatorResponse
 from .flow_response import FlowResponse
 from .monitoring_evaluator_response import MonitoringEvaluatorResponse
@@ -31,6 +33,8 @@ class Config:
             extra = pydantic.Extra.allow
 
 
+update_forward_refs(AgentLinkedFileResponse, PaginatedDataFlowResponse=PaginatedDataFlowResponse)
+update_forward_refs(AgentResponse, PaginatedDataFlowResponse=PaginatedDataFlowResponse)
 update_forward_refs(EvaluatorResponse, PaginatedDataFlowResponse=PaginatedDataFlowResponse)
 update_forward_refs(FlowResponse, PaginatedDataFlowResponse=PaginatedDataFlowResponse)
 update_forward_refs(MonitoringEvaluatorResponse, PaginatedDataFlowResponse=PaginatedDataFlowResponse)
diff --git a/src/humanloop/types/paginated_data_log_response.py b/src/humanloop/types/paginated_data_log_response.py
index 7352f17e..e8f9fdd0 100644
--- a/src/humanloop/types/paginated_data_log_response.py
+++ b/src/humanloop/types/paginated_data_log_response.py
@@ -2,6 +2,9 @@
 from __future__ import annotations
 from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_log_response import AgentLogResponse
+from .agent_response import AgentResponse
 from .evaluator_log_response import EvaluatorLogResponse
 from .evaluator_response import EvaluatorResponse
 from .flow_log_response import FlowLogResponse
@@ -36,6 +39,9 @@ class Config:
             extra = pydantic.Extra.allow
 
 
+update_forward_refs(AgentLinkedFileResponse, PaginatedDataLogResponse=PaginatedDataLogResponse)
+update_forward_refs(AgentLogResponse, PaginatedDataLogResponse=PaginatedDataLogResponse)
+update_forward_refs(AgentResponse, PaginatedDataLogResponse=PaginatedDataLogResponse)
 update_forward_refs(EvaluatorLogResponse, PaginatedDataLogResponse=PaginatedDataLogResponse)
 update_forward_refs(EvaluatorResponse, PaginatedDataLogResponse=PaginatedDataLogResponse)
 update_forward_refs(FlowLogResponse, PaginatedDataLogResponse=PaginatedDataLogResponse)
diff --git a/src/humanloop/types/paginated_data_prompt_response.py b/src/humanloop/types/paginated_data_prompt_response.py
index db64dd96..cc34e571 100644
--- a/src/humanloop/types/paginated_data_prompt_response.py
+++ b/src/humanloop/types/paginated_data_prompt_response.py
@@ -2,6 +2,8 @@
 from __future__ import annotations
 from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
 from .evaluator_response import EvaluatorResponse
 from .flow_response import FlowResponse
 from .monitoring_evaluator_response import MonitoringEvaluatorResponse
@@ -31,6 +33,8 @@ class Config:
             extra = pydantic.Extra.allow
 
 
+update_forward_refs(AgentLinkedFileResponse, PaginatedDataPromptResponse=PaginatedDataPromptResponse)
+update_forward_refs(AgentResponse, PaginatedDataPromptResponse=PaginatedDataPromptResponse)
 update_forward_refs(EvaluatorResponse, PaginatedDataPromptResponse=PaginatedDataPromptResponse)
 update_forward_refs(FlowResponse, PaginatedDataPromptResponse=PaginatedDataPromptResponse)
 update_forward_refs(MonitoringEvaluatorResponse, PaginatedDataPromptResponse=PaginatedDataPromptResponse)
diff --git a/src/humanloop/types/paginated_data_tool_response.py b/src/humanloop/types/paginated_data_tool_response.py
index e7ae59a9..733bcdec 100644
--- a/src/humanloop/types/paginated_data_tool_response.py
+++ b/src/humanloop/types/paginated_data_tool_response.py
@@ -2,6 +2,8 @@
 from __future__ import annotations
 from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
 from .evaluator_response import EvaluatorResponse
 from .flow_response import FlowResponse
 from .monitoring_evaluator_response import MonitoringEvaluatorResponse
@@ -31,6 +33,8 @@ class Config:
             extra = pydantic.Extra.allow
 
 
+update_forward_refs(AgentLinkedFileResponse, PaginatedDataToolResponse=PaginatedDataToolResponse)
+update_forward_refs(AgentResponse, PaginatedDataToolResponse=PaginatedDataToolResponse)
 update_forward_refs(EvaluatorResponse, PaginatedDataToolResponse=PaginatedDataToolResponse)
 update_forward_refs(FlowResponse, PaginatedDataToolResponse=PaginatedDataToolResponse)
 update_forward_refs(MonitoringEvaluatorResponse, PaginatedDataToolResponse=PaginatedDataToolResponse)
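Every PaginatedData* model above shares the same shape: a typed `records` list plus `page`, `size`, and `total` counters. A sketch of paging through that shape, assuming a caller-supplied `fetch_page` function (hypothetical; the SDK's own list methods are the supported entry point):

    # Hypothetical pager over the shared records/page/size/total shape.
    import typing

    from humanloop.types.agent_response import AgentResponse
    from humanloop.types.paginated_data_agent_response import PaginatedDataAgentResponse


    def iter_all_agents(
        fetch_page: typing.Callable[[int], PaginatedDataAgentResponse],
    ) -> typing.Iterator[AgentResponse]:
        """Yield every record, advancing `page` until `total` is exhausted."""
        page = 1
        seen = 0
        while True:
            response = fetch_page(page)
            yield from response.records
            seen += len(response.records)
            if not response.records or seen >= response.total:
                break
            page += 1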
diff --git a/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py b/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py
similarity index 52%
rename from src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py
rename to src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py
index 0e982fbc..14a0c440 100644
--- a/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py
+++ b/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py
@@ -2,6 +2,8 @@
 from __future__ import annotations
 from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
 from .evaluator_response import EvaluatorResponse
 from .flow_response import FlowResponse
 from .monitoring_evaluator_response import MonitoringEvaluatorResponse
@@ -10,17 +12,19 @@
 from .version_deployment_response import VersionDeploymentResponse
 from .version_id_response import VersionIdResponse
 import typing
-from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item import (
-    PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem,
+from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item import (
+    PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem,
 )
 from ..core.pydantic_utilities import IS_PYDANTIC_V2
 import pydantic
 from ..core.pydantic_utilities import update_forward_refs
 
 
-class PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse(UncheckedBaseModel):
+class PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse(
+    UncheckedBaseModel
+):
     records: typing.List[
-        PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem
+        PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem
     ]
     page: int
     size: int
@@ -36,31 +40,39 @@ class Config:
             extra = pydantic.Extra.allow
 
 
+update_forward_refs(
+    AgentLinkedFileResponse,
+    PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse,
+)
+update_forward_refs(
+    AgentResponse,
+    PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse,
+)
 update_forward_refs(
     EvaluatorResponse,
-    PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse,
+    PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse,
 )
 update_forward_refs(
     FlowResponse,
-    PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse,
+    PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse,
 )
 update_forward_refs(
     MonitoringEvaluatorResponse,
-    PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse,
+    PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse,
 )
 update_forward_refs(
     PromptResponse,
-    PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse,
+    PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse,
 )
 update_forward_refs(
     ToolResponse,
-    PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse,
+    PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse,
 )
 update_forward_refs(
     VersionDeploymentResponse,
-    PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse,
+    PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse,
 )
 update_forward_refs(
     VersionIdResponse,
-    PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse,
+    PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse,
 )
diff --git a/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py b/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py
similarity index 63%
rename from src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py
rename to src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py
index 65c4f324..a1b4f056 100644
--- a/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_records_item.py
+++ b/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py
@@ -6,7 +6,8 @@
 from .dataset_response import DatasetResponse
 from .evaluator_response import EvaluatorResponse
 from .flow_response import FlowResponse
+from .agent_response import AgentResponse
 
-PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseRecordsItem = typing.Union[
-    PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse
-]
+PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem = (
+    typing.Union[PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse, AgentResponse]
+)
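The renamed records-item alias now encodes all six member models in its name; downstream code usually shortens it at import time. For example (the short name `FileRecord` is illustrative, and the shared `id` field is an assumption based on the response models in this patch):

    # Shortening the generated union alias for readability.
    import typing

    from humanloop.types.paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item import (
        PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem as FileRecord,
    )


    def record_ids(records: typing.Sequence[FileRecord]) -> typing.List[str]:
        # Assumes each union member carries an `id` field, as the
        # response models in this release do.
        return [record.id for record in records]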
diff --git a/src/humanloop/types/paginated_evaluation_response.py b/src/humanloop/types/paginated_evaluation_response.py
index 59ec4400..debf4c94 100644
--- a/src/humanloop/types/paginated_evaluation_response.py
+++ b/src/humanloop/types/paginated_evaluation_response.py
@@ -2,6 +2,8 @@
 from __future__ import annotations
 from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
 from .evaluator_response import EvaluatorResponse
 from .flow_response import FlowResponse
 from .monitoring_evaluator_response import MonitoringEvaluatorResponse
@@ -32,6 +34,8 @@ class Config:
             extra = pydantic.Extra.allow
 
 
+update_forward_refs(AgentLinkedFileResponse, PaginatedEvaluationResponse=PaginatedEvaluationResponse)
+update_forward_refs(AgentResponse, PaginatedEvaluationResponse=PaginatedEvaluationResponse)
 update_forward_refs(EvaluatorResponse, PaginatedEvaluationResponse=PaginatedEvaluationResponse)
 update_forward_refs(FlowResponse, PaginatedEvaluationResponse=PaginatedEvaluationResponse)
 update_forward_refs(MonitoringEvaluatorResponse, PaginatedEvaluationResponse=PaginatedEvaluationResponse)
diff --git a/src/humanloop/types/populate_template_response.py b/src/humanloop/types/populate_template_response.py
index f1674347..52cdc163 100644
--- a/src/humanloop/types/populate_template_response.py
+++ b/src/humanloop/types/populate_template_response.py
@@ -2,6 +2,8 @@
 from __future__ import annotations
 from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
 from .evaluator_response import EvaluatorResponse
 from .flow_response import FlowResponse
 from .monitoring_evaluator_response import MonitoringEvaluatorResponse
@@ -20,6 +22,8 @@
 from .reasoning_effort import ReasoningEffort
 from .tool_function import ToolFunction
 from .linked_tool_response import LinkedToolResponse
+import typing_extensions
+from ..core.serialization import FieldMetadata
 from .environment_response import EnvironmentResponse
 import datetime as dt
 from .user_response import UserResponse
@@ -174,6 +178,13 @@ class PopulateTemplateResponse(UncheckedBaseModel):
     Name of the Prompt.
     """
 
+    schema_: typing_extensions.Annotated[
+        typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema")
+    ] = pydantic.Field(default=None)
+    """
+    The JSON schema for the Prompt.
+    """
+
     version_id: str = pydantic.Field()
     """
     Unique identifier for the specific Prompt Version. If no query params provided, the default deployed Prompt Version is returned.
@@ -248,6 +259,8 @@ class Config:
             extra = pydantic.Extra.allow
 
 
+update_forward_refs(AgentLinkedFileResponse, PopulateTemplateResponse=PopulateTemplateResponse)
+update_forward_refs(AgentResponse, PopulateTemplateResponse=PopulateTemplateResponse)
 update_forward_refs(EvaluatorResponse, PopulateTemplateResponse=PopulateTemplateResponse)
 update_forward_refs(FlowResponse, PopulateTemplateResponse=PopulateTemplateResponse)
 update_forward_refs(MonitoringEvaluatorResponse, PopulateTemplateResponse=PopulateTemplateResponse)
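The new `schema_` field is aliased to `schema` on the wire because a field literally named `schema` would shadow `BaseModel.schema` in Pydantic v1. A standalone sketch of that aliasing trick (the `Example` model is hypothetical; Pydantic v1 spelling):

    # Hypothetical model: Python attribute schema_, JSON key "schema".
    import typing

    import pydantic


    class Example(pydantic.BaseModel):
        schema_: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(
            default=None, alias="schema"
        )

        class Config:
            # Also accept the attribute name itself on input.
            allow_population_by_field_name = True


    parsed = Example.parse_obj({"schema": {"type": "object"}})
    assert parsed.schema_ == {"type": "object"}
    # Serializing by alias restores the reserved key name.
    assert parsed.dict(by_alias=True)["schema"] == {"type": "object"}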
diff --git a/src/humanloop/types/prompt_call_response.py b/src/humanloop/types/prompt_call_response.py
index 067d14bc..a3538575 100644
--- a/src/humanloop/types/prompt_call_response.py
+++ b/src/humanloop/types/prompt_call_response.py
@@ -2,6 +2,8 @@
 from __future__ import annotations
 from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
 from .evaluator_response import EvaluatorResponse
 from .flow_response import FlowResponse
 from .monitoring_evaluator_response import MonitoringEvaluatorResponse
@@ -129,6 +131,8 @@ class Config:
             extra = pydantic.Extra.allow
 
 
+update_forward_refs(AgentLinkedFileResponse, PromptCallResponse=PromptCallResponse)
+update_forward_refs(AgentResponse, PromptCallResponse=PromptCallResponse)
 update_forward_refs(EvaluatorResponse, PromptCallResponse=PromptCallResponse)
 update_forward_refs(FlowResponse, PromptCallResponse=PromptCallResponse)
 update_forward_refs(MonitoringEvaluatorResponse, PromptCallResponse=PromptCallResponse)
diff --git a/src/humanloop/types/prompt_kernel_request.py b/src/humanloop/types/prompt_kernel_request.py
index 6461bb19..01ccc5fd 100644
--- a/src/humanloop/types/prompt_kernel_request.py
+++ b/src/humanloop/types/prompt_kernel_request.py
@@ -15,6 +15,12 @@
 class PromptKernelRequest(UncheckedBaseModel):
+    """
+    Base class used by both PromptKernelRequest and AgentKernelRequest.
+
+    Contains the consistent Prompt-related fields.
+    """
+
     model: str = pydantic.Field()
     """
     The model instance used, e.g. `gpt-4`.
     See [supported models](https://humanloop.com/docs/reference/supported-models)
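The version-branching configuration block that appears in every model here (frozen instances, unknown fields kept, v1 `smart_union`) can be read in isolation as follows; the sketch assumes only that `pydantic.VERSION` distinguishes the installed major version:

    # Standalone sketch of the dual Pydantic v1/v2 config idiom used
    # by the generated models.
    import typing

    import pydantic

    IS_PYDANTIC_V2 = pydantic.VERSION.startswith("2.")


    class FrozenAllowExtra(pydantic.BaseModel):
        name: str

        if IS_PYDANTIC_V2:
            model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
                extra="allow", frozen=True
            )  # type: ignore # Pydantic v2
        else:

            class Config:
                frozen = True
                smart_union = True
                extra = pydantic.Extra.allow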
diff --git a/src/humanloop/types/prompt_log_response.py b/src/humanloop/types/prompt_log_response.py
index 9f699959..b0a51f97 100644
--- a/src/humanloop/types/prompt_log_response.py
+++ b/src/humanloop/types/prompt_log_response.py
@@ -2,6 +2,8 @@
 from __future__ import annotations
 from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
 from .evaluator_response import EvaluatorResponse
 from .flow_response import FlowResponse
 from .monitoring_evaluator_response import MonitoringEvaluatorResponse
@@ -213,6 +215,8 @@ class Config:
             extra = pydantic.Extra.allow
 
 
+from .agent_log_response import AgentLogResponse  # noqa: E402
 from .evaluator_log_response import EvaluatorLogResponse  # noqa: E402
 from .flow_log_response import FlowLogResponse  # noqa: E402
 from .tool_log_response import ToolLogResponse  # noqa: E402
 from .log_response import LogResponse  # noqa: E402
 
+update_forward_refs(AgentLinkedFileResponse, PromptLogResponse=PromptLogResponse)
+update_forward_refs(AgentResponse, PromptLogResponse=PromptLogResponse)
 update_forward_refs(EvaluatorResponse, PromptLogResponse=PromptLogResponse)
 update_forward_refs(FlowResponse, PromptLogResponse=PromptLogResponse)
 update_forward_refs(MonitoringEvaluatorResponse, PromptLogResponse=PromptLogResponse)
@@ -225,6 +230,7 @@ class Config:
 update_forward_refs(ToolResponse, PromptLogResponse=PromptLogResponse)
 update_forward_refs(VersionDeploymentResponse, PromptLogResponse=PromptLogResponse)
 update_forward_refs(VersionIdResponse, PromptLogResponse=PromptLogResponse)
+update_forward_refs(AgentLogResponse, PromptLogResponse=PromptLogResponse)
 update_forward_refs(EvaluatorLogResponse, PromptLogResponse=PromptLogResponse)
 update_forward_refs(FlowLogResponse, PromptLogResponse=PromptLogResponse)
 update_forward_refs(ToolLogResponse, PromptLogResponse=PromptLogResponse)
diff --git a/src/humanloop/types/prompt_response.py b/src/humanloop/types/prompt_response.py
index 384a295a..5e741e9e 100644
--- a/src/humanloop/types/prompt_response.py
+++ b/src/humanloop/types/prompt_response.py
@@ -13,6 +13,8 @@
 from .reasoning_effort import ReasoningEffort
 from .tool_function import ToolFunction
 from .linked_tool_response import LinkedToolResponse
+import typing_extensions
+from ..core.serialization import FieldMetadata
 from .environment_response import EnvironmentResponse
 import datetime as dt
 from .user_response import UserResponse
@@ -166,6 +168,13 @@ class PromptResponse(UncheckedBaseModel):
     Name of the Prompt.
     """
 
+    schema_: typing_extensions.Annotated[
+        typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]], FieldMetadata(alias="schema")
+    ] = pydantic.Field(default=None)
+    """
+    The JSON schema for the Prompt.
+    """
+
     version_id: str = pydantic.Field()
     """
     Unique identifier for the specific Prompt Version. If no query params provided, the default deployed Prompt Version is returned.
@@ -235,6 +244,8 @@ class Config:
             extra = pydantic.Extra.allow
 
 
+from .agent_linked_file_response import AgentLinkedFileResponse  # noqa: E402
+from .agent_response import AgentResponse  # noqa: E402
 from .evaluator_response import EvaluatorResponse  # noqa: E402
 from .flow_response import FlowResponse  # noqa: E402
 from .monitoring_evaluator_response import MonitoringEvaluatorResponse  # noqa: E402
@@ -242,6 +253,8 @@ class Config:
 from .version_deployment_response import VersionDeploymentResponse  # noqa: E402
 from .version_id_response import VersionIdResponse  # noqa: E402
 
+update_forward_refs(AgentLinkedFileResponse, PromptResponse=PromptResponse)
+update_forward_refs(AgentResponse, PromptResponse=PromptResponse)
 update_forward_refs(EvaluatorResponse, PromptResponse=PromptResponse)
 update_forward_refs(FlowResponse, PromptResponse=PromptResponse)
 update_forward_refs(MonitoringEvaluatorResponse, PromptResponse=PromptResponse)
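PromptResponse now carries the Prompt's JSON schema. If that schema constrains the Prompt's structured output, a caller could validate generations against it; a sketch assuming the third-party `jsonschema` package:

    # Sketch: validating structured output against the new schema field.
    import json

    import jsonschema

    from humanloop.types.prompt_response import PromptResponse


    def validate_output(prompt: PromptResponse, output: str) -> None:
        """Raise jsonschema.ValidationError if `output` violates the schema."""
        if prompt.schema_ is None:
            return  # This Prompt does not declare a schema.
        jsonschema.validate(instance=json.loads(output), schema=prompt.schema_)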
diff --git a/src/humanloop/types/run_version_response.py b/src/humanloop/types/run_version_response.py
index d94b1178..770dc487 100644
--- a/src/humanloop/types/run_version_response.py
+++ b/src/humanloop/types/run_version_response.py
@@ -5,5 +5,6 @@
 from .tool_response import ToolResponse
 from .evaluator_response import EvaluatorResponse
 from .flow_response import FlowResponse
+from .agent_response import AgentResponse
 
-RunVersionResponse = typing.Union[PromptResponse, ToolResponse, EvaluatorResponse, FlowResponse]
+RunVersionResponse = typing.Union[PromptResponse, ToolResponse, EvaluatorResponse, FlowResponse, AgentResponse]
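With Agents added, RunVersionResponse spans five concrete models. A sketch of dispatching on the runtime type (the kind strings are placeholders):

    # Illustrative dispatch over the widened RunVersionResponse union.
    from humanloop.types.agent_response import AgentResponse
    from humanloop.types.evaluator_response import EvaluatorResponse
    from humanloop.types.flow_response import FlowResponse
    from humanloop.types.prompt_response import PromptResponse
    from humanloop.types.run_version_response import RunVersionResponse
    from humanloop.types.tool_response import ToolResponse


    def version_kind(version: RunVersionResponse) -> str:
        # isinstance works because every union member is a concrete model.
        for kind, model in (
            ("prompt", PromptResponse),
            ("tool", ToolResponse),
            ("evaluator", EvaluatorResponse),
            ("flow", FlowResponse),
            ("agent", AgentResponse),
        ):
            if isinstance(version, model):
                return kind
        raise TypeError(f"unexpected version type: {type(version).__name__}")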
diff --git a/src/humanloop/types/tool_call_response.py b/src/humanloop/types/tool_call_response.py
new file mode 100644
index 00000000..d846c68c
--- /dev/null
+++ b/src/humanloop/types/tool_call_response.py
@@ -0,0 +1,186 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from __future__ import annotations
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_linked_file_response import AgentLinkedFileResponse
+from .agent_response import AgentResponse
+from .evaluator_response import EvaluatorResponse
+from .flow_response import FlowResponse
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse
+from .prompt_response import PromptResponse
+from .tool_response import ToolResponse
+from .version_deployment_response import VersionDeploymentResponse
+from .version_id_response import VersionIdResponse
+from .agent_log_response import AgentLogResponse
+from .evaluator_log_response import EvaluatorLogResponse
+from .flow_log_response import FlowLogResponse
+from .prompt_log_response import PromptLogResponse
+from .tool_log_response import ToolLogResponse
+import typing
+import datetime as dt
+import pydantic
+from .log_status import LogStatus
+from .log_response import LogResponse
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.pydantic_utilities import update_forward_refs
+
+
+class ToolCallResponse(UncheckedBaseModel):
+    """
+    Response model for a Tool call.
+    """
+
+    start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+    """
+    When the logged event started.
+    """
+
+    end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+    """
+    When the logged event ended.
+    """
+
+    tool: ToolResponse = pydantic.Field()
+    """
+    Tool used to generate the Log.
+    """
+
+    output: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+    """
+
+    created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
+    """
+    User defined timestamp for when the log was created.
+    """
+
+    error: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Error message if the log is an error.
+    """
+
+    provider_latency: typing.Optional[float] = pydantic.Field(default=None)
+    """
+    Duration of the logged event in seconds.
+    """
+
+    stdout: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Captured log and debug statements.
+    """
+
+    provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+    """
+    Raw request sent to provider.
+    """
+
+    provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+    """
+    Raw response received from the provider.
+    """
+
+    inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+    """
+    The inputs passed to the prompt template.
+    """
+
+    source: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Identifies where the model was called from.
+    """
+
+    metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+    """
+    Any additional metadata to record.
+    """
+
+    log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
+    """
+    Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+    """
+
+    source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+    """
+
+    trace_parent_id: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    The ID of the parent Log to nest this Log under in a Trace.
+    """
+
+    batches: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
+    """
+    Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations.
+    """
+
+    user: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    End-user ID related to the Log.
+    """
+
+    environment: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    The name of the Environment the Log is associated to.
+    """
+
+    save: typing.Optional[bool] = pydantic.Field(default=None)
+    """
+    Whether the request/response payloads will be stored on Humanloop.
+    """
+
+    log_id: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+    """
+
+    id: str = pydantic.Field()
+    """
+    ID of the log.
+    """
+
+    evaluator_logs: typing.List[EvaluatorLogResponse] = pydantic.Field()
+    """
+    List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
+    """
+
+    trace_flow_id: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Identifier for the Flow that the Trace belongs to.
+    """
+
+    trace_id: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    ID of the Trace containing the Tool Call Log.
+ """ + + trace_children: typing.Optional[typing.List[LogResponse]] = pydantic.Field(default=None) + """ + Logs nested under this Log in the Trace. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +update_forward_refs(AgentLinkedFileResponse, ToolCallResponse=ToolCallResponse) +update_forward_refs(AgentResponse, ToolCallResponse=ToolCallResponse) +update_forward_refs(EvaluatorResponse, ToolCallResponse=ToolCallResponse) +update_forward_refs(FlowResponse, ToolCallResponse=ToolCallResponse) +update_forward_refs(MonitoringEvaluatorResponse, ToolCallResponse=ToolCallResponse) +update_forward_refs(PromptResponse, ToolCallResponse=ToolCallResponse) +update_forward_refs(ToolResponse, ToolCallResponse=ToolCallResponse) +update_forward_refs(VersionDeploymentResponse, ToolCallResponse=ToolCallResponse) +update_forward_refs(VersionIdResponse, ToolCallResponse=ToolCallResponse) +update_forward_refs(AgentLogResponse, ToolCallResponse=ToolCallResponse) +update_forward_refs(EvaluatorLogResponse, ToolCallResponse=ToolCallResponse) +update_forward_refs(FlowLogResponse, ToolCallResponse=ToolCallResponse) +update_forward_refs(PromptLogResponse, ToolCallResponse=ToolCallResponse) +update_forward_refs(ToolLogResponse, ToolCallResponse=ToolCallResponse) diff --git a/src/humanloop/types/tool_log_response.py b/src/humanloop/types/tool_log_response.py index 3680ef38..bb5989e7 100644 --- a/src/humanloop/types/tool_log_response.py +++ b/src/humanloop/types/tool_log_response.py @@ -2,6 +2,8 @@ from __future__ import annotations from ..core.unchecked_base_model import UncheckedBaseModel +from .agent_linked_file_response import AgentLinkedFileResponse +from .agent_response import AgentResponse from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse from .monitoring_evaluator_response import MonitoringEvaluatorResponse @@ -162,11 +164,15 @@ class Config: extra = pydantic.Extra.allow +from .agent_log_response import AgentLogResponse # noqa: E402 from .evaluator_log_response import EvaluatorLogResponse # noqa: E402 from .flow_log_response import FlowLogResponse # noqa: E402 from .prompt_log_response import PromptLogResponse # noqa: E402 from .log_response import LogResponse # noqa: E402 +update_forward_refs(AgentLinkedFileResponse, ToolLogResponse=ToolLogResponse) +update_forward_refs(AgentLogResponse, ToolLogResponse=ToolLogResponse) +update_forward_refs(AgentResponse, ToolLogResponse=ToolLogResponse) update_forward_refs(EvaluatorLogResponse, ToolLogResponse=ToolLogResponse) update_forward_refs(EvaluatorResponse, ToolLogResponse=ToolLogResponse) update_forward_refs(FlowLogResponse, ToolLogResponse=ToolLogResponse) diff --git a/src/humanloop/types/tool_response.py b/src/humanloop/types/tool_response.py index 41f84766..9601169b 100644 --- a/src/humanloop/types/tool_response.py +++ b/src/humanloop/types/tool_response.py @@ -163,6 +163,8 @@ class Config: extra = pydantic.Extra.allow +from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402 +from .agent_response import AgentResponse # noqa: E402 from .evaluator_response import EvaluatorResponse # noqa: E402 from .flow_response import FlowResponse # noqa: E402 from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402 @@ -170,6 +172,8 @@ class Config: from .version_deployment_response import 
diff --git a/src/humanloop/types/tool_response.py b/src/humanloop/types/tool_response.py
index 41f84766..9601169b 100644
--- a/src/humanloop/types/tool_response.py
+++ b/src/humanloop/types/tool_response.py
@@ -163,6 +163,8 @@ class Config:
             extra = pydantic.Extra.allow
 
 
+from .agent_linked_file_response import AgentLinkedFileResponse  # noqa: E402
+from .agent_response import AgentResponse  # noqa: E402
 from .evaluator_response import EvaluatorResponse  # noqa: E402
 from .flow_response import FlowResponse  # noqa: E402
 from .monitoring_evaluator_response import MonitoringEvaluatorResponse  # noqa: E402
@@ -170,6 +172,8 @@ class Config:
 from .version_deployment_response import VersionDeploymentResponse  # noqa: E402
 from .version_id_response import VersionIdResponse  # noqa: E402
 
+update_forward_refs(AgentLinkedFileResponse, ToolResponse=ToolResponse)
+update_forward_refs(AgentResponse, ToolResponse=ToolResponse)
 update_forward_refs(EvaluatorResponse, ToolResponse=ToolResponse)
 update_forward_refs(FlowResponse, ToolResponse=ToolResponse)
 update_forward_refs(MonitoringEvaluatorResponse, ToolResponse=ToolResponse)
diff --git a/src/humanloop/types/version_deployment_response.py b/src/humanloop/types/version_deployment_response.py
index 012b0583..3087d5b4 100644
--- a/src/humanloop/types/version_deployment_response.py
+++ b/src/humanloop/types/version_deployment_response.py
@@ -36,6 +36,8 @@ class Config:
             extra = pydantic.Extra.allow
 
 
+from .agent_linked_file_response import AgentLinkedFileResponse  # noqa: E402
+from .agent_response import AgentResponse  # noqa: E402
 from .evaluator_response import EvaluatorResponse  # noqa: E402
 from .flow_response import FlowResponse  # noqa: E402
 from .monitoring_evaluator_response import MonitoringEvaluatorResponse  # noqa: E402
@@ -44,6 +46,8 @@ class Config:
 from .version_id_response import VersionIdResponse  # noqa: E402
 from .version_deployment_response_file import VersionDeploymentResponseFile  # noqa: E402
 
+update_forward_refs(AgentLinkedFileResponse, VersionDeploymentResponse=VersionDeploymentResponse)
+update_forward_refs(AgentResponse, VersionDeploymentResponse=VersionDeploymentResponse)
 update_forward_refs(EvaluatorResponse, VersionDeploymentResponse=VersionDeploymentResponse)
 update_forward_refs(FlowResponse, VersionDeploymentResponse=VersionDeploymentResponse)
 update_forward_refs(MonitoringEvaluatorResponse, VersionDeploymentResponse=VersionDeploymentResponse)
diff --git a/src/humanloop/types/version_deployment_response_file.py b/src/humanloop/types/version_deployment_response_file.py
index e0f73573..4fadcff0 100644
--- a/src/humanloop/types/version_deployment_response_file.py
+++ b/src/humanloop/types/version_deployment_response_file.py
@@ -10,6 +10,7 @@
     from .tool_response import ToolResponse
     from .evaluator_response import EvaluatorResponse
     from .flow_response import FlowResponse
+    from .agent_response import AgentResponse
 
 VersionDeploymentResponseFile = typing.Union[
-    "PromptResponse", "ToolResponse", DatasetResponse, "EvaluatorResponse", "FlowResponse"
+    "PromptResponse", "ToolResponse", DatasetResponse, "EvaluatorResponse", "FlowResponse", "AgentResponse"
 ]
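In version_deployment_response_file.py the new member is added twice: once to the indented `if typing.TYPE_CHECKING:` import block and once, quoted, to the union itself. The quotes keep the union lazily evaluated, so the module never imports the heavy response models at runtime. The same pattern in miniature (`heavy_models` is a hypothetical module name):

    # Miniature of the TYPE_CHECKING plus quoted-union pattern.
    import typing

    if typing.TYPE_CHECKING:
        # Never executed at runtime, so importing stays cheap; type
        # checkers still resolve the quoted names below through it.
        from heavy_models import AgentResponse, PromptResponse

    FileVersion = typing.Union["PromptResponse", "AgentResponse"]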
diff --git a/src/humanloop/types/version_id_response.py b/src/humanloop/types/version_id_response.py
index 8670d853..c3a90f84 100644
--- a/src/humanloop/types/version_id_response.py
+++ b/src/humanloop/types/version_id_response.py
@@ -30,6 +30,8 @@ class Config:
             extra = pydantic.Extra.allow
 
 
+from .agent_linked_file_response import AgentLinkedFileResponse  # noqa: E402
+from .agent_response import AgentResponse  # noqa: E402
 from .evaluator_response import EvaluatorResponse  # noqa: E402
 from .flow_response import FlowResponse  # noqa: E402
 from .monitoring_evaluator_response import MonitoringEvaluatorResponse  # noqa: E402
@@ -38,6 +40,8 @@ class Config:
 from .version_deployment_response import VersionDeploymentResponse  # noqa: E402
 from .version_id_response_version import VersionIdResponseVersion  # noqa: E402
 
+update_forward_refs(AgentLinkedFileResponse, VersionIdResponse=VersionIdResponse)
+update_forward_refs(AgentResponse, VersionIdResponse=VersionIdResponse)
 update_forward_refs(EvaluatorResponse, VersionIdResponse=VersionIdResponse)
 update_forward_refs(FlowResponse, VersionIdResponse=VersionIdResponse)
 update_forward_refs(MonitoringEvaluatorResponse, VersionIdResponse=VersionIdResponse)
diff --git a/src/humanloop/types/version_id_response_version.py b/src/humanloop/types/version_id_response_version.py
index 2f56346c..b1cbd45d 100644
--- a/src/humanloop/types/version_id_response_version.py
+++ b/src/humanloop/types/version_id_response_version.py
@@ -10,6 +10,7 @@
     from .tool_response import ToolResponse
     from .evaluator_response import EvaluatorResponse
    from .flow_response import FlowResponse
+    from .agent_response import AgentResponse
 
 VersionIdResponseVersion = typing.Union[
-    "PromptResponse", "ToolResponse", DatasetResponse, "EvaluatorResponse", "FlowResponse"
+    "PromptResponse", "ToolResponse", DatasetResponse, "EvaluatorResponse", "FlowResponse", "AgentResponse"
 ]
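All of the models touched in this release are frozen and keep unknown fields, which is what lets an older client tolerate newer server payloads such as the Agent additions above. A standalone demonstration of both properties (hypothetical `Mini` model, Pydantic v2 spelling):

    # Demonstrates extra="allow" (unknown fields kept) and frozen=True
    # (instances immutable) on a hypothetical minimal model.
    import pydantic


    class Mini(pydantic.BaseModel):
        model_config = pydantic.ConfigDict(extra="allow", frozen=True)

        id: str


    m = Mini.model_validate({"id": "ag_123", "new_server_field": 42})
    assert m.new_server_field == 42  # the extra field was retained

    try:
        m.id = "other"
    except pydantic.ValidationError:
        print("frozen: mutation rejected")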