From 378aa373e0fba99ff474eb9c14dbb7ac37cc5dd2 Mon Sep 17 00:00:00 2001
From: fern-api <115122769+fern-api[bot]@users.noreply.github.com>
Date: Fri, 9 May 2025 15:23:43 +0000
Subject: [PATCH 1/2] SDK regeneration
---
poetry.lock | 422 ++---
pyproject.toml | 28 +-
reference.md | 1531 ++++-------------
requirements.txt | 6 +-
src/humanloop/__init__.py | 30 +-
src/humanloop/agents/__init__.py | 14 +
src/humanloop/agents/client.py | 1138 +++---------
src/humanloop/agents/raw_client.py | 718 +++++---
src/humanloop/agents/requests/__init__.py | 8 +
.../requests/agent_log_request_agent.py | 7 +
.../requests/agent_log_request_tool_choice.py | 1 +
.../agent_request_reasoning_effort.py | 1 +
.../agents/requests/agent_request_template.py | 1 +
.../requests/agent_request_tools_item.py | 3 +-
.../requests/agents_call_request_agent.py | 7 +
.../agents_call_request_tool_choice.py | 1 +
.../agents_call_stream_request_agent.py | 7 +
.../agents_call_stream_request_tool_choice.py | 1 +
src/humanloop/agents/types/__init__.py | 8 +
.../agents/types/agent_log_request_agent.py | 7 +
.../types/agent_log_request_tool_choice.py | 1 +
.../types/agent_request_reasoning_effort.py | 1 +
.../agents/types/agent_request_template.py | 1 +
.../agents/types/agent_request_tools_item.py | 3 +-
.../agents/types/agents_call_request_agent.py | 7 +
.../types/agents_call_request_tool_choice.py | 1 +
.../types/agents_call_stream_request_agent.py | 7 +
.../agents_call_stream_request_tool_choice.py | 1 +
src/humanloop/base_client.py | 52 +-
src/humanloop/core/__init__.py | 2 +
src/humanloop/core/api_error.py | 18 +-
src/humanloop/core/client_wrapper.py | 8 +-
src/humanloop/core/http_response.py | 30 +-
src/humanloop/core/pagination.py | 95 +-
src/humanloop/core/pydantic_utilities.py | 177 +-
src/humanloop/datasets/__init__.py | 2 +
src/humanloop/datasets/client.py | 699 ++------
src/humanloop/datasets/raw_client.py | 615 +++++--
src/humanloop/datasets/types/__init__.py | 2 +
src/humanloop/directories/__init__.py | 2 +
src/humanloop/directories/client.py | 156 +-
src/humanloop/directories/raw_client.py | 107 +-
src/humanloop/errors/__init__.py | 2 +
.../errors/unprocessable_entity_error.py | 6 +-
src/humanloop/evaluations/__init__.py | 2 +
src/humanloop/evaluations/client.py | 591 ++-----
src/humanloop/evaluations/raw_client.py | 424 +++--
.../evaluations/requests/__init__.py | 2 +
.../add_evaluators_request_evaluators_item.py | 3 +-
...eate_evaluation_request_evaluators_item.py | 3 +-
.../requests/create_run_request_dataset.py | 3 +-
.../requests/create_run_request_version.py | 3 +-
src/humanloop/evaluations/types/__init__.py | 2 +
.../add_evaluators_request_evaluators_item.py | 3 +-
...eate_evaluation_request_evaluators_item.py | 3 +-
.../types/create_run_request_dataset.py | 3 +-
.../types/create_run_request_version.py | 3 +-
src/humanloop/evaluators/__init__.py | 2 +
src/humanloop/evaluators/client.py | 606 ++-----
src/humanloop/evaluators/raw_client.py | 460 +++--
src/humanloop/evaluators/requests/__init__.py | 2 +
.../create_evaluator_log_request_spec.py | 5 +-
.../requests/evaluator_request_spec.py | 5 +-
src/humanloop/evaluators/types/__init__.py | 2 +
.../create_evaluator_log_request_spec.py | 5 +-
.../types/evaluator_request_spec.py | 5 +-
src/humanloop/files/__init__.py | 2 +
src/humanloop/files/client.py | 117 +-
src/humanloop/files/raw_client.py | 97 +-
src/humanloop/files/requests/__init__.py | 2 +
...th_files_retrieve_by_path_post_response.py | 7 +-
src/humanloop/files/types/__init__.py | 2 +
...th_files_retrieve_by_path_post_response.py | 7 +-
src/humanloop/flows/__init__.py | 2 +
src/humanloop/flows/client.py | 732 ++------
src/humanloop/flows/raw_client.py | 476 +++--
src/humanloop/logs/__init__.py | 2 +
src/humanloop/logs/client.py | 264 +--
src/humanloop/logs/raw_client.py | 334 +++-
src/humanloop/prompts/__init__.py | 14 +
src/humanloop/prompts/client.py | 1019 +++--------
src/humanloop/prompts/raw_client.py | 698 +++++---
src/humanloop/prompts/requests/__init__.py | 8 +
.../requests/prompt_log_request_prompt.py | 7 +
.../prompt_log_request_tool_choice.py | 1 +
.../prompt_log_update_request_tool_choice.py | 1 +
.../prompt_request_reasoning_effort.py | 1 +
.../requests/prompt_request_template.py | 1 +
.../requests/prompts_call_request_prompt.py | 7 +
.../prompts_call_request_tool_choice.py | 1 +
.../prompts_call_stream_request_prompt.py | 7 +
...prompts_call_stream_request_tool_choice.py | 1 +
src/humanloop/prompts/types/__init__.py | 8 +
.../types/prompt_log_request_prompt.py | 7 +
.../types/prompt_log_request_tool_choice.py | 1 +
.../prompt_log_update_request_tool_choice.py | 1 +
.../types/prompt_request_reasoning_effort.py | 1 +
.../prompts/types/prompt_request_template.py | 1 +
.../types/prompts_call_request_prompt.py | 7 +
.../types/prompts_call_request_tool_choice.py | 1 +
.../prompts_call_stream_request_prompt.py | 7 +
...prompts_call_stream_request_tool_choice.py | 1 +
src/humanloop/requests/__init__.py | 2 +
src/humanloop/requests/agent_call_response.py | 10 +-
.../agent_call_response_tool_choice.py | 1 +
.../requests/agent_call_stream_response.py | 6 +-
.../agent_call_stream_response_payload.py | 3 +-
.../requests/agent_continue_call_response.py | 10 +-
...gent_continue_call_response_tool_choice.py | 1 +
.../agent_continue_call_stream_response.py | 6 +-
...t_continue_call_stream_response_payload.py | 3 +-
.../requests/agent_continue_response.py | 202 ---
.../agent_continue_response_tool_choice.py | 8 -
.../agent_continue_stream_response.py | 19 -
.../agent_continue_stream_response_payload.py | 8 -
src/humanloop/requests/agent_inline_tool.py | 4 +-
.../requests/agent_kernel_request.py | 12 +-
.../agent_kernel_request_reasoning_effort.py | 1 +
.../requests/agent_kernel_request_template.py | 1 +
.../agent_kernel_request_tools_item.py | 3 +-
.../requests/agent_linked_file_request.py | 4 +-
.../requests/agent_linked_file_response.py | 6 +-
.../agent_linked_file_response_file.py | 9 +-
src/humanloop/requests/agent_log_response.py | 12 +-
.../agent_log_response_tool_choice.py | 1 +
.../requests/agent_log_stream_response.py | 4 +-
src/humanloop/requests/agent_response.py | 27 +-
.../agent_response_reasoning_effort.py | 1 +
.../requests/agent_response_template.py | 1 +
.../requests/agent_response_tools_item.py | 3 +-
.../anthropic_redacted_thinking_content.py | 3 +-
.../requests/anthropic_thinking_content.py | 3 +-
src/humanloop/requests/chat_message.py | 8 +-
.../requests/chat_message_content.py | 1 +
.../requests/chat_message_content_item.py | 3 +-
.../requests/chat_message_thinking_item.py | 3 +-
.../requests/code_evaluator_request.py | 8 +-
.../requests/create_agent_log_response.py | 1 -
.../requests/create_datapoint_request.py | 4 +-
.../requests/create_evaluator_log_response.py | 1 -
.../requests/create_flow_log_response.py | 1 -
.../requests/create_prompt_log_response.py | 1 -
.../requests/create_tool_log_response.py | 1 -
.../requests/dashboard_configuration.py | 3 +-
src/humanloop/requests/datapoint_response.py | 4 +-
src/humanloop/requests/dataset_response.py | 8 +-
src/humanloop/requests/directory_response.py | 6 +-
...tory_with_parents_and_children_response.py | 6 +-
...arents_and_children_response_files_item.py | 9 +-
.../requests/environment_response.py | 3 +-
src/humanloop/requests/evaluatee_request.py | 1 -
src/humanloop/requests/evaluatee_response.py | 4 +-
.../requests/evaluation_evaluator_response.py | 3 +-
.../requests/evaluation_log_response.py | 4 +-
src/humanloop/requests/evaluation_response.py | 6 +-
.../requests/evaluation_run_response.py | 8 +-
.../requests/evaluation_runs_response.py | 3 +-
src/humanloop/requests/evaluation_stats.py | 4 +-
...aluator_activation_deactivation_request.py | 4 +-
...tion_deactivation_request_activate_item.py | 3 +-
...on_deactivation_request_deactivate_item.py | 3 +-
src/humanloop/requests/evaluator_aggregate.py | 3 +-
src/humanloop/requests/evaluator_file_id.py | 1 -
src/humanloop/requests/evaluator_file_path.py | 1 -
.../evaluator_judgment_number_limit.py | 1 -
.../evaluator_judgment_option_response.py | 1 -
.../requests/evaluator_log_response.py | 6 +-
src/humanloop/requests/evaluator_response.py | 14 +-
.../requests/evaluator_response_spec.py | 5 +-
.../requests/evaluator_version_id.py | 1 -
.../requests/external_evaluator_request.py | 8 +-
.../requests/file_environment_response.py | 4 +-
.../file_environment_response_file.py | 7 +-
src/humanloop/requests/file_id.py | 1 -
src/humanloop/requests/file_path.py | 1 -
src/humanloop/requests/file_request.py | 1 -
src/humanloop/requests/flow_kernel_request.py | 3 +-
src/humanloop/requests/flow_log_response.py | 10 +-
src/humanloop/requests/flow_response.py | 10 +-
src/humanloop/requests/function_tool.py | 1 -
.../requests/http_validation_error.py | 4 +-
.../requests/human_evaluator_request.py | 8 +-
src/humanloop/requests/image_chat_content.py | 3 +-
src/humanloop/requests/image_url.py | 1 -
src/humanloop/requests/linked_file_request.py | 1 -
.../requests/linked_tool_response.py | 4 +-
src/humanloop/requests/list_agents.py | 3 +-
src/humanloop/requests/list_datasets.py | 3 +-
src/humanloop/requests/list_evaluators.py | 3 +-
src/humanloop/requests/list_flows.py | 3 +-
src/humanloop/requests/list_prompts.py | 3 +-
src/humanloop/requests/list_tools.py | 3 +-
.../requests/llm_evaluator_request.py | 8 +-
src/humanloop/requests/log_response.py | 8 +-
src/humanloop/requests/log_stream_response.py | 3 +-
.../requests/monitoring_evaluator_response.py | 9 +-
.../numeric_evaluator_stats_response.py | 4 +-
.../requests/paginated_data_agent_response.py | 3 +-
.../paginated_data_evaluation_log_response.py | 3 +-
.../paginated_data_evaluator_response.py | 3 +-
.../requests/paginated_data_flow_response.py | 3 +-
.../requests/paginated_data_log_response.py | 3 +-
.../paginated_data_prompt_response.py | 3 +-
.../requests/paginated_data_tool_response.py | 3 +-
...r_response_flow_response_agent_response.py | 3 +-
...ow_response_agent_response_records_item.py | 7 +-
.../requests/paginated_datapoint_response.py | 3 +-
.../requests/paginated_dataset_response.py | 3 +-
.../requests/paginated_evaluation_response.py | 3 +-
.../requests/populate_template_response.py | 29 +-
...te_template_response_populated_template.py | 1 +
...late_template_response_reasoning_effort.py | 1 +
.../populate_template_response_template.py | 1 +
.../requests/prompt_call_log_response.py | 4 +-
.../requests/prompt_call_response.py | 8 +-
.../prompt_call_response_tool_choice.py | 1 +
.../requests/prompt_call_stream_response.py | 4 +-
.../requests/prompt_kernel_request.py | 10 +-
.../prompt_kernel_request_reasoning_effort.py | 1 +
.../prompt_kernel_request_template.py | 1 +
src/humanloop/requests/prompt_log_response.py | 10 +-
.../prompt_log_response_tool_choice.py | 1 +
src/humanloop/requests/prompt_response.py | 29 +-
.../prompt_response_reasoning_effort.py | 1 +
.../requests/prompt_response_template.py | 1 +
src/humanloop/requests/provider_api_keys.py | 1 -
src/humanloop/requests/response_format.py | 4 +-
src/humanloop/requests/run_stats_response.py | 6 +-
...run_stats_response_evaluator_stats_item.py | 3 +-
.../requests/run_version_response.py | 7 +-
.../select_evaluator_stats_response.py | 3 +-
src/humanloop/requests/text_chat_content.py | 3 +-
src/humanloop/requests/tool_call_response.py | 6 +-
src/humanloop/requests/tool_function.py | 4 +-
src/humanloop/requests/tool_kernel_request.py | 4 +-
src/humanloop/requests/tool_log_response.py | 8 +-
src/humanloop/requests/tool_response.py | 14 +-
.../requests/update_version_request.py | 1 -
src/humanloop/requests/validation_error.py | 3 +-
.../requests/version_deployment_response.py | 5 +-
.../version_deployment_response_file.py | 9 +-
src/humanloop/requests/version_id_response.py | 5 +-
.../requests/version_id_response_version.py | 9 +-
.../requests/version_reference_response.py | 2 +-
.../requests/version_stats_response.py | 4 +-
...s_response_evaluator_version_stats_item.py | 3 +-
src/humanloop/tools/__init__.py | 2 +
src/humanloop/tools/client.py | 836 +++------
src/humanloop/tools/raw_client.py | 560 ++++--
src/humanloop/types/__init__.py | 6 +-
src/humanloop/types/agent_call_response.py | 51 +-
.../types/agent_call_response_tool_choice.py | 1 +
.../types/agent_call_stream_response.py | 43 +-
.../agent_call_stream_response_payload.py | 3 +-
src/humanloop/types/agent_config_response.py | 5 +-
.../types/agent_continue_call_response.py | 51 +-
...gent_continue_call_response_tool_choice.py | 1 +
.../agent_continue_call_stream_response.py | 43 +-
...t_continue_call_stream_response_payload.py | 3 +-
.../types/agent_continue_response.py | 224 ---
.../agent_continue_response_tool_choice.py | 8 -
.../types/agent_continue_stream_response.py | 44 -
.../agent_continue_stream_response_payload.py | 8 -
src/humanloop/types/agent_inline_tool.py | 9 +-
src/humanloop/types/agent_kernel_request.py | 17 +-
.../agent_kernel_request_reasoning_effort.py | 1 +
.../types/agent_kernel_request_template.py | 1 +
.../types/agent_kernel_request_tools_item.py | 3 +-
.../types/agent_linked_file_request.py | 7 +-
.../types/agent_linked_file_response.py | 27 +-
.../types/agent_linked_file_response_file.py | 9 +-
src/humanloop/types/agent_log_response.py | 41 +-
.../types/agent_log_response_tool_choice.py | 1 +
.../types/agent_log_stream_response.py | 7 +-
src/humanloop/types/agent_response.py | 52 +-
.../types/agent_response_reasoning_effort.py | 1 +
.../types/agent_response_template.py | 1 +
.../types/agent_response_tools_item.py | 3 +-
.../anthropic_redacted_thinking_content.py | 3 +-
.../types/anthropic_thinking_content.py | 3 +-
.../types/boolean_evaluator_stats_response.py | 5 +-
src/humanloop/types/chat_message.py | 9 +-
src/humanloop/types/chat_message_content.py | 1 +
.../types/chat_message_content_item.py | 3 +-
.../types/chat_message_thinking_item.py | 3 +-
src/humanloop/types/code_evaluator_request.py | 11 +-
.../types/create_agent_log_response.py | 7 +-
.../types/create_datapoint_request.py | 5 +-
.../types/create_evaluator_log_response.py | 5 +-
.../types/create_flow_log_response.py | 7 +-
.../types/create_prompt_log_response.py | 5 +-
.../types/create_tool_log_response.py | 5 +-
.../types/dashboard_configuration.py | 7 +-
src/humanloop/types/datapoint_response.py | 5 +-
src/humanloop/types/dataset_response.py | 11 +-
src/humanloop/types/directory_response.py | 7 +-
...tory_with_parents_and_children_response.py | 33 +-
...arents_and_children_response_files_item.py | 9 +-
src/humanloop/types/environment_response.py | 7 +-
src/humanloop/types/evaluatee_request.py | 3 +-
src/humanloop/types/evaluatee_response.py | 33 +-
.../types/evaluation_evaluator_response.py | 33 +-
.../types/evaluation_log_response.py | 47 +-
src/humanloop/types/evaluation_response.py | 33 +-
.../types/evaluation_run_response.py | 35 +-
.../types/evaluation_runs_response.py | 31 +-
src/humanloop/types/evaluation_stats.py | 7 +-
...aluator_activation_deactivation_request.py | 7 +-
...tion_deactivation_request_activate_item.py | 3 +-
...on_deactivation_request_deactivate_item.py | 3 +-
src/humanloop/types/evaluator_aggregate.py | 7 +-
.../types/evaluator_config_response.py | 5 +-
src/humanloop/types/evaluator_file_id.py | 3 +-
src/humanloop/types/evaluator_file_path.py | 3 +-
.../types/evaluator_judgment_number_limit.py | 3 +-
.../evaluator_judgment_option_response.py | 7 +-
src/humanloop/types/evaluator_log_response.py | 41 +-
src/humanloop/types/evaluator_response.py | 33 +-
.../types/evaluator_response_spec.py | 5 +-
src/humanloop/types/evaluator_version_id.py | 5 +-
.../types/external_evaluator_request.py | 11 +-
.../types/file_environment_response.py | 33 +-
.../types/file_environment_response_file.py | 7 +-
.../file_environment_variable_request.py | 5 +-
src/humanloop/types/file_id.py | 3 +-
src/humanloop/types/file_path.py | 3 +-
src/humanloop/types/file_request.py | 3 +-
src/humanloop/types/file_sort_by.py | 5 +
src/humanloop/types/flow_kernel_request.py | 3 +-
src/humanloop/types/flow_log_response.py | 41 +-
src/humanloop/types/flow_response.py | 29 +-
src/humanloop/types/function_tool.py | 5 +-
src/humanloop/types/function_tool_choice.py | 5 +-
src/humanloop/types/http_validation_error.py | 7 +-
.../types/human_evaluator_request.py | 11 +-
src/humanloop/types/image_chat_content.py | 5 +-
src/humanloop/types/image_url.py | 7 +-
src/humanloop/types/input_response.py | 5 +-
src/humanloop/types/linked_file_request.py | 5 +-
src/humanloop/types/linked_tool_response.py | 5 +-
src/humanloop/types/list_agents.py | 31 +-
src/humanloop/types/list_datasets.py | 5 +-
src/humanloop/types/list_evaluators.py | 31 +-
src/humanloop/types/list_flows.py | 31 +-
src/humanloop/types/list_prompts.py | 31 +-
src/humanloop/types/list_tools.py | 31 +-
src/humanloop/types/llm_evaluator_request.py | 13 +-
src/humanloop/types/log_response.py | 8 +-
src/humanloop/types/log_stream_response.py | 3 +-
...onitoring_evaluator_environment_request.py | 5 +-
.../types/monitoring_evaluator_response.py | 29 +-
.../monitoring_evaluator_version_request.py | 5 +-
.../types/numeric_evaluator_stats_response.py | 5 +-
src/humanloop/types/overall_stats.py | 5 +-
.../types/paginated_data_agent_response.py | 31 +-
.../paginated_data_evaluation_log_response.py | 41 +-
.../paginated_data_evaluator_response.py | 31 +-
.../types/paginated_data_flow_response.py | 31 +-
.../types/paginated_data_log_response.py | 43 +-
.../types/paginated_data_prompt_response.py | 31 +-
.../types/paginated_data_tool_response.py | 31 +-
...r_response_flow_response_agent_response.py | 33 +-
...ow_response_agent_response_records_item.py | 7 +-
.../types/paginated_datapoint_response.py | 7 +-
.../types/paginated_dataset_response.py | 7 +-
.../types/paginated_evaluation_response.py | 31 +-
.../types/populate_template_response.py | 60 +-
...te_template_response_populated_template.py | 1 +
...late_template_response_reasoning_effort.py | 1 +
.../populate_template_response_template.py | 1 +
src/humanloop/types/project_sort_by.py | 5 -
.../types/prompt_call_log_response.py | 7 +-
src/humanloop/types/prompt_call_response.py | 35 +-
.../types/prompt_call_response_tool_choice.py | 1 +
.../types/prompt_call_stream_response.py | 7 +-
src/humanloop/types/prompt_kernel_request.py | 13 +-
.../prompt_kernel_request_reasoning_effort.py | 1 +
.../types/prompt_kernel_request_template.py | 1 +
src/humanloop/types/prompt_log_response.py | 43 +-
.../types/prompt_log_response_tool_choice.py | 1 +
src/humanloop/types/prompt_response.py | 50 +-
.../types/prompt_response_reasoning_effort.py | 1 +
.../types/prompt_response_template.py | 1 +
src/humanloop/types/provider_api_keys.py | 7 +-
src/humanloop/types/response_format.py | 5 +-
src/humanloop/types/run_stats_response.py | 9 +-
...run_stats_response_evaluator_stats_item.py | 3 +-
src/humanloop/types/run_version_response.py | 7 +-
.../types/select_evaluator_stats_response.py | 5 +-
src/humanloop/types/text_chat_content.py | 3 +-
.../types/text_evaluator_stats_response.py | 5 +-
src/humanloop/types/tool_call.py | 7 +-
src/humanloop/types/tool_call_response.py | 49 +-
src/humanloop/types/tool_choice.py | 7 +-
src/humanloop/types/tool_function.py | 5 +-
src/humanloop/types/tool_kernel_request.py | 5 +-
src/humanloop/types/tool_log_response.py | 41 +-
src/humanloop/types/tool_response.py | 35 +-
src/humanloop/types/update_version_request.py | 3 +-
src/humanloop/types/validation_error.py | 7 +-
.../types/version_deployment_response.py | 27 +-
.../types/version_deployment_response_file.py | 9 +-
src/humanloop/types/version_id.py | 5 +-
src/humanloop/types/version_id_response.py | 27 +-
.../types/version_id_response_version.py | 15 +-
.../types/version_reference_response.py | 1 +
src/humanloop/types/version_stats_response.py | 7 +-
...s_response_evaluator_version_stats_item.py | 3 +-
408 files changed, 7512 insertions(+), 9137 deletions(-)
create mode 100644 src/humanloop/agents/requests/agent_log_request_agent.py
create mode 100644 src/humanloop/agents/requests/agents_call_request_agent.py
create mode 100644 src/humanloop/agents/requests/agents_call_stream_request_agent.py
create mode 100644 src/humanloop/agents/types/agent_log_request_agent.py
create mode 100644 src/humanloop/agents/types/agents_call_request_agent.py
create mode 100644 src/humanloop/agents/types/agents_call_stream_request_agent.py
create mode 100644 src/humanloop/prompts/requests/prompt_log_request_prompt.py
create mode 100644 src/humanloop/prompts/requests/prompts_call_request_prompt.py
create mode 100644 src/humanloop/prompts/requests/prompts_call_stream_request_prompt.py
create mode 100644 src/humanloop/prompts/types/prompt_log_request_prompt.py
create mode 100644 src/humanloop/prompts/types/prompts_call_request_prompt.py
create mode 100644 src/humanloop/prompts/types/prompts_call_stream_request_prompt.py
delete mode 100644 src/humanloop/requests/agent_continue_response.py
delete mode 100644 src/humanloop/requests/agent_continue_response_tool_choice.py
delete mode 100644 src/humanloop/requests/agent_continue_stream_response.py
delete mode 100644 src/humanloop/requests/agent_continue_stream_response_payload.py
delete mode 100644 src/humanloop/types/agent_continue_response.py
delete mode 100644 src/humanloop/types/agent_continue_response_tool_choice.py
delete mode 100644 src/humanloop/types/agent_continue_stream_response.py
delete mode 100644 src/humanloop/types/agent_continue_stream_response_payload.py
create mode 100644 src/humanloop/types/file_sort_by.py
delete mode 100644 src/humanloop/types/project_sort_by.py
diff --git a/poetry.lock b/poetry.lock
index cfe8a240..915e9f5d 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -13,13 +13,13 @@ files = [
[[package]]
name = "anthropic"
-version = "0.50.0"
+version = "0.51.0"
description = "The official Python library for the anthropic API"
optional = false
python-versions = ">=3.8"
files = [
- {file = "anthropic-0.50.0-py3-none-any.whl", hash = "sha256:defbd79327ca2fa61fd7b9eb2f1627dfb1f69c25d49288c52e167ddb84574f80"},
- {file = "anthropic-0.50.0.tar.gz", hash = "sha256:42175ec04ce4ff2fa37cd436710206aadff546ee99d70d974699f59b49adc66f"},
+ {file = "anthropic-0.51.0-py3-none-any.whl", hash = "sha256:b8b47d482c9aa1f81b923555cebb687c2730309a20d01be554730c8302e0f62a"},
+ {file = "anthropic-0.51.0.tar.gz", hash = "sha256:6f824451277992af079554430d5b2c8ff5bc059cc2c968cdc3f06824437da201"},
]
[package.dependencies]
@@ -89,103 +89,103 @@ files = [
[[package]]
name = "charset-normalizer"
-version = "3.4.1"
+version = "3.4.2"
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
optional = false
python-versions = ">=3.7"
files = [
- {file = "charset_normalizer-3.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e218488cd232553829be0664c2292d3af2eeeb94b32bea483cf79ac6a694e037"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80ed5e856eb7f30115aaf94e4a08114ccc8813e6ed1b5efa74f9f82e8509858f"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b010a7a4fd316c3c484d482922d13044979e78d1861f0e0650423144c616a46a"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4532bff1b8421fd0a320463030c7520f56a79c9024a4e88f01c537316019005a"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d973f03c0cb71c5ed99037b870f2be986c3c05e63622c017ea9816881d2dd247"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3a3bd0dcd373514dcec91c411ddb9632c0d7d92aed7093b8c3bbb6d69ca74408"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:d9c3cdf5390dcd29aa8056d13e8e99526cda0305acc038b96b30352aff5ff2bb"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:2bdfe3ac2e1bbe5b59a1a63721eb3b95fc9b6817ae4a46debbb4e11f6232428d"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:eab677309cdb30d047996b36d34caeda1dc91149e4fdca0b1a039b3f79d9a807"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-win32.whl", hash = "sha256:c0429126cf75e16c4f0ad00ee0eae4242dc652290f940152ca8c75c3a4b6ee8f"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:9f0b8b1c6d84c8034a44893aba5e767bf9c7a211e313a9605d9c617d7083829f"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8bfa33f4f2672964266e940dd22a195989ba31669bd84629f05fab3ef4e2d125"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28bf57629c75e810b6ae989f03c0828d64d6b26a5e205535585f96093e405ed1"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f08ff5e948271dc7e18a35641d2f11a4cd8dfd5634f55228b691e62b37125eb3"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:234ac59ea147c59ee4da87a0c0f098e9c8d169f4dc2a159ef720f1a61bbe27cd"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd4ec41f914fa74ad1b8304bbc634b3de73d2a0889bd32076342a573e0779e00"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eea6ee1db730b3483adf394ea72f808b6e18cf3cb6454b4d86e04fa8c4327a12"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c96836c97b1238e9c9e3fe90844c947d5afbf4f4c92762679acfe19927d81d77"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:4d86f7aff21ee58f26dcf5ae81a9addbd914115cdebcbb2217e4f0ed8982e146"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:09b5e6733cbd160dcc09589227187e242a30a49ca5cefa5a7edd3f9d19ed53fd"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:5777ee0881f9499ed0f71cc82cf873d9a0ca8af166dfa0af8ec4e675b7df48e6"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:237bdbe6159cff53b4f24f397d43c6336c6b0b42affbe857970cefbb620911c8"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-win32.whl", hash = "sha256:8417cb1f36cc0bc7eaba8ccb0e04d55f0ee52df06df3ad55259b9a323555fc8b"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:d7f50a1f8c450f3925cb367d011448c39239bb3eb4117c36a6d354794de4ce76"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:73d94b58ec7fecbc7366247d3b0b10a21681004153238750bb67bd9012414545"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dad3e487649f498dd991eeb901125411559b22e8d7ab25d3aeb1af367df5efd7"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c30197aa96e8eed02200a83fba2657b4c3acd0f0aa4bdc9f6c1af8e8962e0757"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2369eea1ee4a7610a860d88f268eb39b95cb588acd7235e02fd5a5601773d4fa"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc2722592d8998c870fa4e290c2eec2c1569b87fe58618e67d38b4665dfa680d"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffc9202a29ab3920fa812879e95a9e78b2465fd10be7fcbd042899695d75e616"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:804a4d582ba6e5b747c625bf1255e6b1507465494a40a2130978bda7b932c90b"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f55e69f030f7163dffe9fd0752b32f070566451afe180f99dbeeb81f511ad8d"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c4c3e6da02df6fa1410a7680bd3f63d4f710232d3139089536310d027950696a"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:5df196eb874dae23dcfb968c83d4f8fdccb333330fe1fc278ac5ceeb101003a9"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-win32.whl", hash = "sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-win32.whl", hash = "sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f30bf9fd9be89ecb2360c7d94a711f00c09b976258846efe40db3d05828e8089"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:97f68b8d6831127e4787ad15e6757232e14e12060bec17091b85eb1486b91d8d"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7974a0b5ecd505609e3b19742b60cee7aa2aa2fb3151bc917e6e2646d7667dcf"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc54db6c8593ef7d4b2a331b58653356cf04f67c960f584edb7c3d8c97e8f39e"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:311f30128d7d333eebd7896965bfcfbd0065f1716ec92bd5638d7748eb6f936a"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:7d053096f67cd1241601111b698f5cad775f97ab25d81567d3f59219b5f1adbd"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:807f52c1f798eef6cf26beb819eeb8819b1622ddfeef9d0977a8502d4db6d534"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:dccbe65bd2f7f7ec22c4ff99ed56faa1e9f785482b9bbd7c717e26fd723a1d1e"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:2fb9bd477fdea8684f78791a6de97a953c51831ee2981f8e4f583ff3b9d9687e"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:01732659ba9b5b873fc117534143e4feefecf3b2078b0a6a2e925271bb6f4cfa"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-win32.whl", hash = "sha256:7a4f97a081603d2050bfaffdefa5b02a9ec823f8348a572e39032caa8404a487"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-win_amd64.whl", hash = "sha256:7b1bef6280950ee6c177b326508f86cad7ad4dff12454483b51d8b7d673a2c5d"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ecddf25bee22fe4fe3737a399d0d177d72bc22be6913acfab364b40bce1ba83c"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c60ca7339acd497a55b0ea5d506b2a2612afb2826560416f6894e8b5770d4a9"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b7b2d86dd06bfc2ade3312a83a5c364c7ec2e3498f8734282c6c3d4b07b346b8"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd78cfcda14a1ef52584dbb008f7ac81c1328c0f58184bf9a84c49c605002da6"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e27f48bcd0957c6d4cb9d6fa6b61d192d0b13d5ef563e5f2ae35feafc0d179c"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:01ad647cdd609225c5350561d084b42ddf732f4eeefe6e678765636791e78b9a"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:619a609aa74ae43d90ed2e89bdd784765de0a25ca761b93e196d938b8fd1dbbd"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:89149166622f4db9b4b6a449256291dc87a99ee53151c74cbd82a53c8c2f6ccd"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:7709f51f5f7c853f0fb938bcd3bc59cdfdc5203635ffd18bf354f6967ea0f824"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:345b0426edd4e18138d6528aed636de7a9ed169b4aaf9d61a8c19e39d26838ca"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0907f11d019260cdc3f94fbdb23ff9125f6b5d1039b76003b5b0ac9d6a6c9d5b"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-win32.whl", hash = "sha256:ea0d8d539afa5eb2728aa1932a988a9a7af94f18582ffae4bc10b3fbdad0626e"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:329ce159e82018d646c7ac45b01a430369d526569ec08516081727a20e9e4af4"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b97e690a2118911e39b4042088092771b4ae3fc3aa86518f84b8cf6888dbdb41"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78baa6d91634dfb69ec52a463534bc0df05dbd546209b79a3880a34487f4b84f"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1a2bc9f351a75ef49d664206d51f8e5ede9da246602dc2d2726837620ea034b2"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75832c08354f595c760a804588b9357d34ec00ba1c940c15e31e96d902093770"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0af291f4fe114be0280cdd29d533696a77b5b49cfde5467176ecab32353395c4"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0167ddc8ab6508fe81860a57dd472b2ef4060e8d378f0cc555707126830f2537"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2a75d49014d118e4198bcee5ee0a6f25856b29b12dbf7cd012791f8a6cc5c496"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:363e2f92b0f0174b2f8238240a1a30142e3db7b957a5dd5689b0e75fb717cc78"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ab36c8eb7e454e34e60eb55ca5d241a5d18b2c6244f6827a30e451c42410b5f7"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:4c0907b1928a36d5a998d72d64d8eaa7244989f7aaaf947500d3a800c83a3fd6"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:04432ad9479fa40ec0f387795ddad4437a2b50417c69fa275e212933519ff294"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-win32.whl", hash = "sha256:3bed14e9c89dcb10e8f3a29f9ccac4955aebe93c71ae803af79265c9ca5644c5"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:49402233c892a461407c512a19435d1ce275543138294f7ef013f0b63d5d3765"},
- {file = "charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85"},
- {file = "charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7c48ed483eb946e6c04ccbe02c6b4d1d48e51944b6db70f697e089c193404941"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2d318c11350e10662026ad0eb71bb51c7812fc8590825304ae0bdd4ac283acd"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9cbfacf36cb0ec2897ce0ebc5d08ca44213af24265bd56eca54bee7923c48fd6"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18dd2e350387c87dabe711b86f83c9c78af772c748904d372ade190b5c7c9d4d"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8075c35cd58273fee266c58c0c9b670947c19df5fb98e7b66710e04ad4e9ff86"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5bf4545e3b962767e5c06fe1738f951f77d27967cb2caa64c28be7c4563e162c"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:7a6ab32f7210554a96cd9e33abe3ddd86732beeafc7a28e9955cdf22ffadbab0"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:b33de11b92e9f75a2b545d6e9b6f37e398d86c3e9e9653c4864eb7e89c5773ef"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:8755483f3c00d6c9a77f490c17e6ab0c8729e39e6390328e42521ef175380ae6"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:68a328e5f55ec37c57f19ebb1fdc56a248db2e3e9ad769919a58672958e8f366"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:21b2899062867b0e1fde9b724f8aecb1af14f2778d69aacd1a5a1853a597a5db"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-win32.whl", hash = "sha256:e8082b26888e2f8b36a042a58307d5b917ef2b1cacab921ad3323ef91901c71a"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-win_amd64.whl", hash = "sha256:f69a27e45c43520f5487f27627059b64aaf160415589230992cec34c5e18a509"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:be1e352acbe3c78727a16a455126d9ff83ea2dfdcbc83148d2982305a04714c2"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa88ca0b1932e93f2d961bf3addbb2db902198dca337d88c89e1559e066e7645"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d524ba3f1581b35c03cb42beebab4a13e6cdad7b36246bd22541fa585a56cccd"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28a1005facc94196e1fb3e82a3d442a9d9110b8434fc1ded7a24a2983c9888d8"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fdb20a30fe1175ecabed17cbf7812f7b804b8a315a25f24678bcdf120a90077f"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0f5d9ed7f254402c9e7d35d2f5972c9bbea9040e99cd2861bd77dc68263277c7"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:efd387a49825780ff861998cd959767800d54f8308936b21025326de4b5a42b9"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:f0aa37f3c979cf2546b73e8222bbfa3dc07a641585340179d768068e3455e544"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:e70e990b2137b29dc5564715de1e12701815dacc1d056308e2b17e9095372a82"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:0c8c57f84ccfc871a48a47321cfa49ae1df56cd1d965a09abe84066f6853b9c0"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6b66f92b17849b85cad91259efc341dce9c1af48e2173bf38a85c6329f1033e5"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-win32.whl", hash = "sha256:daac4765328a919a805fa5e2720f3e94767abd632ae410a9062dff5412bae65a"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-win_amd64.whl", hash = "sha256:e53efc7c7cee4c1e70661e2e112ca46a575f90ed9ae3fef200f2a25e954f4b28"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0c29de6a1a95f24b9a1aa7aefd27d2487263f00dfd55a77719b530788f75cff7"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cddf7bd982eaa998934a91f69d182aec997c6c468898efe6679af88283b498d3"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcbe676a55d7445b22c10967bceaaf0ee69407fbe0ece4d032b6eb8d4565982a"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d41c4d287cfc69060fa91cae9683eacffad989f1a10811995fa309df656ec214"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e594135de17ab3866138f496755f302b72157d115086d100c3f19370839dd3a"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf713fe9a71ef6fd5adf7a79670135081cd4431c2943864757f0fa3a65b1fafd"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a370b3e078e418187da8c3674eddb9d983ec09445c99a3a263c2011993522981"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a955b438e62efdf7e0b7b52a64dc5c3396e2634baa62471768a64bc2adb73d5c"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:7222ffd5e4de8e57e03ce2cef95a4c43c98fcb72ad86909abdfc2c17d227fc1b"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:bee093bf902e1d8fc0ac143c88902c3dfc8941f7ea1d6a8dd2bcb786d33db03d"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:dedb8adb91d11846ee08bec4c8236c8549ac721c245678282dcb06b221aab59f"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-win32.whl", hash = "sha256:db4c7bf0e07fc3b7d89ac2a5880a6a8062056801b83ff56d8464b70f65482b6c"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:5a9979887252a82fefd3d3ed2a8e3b937a7a809f65dcb1e068b090e165bbe99e"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:926ca93accd5d36ccdabd803392ddc3e03e6d4cd1cf17deff3b989ab8e9dbcf0"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eba9904b0f38a143592d9fc0e19e2df0fa2e41c3c3745554761c5f6447eedabf"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3fddb7e2c84ac87ac3a947cb4e66d143ca5863ef48e4a5ecb83bd48619e4634e"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98f862da73774290f251b9df8d11161b6cf25b599a66baf087c1ffe340e9bfd1"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c9379d65defcab82d07b2a9dfbfc2e95bc8fe0ebb1b176a3190230a3ef0e07c"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e635b87f01ebc977342e2697d05b56632f5f879a4f15955dfe8cef2448b51691"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1c95a1e2902a8b722868587c0e1184ad5c55631de5afc0eb96bc4b0d738092c0"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ef8de666d6179b009dce7bcb2ad4c4a779f113f12caf8dc77f0162c29d20490b"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:32fc0341d72e0f73f80acb0a2c94216bd704f4f0bce10aedea38f30502b271ff"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:289200a18fa698949d2b39c671c2cc7a24d44096784e76614899a7ccf2574b7b"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4a476b06fbcf359ad25d34a057b7219281286ae2477cc5ff5e3f70a246971148"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-win32.whl", hash = "sha256:aaeeb6a479c7667fbe1099af9617c83aaca22182d6cf8c53966491a0f1b7ffb7"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-win_amd64.whl", hash = "sha256:aa6af9e7d59f9c12b33ae4e9450619cf2488e2bbe9b44030905877f0b2324980"},
+ {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1cad5f45b3146325bb38d6855642f6fd609c3f7cad4dbaf75549bf3b904d3184"},
+ {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b2680962a4848b3c4f155dc2ee64505a9c57186d0d56b43123b17ca3de18f0fa"},
+ {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:36b31da18b8890a76ec181c3cf44326bf2c48e36d393ca1b72b3f484113ea344"},
+ {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f4074c5a429281bf056ddd4c5d3b740ebca4d43ffffe2ef4bf4d2d05114299da"},
+ {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c9e36a97bee9b86ef9a1cf7bb96747eb7a15c2f22bdb5b516434b00f2a599f02"},
+ {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:1b1bde144d98e446b056ef98e59c256e9294f6b74d7af6846bf5ffdafd687a7d"},
+ {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:915f3849a011c1f593ab99092f3cecfcb4d65d8feb4a64cf1bf2d22074dc0ec4"},
+ {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:fb707f3e15060adf5b7ada797624a6c6e0138e2a26baa089df64c68ee98e040f"},
+ {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:25a23ea5c7edc53e0f29bae2c44fcb5a1aa10591aae107f2a2b2583a9c5cbc64"},
+ {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:770cab594ecf99ae64c236bc9ee3439c3f46be49796e265ce0cc8bc17b10294f"},
+ {file = "charset_normalizer-3.4.2-cp37-cp37m-win32.whl", hash = "sha256:6a0289e4589e8bdfef02a80478f1dfcb14f0ab696b5a00e1f4b8a14a307a3c58"},
+ {file = "charset_normalizer-3.4.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6fc1f5b51fa4cecaa18f2bd7a003f3dd039dd615cd69a2afd6d3b19aed6775f2"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:76af085e67e56c8816c3ccf256ebd136def2ed9654525348cfa744b6802b69eb"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e45ba65510e2647721e35323d6ef54c7974959f6081b58d4ef5d87c60c84919a"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:046595208aae0120559a67693ecc65dd75d46f7bf687f159127046628178dc45"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75d10d37a47afee94919c4fab4c22b9bc2a8bf7d4f46f87363bcf0573f3ff4f5"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6333b3aa5a12c26b2a4d4e7335a28f1475e0e5e17d69d55141ee3cab736f66d1"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e8323a9b031aa0393768b87f04b4164a40037fb2a3c11ac06a03ffecd3618027"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:24498ba8ed6c2e0b56d4acbf83f2d989720a93b41d712ebd4f4979660db4417b"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:844da2b5728b5ce0e32d863af26f32b5ce61bc4273a9c720a9f3aa9df73b1455"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:65c981bdbd3f57670af8b59777cbfae75364b483fa8a9f420f08094531d54a01"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:3c21d4fca343c805a52c0c78edc01e3477f6dd1ad7c47653241cf2a206d4fc58"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:dc7039885fa1baf9be153a0626e337aa7ec8bf96b0128605fb0d77788ddc1681"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-win32.whl", hash = "sha256:8272b73e1c5603666618805fe821edba66892e2870058c94c53147602eab29c7"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-win_amd64.whl", hash = "sha256:70f7172939fdf8790425ba31915bfbe8335030f05b9913d7ae00a87d4395620a"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:005fa3432484527f9732ebd315da8da8001593e2cf46a3d817669f062c3d9ed4"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e92fca20c46e9f5e1bb485887d074918b13543b1c2a1185e69bb8d17ab6236a7"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:50bf98d5e563b83cc29471fa114366e6806bc06bc7a25fd59641e41445327836"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:721c76e84fe669be19c5791da68232ca2e05ba5185575086e384352e2c309597"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82d8fd25b7f4675d0c47cf95b594d4e7b158aca33b76aa63d07186e13c0e0ab7"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3daeac64d5b371dea99714f08ffc2c208522ec6b06fbc7866a450dd446f5c0f"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dccab8d5fa1ef9bfba0590ecf4d46df048d18ffe3eec01eeb73a42e0d9e7a8ba"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:aaf27faa992bfee0264dc1f03f4c75e9fcdda66a519db6b957a3f826e285cf12"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:eb30abc20df9ab0814b5a2524f23d75dcf83cde762c161917a2b4b7b55b1e518"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:c72fbbe68c6f32f251bdc08b8611c7b3060612236e960ef848e0a517ddbe76c5"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:982bb1e8b4ffda883b3d0a521e23abcd6fd17418f6d2c4118d257a10199c0ce3"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-win32.whl", hash = "sha256:43e0933a0eff183ee85833f341ec567c0980dae57c464d8a508e1b2ceb336471"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-win_amd64.whl", hash = "sha256:d11b54acf878eef558599658b0ffca78138c8c3655cf4f3a4a673c437e67732e"},
+ {file = "charset_normalizer-3.4.2-py3-none-any.whl", hash = "sha256:7f56930ab0abd1c45cd15be65cc741c28b1c9a34876ce8c17a2fa107810c0af0"},
+ {file = "charset_normalizer-3.4.2.tar.gz", hash = "sha256:5baececa9ecba31eff645232d59845c07aa030f0c81ee70184a90d35099a0e63"},
]
[[package]]
@@ -384,13 +384,13 @@ tqdm = ["tqdm"]
[[package]]
name = "groq"
-version = "0.23.1"
+version = "0.24.0"
description = "The official Python library for the groq API"
optional = false
python-versions = ">=3.8"
files = [
- {file = "groq-0.23.1-py3-none-any.whl", hash = "sha256:05fa38c3d0ad03c19c6185f98f6a73901c2a463e844fd067b79f7b05c8346946"},
- {file = "groq-0.23.1.tar.gz", hash = "sha256:952e34895f9bfb78ab479e495d77b32180262e5c42f531ce3a1722d6e5a04dfb"},
+ {file = "groq-0.24.0-py3-none-any.whl", hash = "sha256:0020e6b0b2b267263c9eb7c318deef13c12f399c6525734200b11d777b00088e"},
+ {file = "groq-0.24.0.tar.gz", hash = "sha256:e821559de8a77fb81d2585b3faec80ff923d6d64fd52339b33f6c94997d6f7f5"},
]
[package.dependencies]
@@ -412,6 +412,26 @@ files = [
{file = "h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1"},
]
+[[package]]
+name = "hf-xet"
+version = "1.1.0"
+description = ""
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "hf_xet-1.1.0-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:0322c42551e275fcb7949c083a54a81b2898e50787c9aa74284fcb8d2c58c12c"},
+ {file = "hf_xet-1.1.0-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:667153a0304ac2debf2af95a8ff7687186f885b493f4cd16344869af270cd110"},
+ {file = "hf_xet-1.1.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:995eeffb119636ea617b96c7d7bf3c3f5ea8727fa57974574e25d700b8532d48"},
+ {file = "hf_xet-1.1.0-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:3aee847da362393331f515c4010d0aaa1c2669acfcca1f4b28946d6949cc0086"},
+ {file = "hf_xet-1.1.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:68c5813a6074aa36e12ef5983230e3b03148cce61e0fcdd294096493795565b4"},
+ {file = "hf_xet-1.1.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:4ee9222bf9274b1c198b88a929de0b5a49349c4962d89c5b3b2f0f7f47d9761c"},
+ {file = "hf_xet-1.1.0-cp37-abi3-win_amd64.whl", hash = "sha256:73153eab9abf3d6973b21e94a67ccba5d595c3e12feb8c0bf50be02964e7f126"},
+ {file = "hf_xet-1.1.0.tar.gz", hash = "sha256:a7c2a4c2b6eee9ce0a1a367a82b60d95ba634420ef1c250addad7aa4af419cf4"},
+]
+
+[package.extras]
+tests = ["pytest"]
+
[[package]]
name = "httpcore"
version = "1.0.9"
@@ -470,18 +490,19 @@ files = [
[[package]]
name = "huggingface-hub"
-version = "0.30.2"
+version = "0.31.1"
description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub"
optional = false
python-versions = ">=3.8.0"
files = [
- {file = "huggingface_hub-0.30.2-py3-none-any.whl", hash = "sha256:68ff05969927058cfa41df4f2155d4bb48f5f54f719dd0390103eefa9b191e28"},
- {file = "huggingface_hub-0.30.2.tar.gz", hash = "sha256:9a7897c5b6fd9dad3168a794a8998d6378210f5b9688d0dfc180b1a228dc2466"},
+ {file = "huggingface_hub-0.31.1-py3-none-any.whl", hash = "sha256:43f73124819b48b42d140cbc0d7a2e6bd15b2853b1b9d728d4d55ad1750cac5b"},
+ {file = "huggingface_hub-0.31.1.tar.gz", hash = "sha256:492bb5f545337aa9e2f59b75ef4c5f535a371e8958a6ce90af056387e67f1180"},
]
[package.dependencies]
filelock = "*"
fsspec = ">=2023.5.0"
+hf-xet = {version = ">=1.1.0,<2.0.0", markers = "platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"arm64\" or platform_machine == \"aarch64\""}
packaging = ">=20.9"
pyyaml = ">=5.1"
requests = "*"
@@ -494,7 +515,7 @@ cli = ["InquirerPy (==0.3.4)"]
dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio (>=4.0.0)", "jedi", "libcst (==1.4.0)", "mypy (==1.5.1)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.9.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"]
fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"]
hf-transfer = ["hf-transfer (>=0.1.4)"]
-hf-xet = ["hf-xet (>=0.1.4)"]
+hf-xet = ["hf-xet (>=1.1.0,<2.0.0)"]
inference = ["aiohttp"]
quality = ["libcst (==1.4.0)", "mypy (==1.5.1)", "ruff (>=0.9.0)"]
tensorflow = ["graphviz", "pydot", "tensorflow"]
@@ -771,48 +792,55 @@ type = ["mypy (==1.14.1)"]
[[package]]
name = "mypy"
-version = "1.0.1"
+version = "1.13.0"
description = "Optional static typing for Python"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "mypy-1.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:71a808334d3f41ef011faa5a5cd8153606df5fc0b56de5b2e89566c8093a0c9a"},
- {file = "mypy-1.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:920169f0184215eef19294fa86ea49ffd4635dedfdea2b57e45cb4ee85d5ccaf"},
- {file = "mypy-1.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:27a0f74a298769d9fdc8498fcb4f2beb86f0564bcdb1a37b58cbbe78e55cf8c0"},
- {file = "mypy-1.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:65b122a993d9c81ea0bfde7689b3365318a88bde952e4dfa1b3a8b4ac05d168b"},
- {file = "mypy-1.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:5deb252fd42a77add936b463033a59b8e48eb2eaec2976d76b6878d031933fe4"},
- {file = "mypy-1.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2013226d17f20468f34feddd6aae4635a55f79626549099354ce641bc7d40262"},
- {file = "mypy-1.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:48525aec92b47baed9b3380371ab8ab6e63a5aab317347dfe9e55e02aaad22e8"},
- {file = "mypy-1.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c96b8a0c019fe29040d520d9257d8c8f122a7343a8307bf8d6d4a43f5c5bfcc8"},
- {file = "mypy-1.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:448de661536d270ce04f2d7dddaa49b2fdba6e3bd8a83212164d4174ff43aa65"},
- {file = "mypy-1.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:d42a98e76070a365a1d1c220fcac8aa4ada12ae0db679cb4d910fabefc88b994"},
- {file = "mypy-1.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e64f48c6176e243ad015e995de05af7f22bbe370dbb5b32bd6988438ec873919"},
- {file = "mypy-1.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5fdd63e4f50e3538617887e9aee91855368d9fc1dea30da743837b0df7373bc4"},
- {file = "mypy-1.0.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:dbeb24514c4acbc78d205f85dd0e800f34062efcc1f4a4857c57e4b4b8712bff"},
- {file = "mypy-1.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a2948c40a7dd46c1c33765718936669dc1f628f134013b02ff5ac6c7ef6942bf"},
- {file = "mypy-1.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5bc8d6bd3b274dd3846597855d96d38d947aedba18776aa998a8d46fabdaed76"},
- {file = "mypy-1.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:17455cda53eeee0a4adb6371a21dd3dbf465897de82843751cf822605d152c8c"},
- {file = "mypy-1.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e831662208055b006eef68392a768ff83596035ffd6d846786578ba1714ba8f6"},
- {file = "mypy-1.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e60d0b09f62ae97a94605c3f73fd952395286cf3e3b9e7b97f60b01ddfbbda88"},
- {file = "mypy-1.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:0af4f0e20706aadf4e6f8f8dc5ab739089146b83fd53cb4a7e0e850ef3de0bb6"},
- {file = "mypy-1.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:24189f23dc66f83b839bd1cce2dfc356020dfc9a8bae03978477b15be61b062e"},
- {file = "mypy-1.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:93a85495fb13dc484251b4c1fd7a5ac370cd0d812bbfc3b39c1bafefe95275d5"},
- {file = "mypy-1.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f546ac34093c6ce33f6278f7c88f0f147a4849386d3bf3ae193702f4fe31407"},
- {file = "mypy-1.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c6c2ccb7af7154673c591189c3687b013122c5a891bb5651eca3db8e6c6c55bd"},
- {file = "mypy-1.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:15b5a824b58c7c822c51bc66308e759243c32631896743f030daf449fe3677f3"},
- {file = "mypy-1.0.1-py3-none-any.whl", hash = "sha256:eda5c8b9949ed411ff752b9a01adda31afe7eae1e53e946dbdf9db23865e66c4"},
- {file = "mypy-1.0.1.tar.gz", hash = "sha256:28cea5a6392bb43d266782983b5a4216c25544cd7d80be681a155ddcdafd152d"},
+ {file = "mypy-1.13.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6607e0f1dd1fb7f0aca14d936d13fd19eba5e17e1cd2a14f808fa5f8f6d8f60a"},
+ {file = "mypy-1.13.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8a21be69bd26fa81b1f80a61ee7ab05b076c674d9b18fb56239d72e21d9f4c80"},
+ {file = "mypy-1.13.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7b2353a44d2179846a096e25691d54d59904559f4232519d420d64da6828a3a7"},
+ {file = "mypy-1.13.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0730d1c6a2739d4511dc4253f8274cdd140c55c32dfb0a4cf8b7a43f40abfa6f"},
+ {file = "mypy-1.13.0-cp310-cp310-win_amd64.whl", hash = "sha256:c5fc54dbb712ff5e5a0fca797e6e0aa25726c7e72c6a5850cfd2adbc1eb0a372"},
+ {file = "mypy-1.13.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:581665e6f3a8a9078f28d5502f4c334c0c8d802ef55ea0e7276a6e409bc0d82d"},
+ {file = "mypy-1.13.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3ddb5b9bf82e05cc9a627e84707b528e5c7caaa1c55c69e175abb15a761cec2d"},
+ {file = "mypy-1.13.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:20c7ee0bc0d5a9595c46f38beb04201f2620065a93755704e141fcac9f59db2b"},
+ {file = "mypy-1.13.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3790ded76f0b34bc9c8ba4def8f919dd6a46db0f5a6610fb994fe8efdd447f73"},
+ {file = "mypy-1.13.0-cp311-cp311-win_amd64.whl", hash = "sha256:51f869f4b6b538229c1d1bcc1dd7d119817206e2bc54e8e374b3dfa202defcca"},
+ {file = "mypy-1.13.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:5c7051a3461ae84dfb5dd15eff5094640c61c5f22257c8b766794e6dd85e72d5"},
+ {file = "mypy-1.13.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:39bb21c69a5d6342f4ce526e4584bc5c197fd20a60d14a8624d8743fffb9472e"},
+ {file = "mypy-1.13.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:164f28cb9d6367439031f4c81e84d3ccaa1e19232d9d05d37cb0bd880d3f93c2"},
+ {file = "mypy-1.13.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a4c1bfcdbce96ff5d96fc9b08e3831acb30dc44ab02671eca5953eadad07d6d0"},
+ {file = "mypy-1.13.0-cp312-cp312-win_amd64.whl", hash = "sha256:a0affb3a79a256b4183ba09811e3577c5163ed06685e4d4b46429a271ba174d2"},
+ {file = "mypy-1.13.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a7b44178c9760ce1a43f544e595d35ed61ac2c3de306599fa59b38a6048e1aa7"},
+ {file = "mypy-1.13.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5d5092efb8516d08440e36626f0153b5006d4088c1d663d88bf79625af3d1d62"},
+ {file = "mypy-1.13.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:de2904956dac40ced10931ac967ae63c5089bd498542194b436eb097a9f77bc8"},
+ {file = "mypy-1.13.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:7bfd8836970d33c2105562650656b6846149374dc8ed77d98424b40b09340ba7"},
+ {file = "mypy-1.13.0-cp313-cp313-win_amd64.whl", hash = "sha256:9f73dba9ec77acb86457a8fc04b5239822df0c14a082564737833d2963677dbc"},
+ {file = "mypy-1.13.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:100fac22ce82925f676a734af0db922ecfea991e1d7ec0ceb1e115ebe501301a"},
+ {file = "mypy-1.13.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7bcb0bb7f42a978bb323a7c88f1081d1b5dee77ca86f4100735a6f541299d8fb"},
+ {file = "mypy-1.13.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bde31fc887c213e223bbfc34328070996061b0833b0a4cfec53745ed61f3519b"},
+ {file = "mypy-1.13.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:07de989f89786f62b937851295ed62e51774722e5444a27cecca993fc3f9cd74"},
+ {file = "mypy-1.13.0-cp38-cp38-win_amd64.whl", hash = "sha256:4bde84334fbe19bad704b3f5b78c4abd35ff1026f8ba72b29de70dda0916beb6"},
+ {file = "mypy-1.13.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0246bcb1b5de7f08f2826451abd947bf656945209b140d16ed317f65a17dc7dc"},
+ {file = "mypy-1.13.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7f5b7deae912cf8b77e990b9280f170381fdfbddf61b4ef80927edd813163732"},
+ {file = "mypy-1.13.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7029881ec6ffb8bc233a4fa364736789582c738217b133f1b55967115288a2bc"},
+ {file = "mypy-1.13.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3e38b980e5681f28f033f3be86b099a247b13c491f14bb8b1e1e134d23bb599d"},
+ {file = "mypy-1.13.0-cp39-cp39-win_amd64.whl", hash = "sha256:a6789be98a2017c912ae6ccb77ea553bbaf13d27605d2ca20a76dfbced631b24"},
+ {file = "mypy-1.13.0-py3-none-any.whl", hash = "sha256:9c250883f9fd81d212e0952c92dbfcc96fc237f4b7c92f56ac81fd48460b3e5a"},
+ {file = "mypy-1.13.0.tar.gz", hash = "sha256:0291a61b6fbf3e6673e3405cfcc0e7650bebc7939659fdca2702958038bd835e"},
]
[package.dependencies]
-mypy-extensions = ">=0.4.3"
+mypy-extensions = ">=1.0.0"
tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
-typing-extensions = ">=3.10"
+typing-extensions = ">=4.6.0"
[package.extras]
dmypy = ["psutil (>=4.0)"]
+faster-cache = ["orjson"]
install-types = ["pip"]
-python2 = ["typed-ast (>=1.4.0,<2)"]
+mypyc = ["setuptools (>=50)"]
reports = ["lxml"]
[[package]]
@@ -873,13 +901,13 @@ files = [
[[package]]
name = "openai"
-version = "1.76.2"
+version = "1.78.0"
description = "The official Python library for the openai API"
optional = false
python-versions = ">=3.8"
files = [
- {file = "openai-1.76.2-py3-none-any.whl", hash = "sha256:9c1d9ad59e6e3bea7205eedc9ca66eeebae18d47b527e505a2b0d2fb1538e26e"},
- {file = "openai-1.76.2.tar.gz", hash = "sha256:f430c8b848775907405c6eff54621254c96f6444c593c097e0cc3a9f8fdda96f"},
+ {file = "openai-1.78.0-py3-none-any.whl", hash = "sha256:1ade6a48cd323ad8a7715e7e1669bb97a17e1a5b8a916644261aaef4bf284778"},
+ {file = "openai-1.78.0.tar.gz", hash = "sha256:254aef4980688468e96cbddb1f348ed01d274d02c64c6c69b0334bf001fb62b3"},
]
[package.dependencies]
@@ -899,13 +927,13 @@ voice-helpers = ["numpy (>=2.0.2)", "sounddevice (>=0.5.1)"]
[[package]]
name = "opentelemetry-api"
-version = "1.32.1"
+version = "1.33.0"
description = "OpenTelemetry Python API"
optional = false
python-versions = ">=3.8"
files = [
- {file = "opentelemetry_api-1.32.1-py3-none-any.whl", hash = "sha256:bbd19f14ab9f15f0e85e43e6a958aa4cb1f36870ee62b7fd205783a112012724"},
- {file = "opentelemetry_api-1.32.1.tar.gz", hash = "sha256:a5be71591694a4d9195caf6776b055aa702e964d961051a0715d05f8632c32fb"},
+ {file = "opentelemetry_api-1.33.0-py3-none-any.whl", hash = "sha256:158df154f628e6615b65fdf6e59f99afabea7213e72c5809dd4adf06c0d997cd"},
+ {file = "opentelemetry_api-1.33.0.tar.gz", hash = "sha256:cc4380fd2e6da7dcb52a828ea81844ed1f4f2eb638ca3c816775109d93d58ced"},
]
[package.dependencies]
@@ -914,30 +942,30 @@ importlib-metadata = ">=6.0,<8.7.0"
[[package]]
name = "opentelemetry-instrumentation"
-version = "0.53b1"
+version = "0.54b0"
description = "Instrumentation Tools & Auto Instrumentation for OpenTelemetry Python"
optional = false
python-versions = ">=3.8"
files = [
- {file = "opentelemetry_instrumentation-0.53b1-py3-none-any.whl", hash = "sha256:c07850cecfbc51e8b357f56d5886ae5ccaa828635b220d0f5e78f941ea9a83ca"},
- {file = "opentelemetry_instrumentation-0.53b1.tar.gz", hash = "sha256:0e69ca2c75727e8a300de671c4a2ec0e86e63a8e906beaa5d6c9f5228e8687e5"},
+ {file = "opentelemetry_instrumentation-0.54b0-py3-none-any.whl", hash = "sha256:1a502238f8af65625ad48800d268d467653e319d959e1732d3b3248916d21327"},
+ {file = "opentelemetry_instrumentation-0.54b0.tar.gz", hash = "sha256:2949d0bbf2316eb5d928a5ef610d0a8a2c261ba80167d878abf6016e1c4ae7bb"},
]
[package.dependencies]
opentelemetry-api = ">=1.4,<2.0"
-opentelemetry-semantic-conventions = "0.53b1"
+opentelemetry-semantic-conventions = "0.54b0"
packaging = ">=18.0"
wrapt = ">=1.0.0,<2.0.0"
[[package]]
name = "opentelemetry-instrumentation-anthropic"
-version = "0.40.2"
+version = "0.40.3"
description = "OpenTelemetry Anthropic instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_anthropic-0.40.2-py3-none-any.whl", hash = "sha256:94e3474dfcb65ada10a5d83056e9e43dc0afbaae43a55bba6b7712672e28d21a"},
- {file = "opentelemetry_instrumentation_anthropic-0.40.2.tar.gz", hash = "sha256:949156556ed4d908196984fac1a8ea3d16edcf9d7395d85729a0e7712b2f818f"},
+ {file = "opentelemetry_instrumentation_anthropic-0.40.3-py3-none-any.whl", hash = "sha256:152a7968d86ade48ffb4df526129def598e8e3eeb1c4fb11a5e6a3bbc94c0fd4"},
+ {file = "opentelemetry_instrumentation_anthropic-0.40.3.tar.gz", hash = "sha256:5e40a9d3342d800180d29e028f3d1aa5db3e0ec482362a7eef054ee500fb8d4f"},
]
[package.dependencies]
@@ -948,13 +976,13 @@ opentelemetry-semantic-conventions-ai = "0.4.5"
[[package]]
name = "opentelemetry-instrumentation-bedrock"
-version = "0.40.2"
+version = "0.40.3"
description = "OpenTelemetry Bedrock instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_bedrock-0.40.2-py3-none-any.whl", hash = "sha256:a12331e2cd77eb61f954acbaa50cdf31954f2b315b52da6354284ce0b83f2773"},
- {file = "opentelemetry_instrumentation_bedrock-0.40.2.tar.gz", hash = "sha256:a1d49d41d8435ba368698a884ffbd4fbda1f1325d6961b805706ee0bbbc6547f"},
+ {file = "opentelemetry_instrumentation_bedrock-0.40.3-py3-none-any.whl", hash = "sha256:cc8ea0358f57876ad12bbcbc9b1ace4b97bf9d8d5d7703a7a55135811b0b433a"},
+ {file = "opentelemetry_instrumentation_bedrock-0.40.3.tar.gz", hash = "sha256:bcb5060060d0ec25bd8c08332eadd23d57ceea1e042fed5e7a7664e5a2fcb817"},
]
[package.dependencies]
@@ -967,13 +995,13 @@ tokenizers = ">=0.13.0"
[[package]]
name = "opentelemetry-instrumentation-cohere"
-version = "0.40.2"
+version = "0.40.3"
description = "OpenTelemetry Cohere instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_cohere-0.40.2-py3-none-any.whl", hash = "sha256:96fde68b0d8ce68f272f4c54f30178cb22cbadb196735a3943cc328891a9d508"},
- {file = "opentelemetry_instrumentation_cohere-0.40.2.tar.gz", hash = "sha256:df3cac041b0769540f2362d8280e7f0179ff1446e47fb2542f22d91822c30fc4"},
+ {file = "opentelemetry_instrumentation_cohere-0.40.3-py3-none-any.whl", hash = "sha256:d39d058ae5cffe02908c1c242b71dc449d07df71c60d7a37159a898e38c6a15c"},
+ {file = "opentelemetry_instrumentation_cohere-0.40.3.tar.gz", hash = "sha256:23f6f237f7cdef661549b3f7d02dc8b1c69ce0cbf3e0050c05ce3f443458fe35"},
]
[package.dependencies]
@@ -984,13 +1012,13 @@ opentelemetry-semantic-conventions-ai = "0.4.5"
[[package]]
name = "opentelemetry-instrumentation-groq"
-version = "0.40.2"
+version = "0.40.3"
description = "OpenTelemetry Groq instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_groq-0.40.2-py3-none-any.whl", hash = "sha256:32e9220439b8356f33edbafbfd8b7f4ea063c1465ff29389abefcc93eca19530"},
- {file = "opentelemetry_instrumentation_groq-0.40.2.tar.gz", hash = "sha256:c127d089a5aec9f49ed9ba6bdbd00d67af596040a778eaef3641cd18d114ae93"},
+ {file = "opentelemetry_instrumentation_groq-0.40.3-py3-none-any.whl", hash = "sha256:3c3c324ab0b49323f268dc54b60fe06aaee04b5bac0f901a70251d4931b611bc"},
+ {file = "opentelemetry_instrumentation_groq-0.40.3.tar.gz", hash = "sha256:b246b258d28ac5af429688b9948c37ced88cba4a7f8f99f629f9b42fcbe36e47"},
]
[package.dependencies]
@@ -1001,13 +1029,13 @@ opentelemetry-semantic-conventions-ai = "0.4.5"
[[package]]
name = "opentelemetry-instrumentation-openai"
-version = "0.40.2"
+version = "0.40.3"
description = "OpenTelemetry OpenAI instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_openai-0.40.2-py3-none-any.whl", hash = "sha256:62fe130f16f2933f1db75f9a14807bb08444534fd8d2e6ad4668ee8b1c3968a5"},
- {file = "opentelemetry_instrumentation_openai-0.40.2.tar.gz", hash = "sha256:61e46e7a9e3f5d7fb0cef82f1fd7bd6a26848a28ec384249875fe5622ddbf622"},
+ {file = "opentelemetry_instrumentation_openai-0.40.3-py3-none-any.whl", hash = "sha256:77e55609fef78d1a81a61aeac667b6423d19f3f3936c4a219963fba0559dae44"},
+ {file = "opentelemetry_instrumentation_openai-0.40.3.tar.gz", hash = "sha256:8e7f260f3c3e25f445281238552c80cbd724c2d61fee4ad9360a86a2e0015114"},
]
[package.dependencies]
@@ -1019,13 +1047,13 @@ tiktoken = ">=0.6.0,<1"
[[package]]
name = "opentelemetry-instrumentation-replicate"
-version = "0.40.2"
+version = "0.40.3"
description = "OpenTelemetry Replicate instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_replicate-0.40.2-py3-none-any.whl", hash = "sha256:ab6234081ae9803981e8e6302524bd25fc3d0e38e9a939bee6ad15f85405ccb8"},
- {file = "opentelemetry_instrumentation_replicate-0.40.2.tar.gz", hash = "sha256:e7edf785c07e94c951f8268ff1204e00b1fcc86059b3475ac04e01b74f9785c6"},
+ {file = "opentelemetry_instrumentation_replicate-0.40.3-py3-none-any.whl", hash = "sha256:24a3ae27137521a4f4cfa523c36be7cedf2ad57eed9fef5f1bfb2a3e2c8aee9e"},
+ {file = "opentelemetry_instrumentation_replicate-0.40.3.tar.gz", hash = "sha256:b62dcee6b8afe6dc30ad98b18014fdd1b6851fc5bd6d0c4dc6c40bff16ce407d"},
]
[package.dependencies]
@@ -1036,13 +1064,13 @@ opentelemetry-semantic-conventions-ai = "0.4.5"
[[package]]
name = "opentelemetry-proto"
-version = "1.32.1"
+version = "1.33.0"
description = "OpenTelemetry Python Proto"
optional = false
python-versions = ">=3.8"
files = [
- {file = "opentelemetry_proto-1.32.1-py3-none-any.whl", hash = "sha256:fe56df31033ab0c40af7525f8bf4c487313377bbcfdf94184b701a8ccebc800e"},
- {file = "opentelemetry_proto-1.32.1.tar.gz", hash = "sha256:bc6385ccf87768f029371535312071a2d09e6c9ebf119ac17dbc825a6a56ba53"},
+ {file = "opentelemetry_proto-1.33.0-py3-none-any.whl", hash = "sha256:84a1d7daacac4aa0f24a5b1190a3e0619011dbff56f945fc2b6fc0a18f48b942"},
+ {file = "opentelemetry_proto-1.33.0.tar.gz", hash = "sha256:ec5aa35486c990207ead2512a8d616d1b324928562c91dbc7e0cb9aa48c60b7b"},
]
[package.dependencies]
@@ -1050,34 +1078,34 @@ protobuf = ">=5.0,<6.0"
[[package]]
name = "opentelemetry-sdk"
-version = "1.32.1"
+version = "1.33.0"
description = "OpenTelemetry Python SDK"
optional = false
python-versions = ">=3.8"
files = [
- {file = "opentelemetry_sdk-1.32.1-py3-none-any.whl", hash = "sha256:bba37b70a08038613247bc42beee5a81b0ddca422c7d7f1b097b32bf1c7e2f17"},
- {file = "opentelemetry_sdk-1.32.1.tar.gz", hash = "sha256:8ef373d490961848f525255a42b193430a0637e064dd132fd2a014d94792a092"},
+ {file = "opentelemetry_sdk-1.33.0-py3-none-any.whl", hash = "sha256:bed376b6d37fbf00688bb65edfee817dd01d48b8559212831437529a6066049a"},
+ {file = "opentelemetry_sdk-1.33.0.tar.gz", hash = "sha256:a7fc56d1e07b218fcc316b24d21b59d3f1967b2ca22c217b05da3a26b797cc68"},
]
[package.dependencies]
-opentelemetry-api = "1.32.1"
-opentelemetry-semantic-conventions = "0.53b1"
+opentelemetry-api = "1.33.0"
+opentelemetry-semantic-conventions = "0.54b0"
typing-extensions = ">=3.7.4"
[[package]]
name = "opentelemetry-semantic-conventions"
-version = "0.53b1"
+version = "0.54b0"
description = "OpenTelemetry Semantic Conventions"
optional = false
python-versions = ">=3.8"
files = [
- {file = "opentelemetry_semantic_conventions-0.53b1-py3-none-any.whl", hash = "sha256:21df3ed13f035f8f3ea42d07cbebae37020367a53b47f1ebee3b10a381a00208"},
- {file = "opentelemetry_semantic_conventions-0.53b1.tar.gz", hash = "sha256:4c5a6fede9de61211b2e9fc1e02e8acacce882204cd770177342b6a3be682992"},
+ {file = "opentelemetry_semantic_conventions-0.54b0-py3-none-any.whl", hash = "sha256:fad7c1cf8908fd449eb5cf9fbbeefb301acf4bc995101f85277899cec125d823"},
+ {file = "opentelemetry_semantic_conventions-0.54b0.tar.gz", hash = "sha256:467b739977bdcb079af1af69f73632535cdb51099d5e3c5709a35d10fe02a9c9"},
]
[package.dependencies]
deprecated = ">=1.2.6"
-opentelemetry-api = "1.32.1"
+opentelemetry-api = "1.33.0"
[[package]]
name = "opentelemetry-semantic-conventions-ai"
@@ -1092,13 +1120,13 @@ files = [
[[package]]
name = "orderly-set"
-version = "5.4.0"
+version = "5.4.1"
description = "Orderly set"
optional = false
python-versions = ">=3.8"
files = [
- {file = "orderly_set-5.4.0-py3-none-any.whl", hash = "sha256:f0192a7f9ae3385b587b71688353fae491d1ca45878496eb71ea118be1623639"},
- {file = "orderly_set-5.4.0.tar.gz", hash = "sha256:c8ff5ba824abe4eebcbbdd3f646ff3648ad0dd52239319d90056d8d30b6cccdd"},
+ {file = "orderly_set-5.4.1-py3-none-any.whl", hash = "sha256:b5e21d21680bd9ef456885db800c5cb4f76a03879880c0175e1b077fb166fd83"},
+ {file = "orderly_set-5.4.1.tar.gz", hash = "sha256:a1fb5a4fdc5e234e9e8d8e5c1bbdbc4540f4dfe50d12bf17c8bc5dbf1c9c878d"},
]
[[package]]
@@ -1890,29 +1918,29 @@ files = [
[[package]]
name = "ruff"
-version = "0.5.7"
+version = "0.11.5"
description = "An extremely fast Python linter and code formatter, written in Rust."
optional = false
python-versions = ">=3.7"
files = [
- {file = "ruff-0.5.7-py3-none-linux_armv6l.whl", hash = "sha256:548992d342fc404ee2e15a242cdbea4f8e39a52f2e7752d0e4cbe88d2d2f416a"},
- {file = "ruff-0.5.7-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:00cc8872331055ee017c4f1071a8a31ca0809ccc0657da1d154a1d2abac5c0be"},
- {file = "ruff-0.5.7-py3-none-macosx_11_0_arm64.whl", hash = "sha256:eaf3d86a1fdac1aec8a3417a63587d93f906c678bb9ed0b796da7b59c1114a1e"},
- {file = "ruff-0.5.7-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a01c34400097b06cf8a6e61b35d6d456d5bd1ae6961542de18ec81eaf33b4cb8"},
- {file = "ruff-0.5.7-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fcc8054f1a717e2213500edaddcf1dbb0abad40d98e1bd9d0ad364f75c763eea"},
- {file = "ruff-0.5.7-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7f70284e73f36558ef51602254451e50dd6cc479f8b6f8413a95fcb5db4a55fc"},
- {file = "ruff-0.5.7-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:a78ad870ae3c460394fc95437d43deb5c04b5c29297815a2a1de028903f19692"},
- {file = "ruff-0.5.7-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9ccd078c66a8e419475174bfe60a69adb36ce04f8d4e91b006f1329d5cd44bcf"},
- {file = "ruff-0.5.7-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7e31c9bad4ebf8fdb77b59cae75814440731060a09a0e0077d559a556453acbb"},
- {file = "ruff-0.5.7-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d796327eed8e168164346b769dd9a27a70e0298d667b4ecee6877ce8095ec8e"},
- {file = "ruff-0.5.7-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:4a09ea2c3f7778cc635e7f6edf57d566a8ee8f485f3c4454db7771efb692c499"},
- {file = "ruff-0.5.7-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:a36d8dcf55b3a3bc353270d544fb170d75d2dff41eba5df57b4e0b67a95bb64e"},
- {file = "ruff-0.5.7-py3-none-musllinux_1_2_i686.whl", hash = "sha256:9369c218f789eefbd1b8d82a8cf25017b523ac47d96b2f531eba73770971c9e5"},
- {file = "ruff-0.5.7-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:b88ca3db7eb377eb24fb7c82840546fb7acef75af4a74bd36e9ceb37a890257e"},
- {file = "ruff-0.5.7-py3-none-win32.whl", hash = "sha256:33d61fc0e902198a3e55719f4be6b375b28f860b09c281e4bdbf783c0566576a"},
- {file = "ruff-0.5.7-py3-none-win_amd64.whl", hash = "sha256:083bbcbe6fadb93cd86709037acc510f86eed5a314203079df174c40bbbca6b3"},
- {file = "ruff-0.5.7-py3-none-win_arm64.whl", hash = "sha256:2dca26154ff9571995107221d0aeaad0e75a77b5a682d6236cf89a58c70b76f4"},
- {file = "ruff-0.5.7.tar.gz", hash = "sha256:8dfc0a458797f5d9fb622dd0efc52d796f23f0a1493a9527f4e49a550ae9a7e5"},
+ {file = "ruff-0.11.5-py3-none-linux_armv6l.whl", hash = "sha256:2561294e108eb648e50f210671cc56aee590fb6167b594144401532138c66c7b"},
+ {file = "ruff-0.11.5-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:ac12884b9e005c12d0bd121f56ccf8033e1614f736f766c118ad60780882a077"},
+ {file = "ruff-0.11.5-py3-none-macosx_11_0_arm64.whl", hash = "sha256:4bfd80a6ec559a5eeb96c33f832418bf0fb96752de0539905cf7b0cc1d31d779"},
+ {file = "ruff-0.11.5-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0947c0a1afa75dcb5db4b34b070ec2bccee869d40e6cc8ab25aca11a7d527794"},
+ {file = "ruff-0.11.5-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ad871ff74b5ec9caa66cb725b85d4ef89b53f8170f47c3406e32ef040400b038"},
+ {file = "ruff-0.11.5-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e6cf918390cfe46d240732d4d72fa6e18e528ca1f60e318a10835cf2fa3dc19f"},
+ {file = "ruff-0.11.5-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:56145ee1478582f61c08f21076dc59153310d606ad663acc00ea3ab5b2125f82"},
+ {file = "ruff-0.11.5-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e5f66f8f1e8c9fc594cbd66fbc5f246a8d91f916cb9667e80208663ec3728304"},
+ {file = "ruff-0.11.5-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80b4df4d335a80315ab9afc81ed1cff62be112bd165e162b5eed8ac55bfc8470"},
+ {file = "ruff-0.11.5-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3068befab73620b8a0cc2431bd46b3cd619bc17d6f7695a3e1bb166b652c382a"},
+ {file = "ruff-0.11.5-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:f5da2e710a9641828e09aa98b92c9ebbc60518fdf3921241326ca3e8f8e55b8b"},
+ {file = "ruff-0.11.5-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:ef39f19cb8ec98cbc762344921e216f3857a06c47412030374fffd413fb8fd3a"},
+ {file = "ruff-0.11.5-py3-none-musllinux_1_2_i686.whl", hash = "sha256:b2a7cedf47244f431fd11aa5a7e2806dda2e0c365873bda7834e8f7d785ae159"},
+ {file = "ruff-0.11.5-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:81be52e7519f3d1a0beadcf8e974715b2dfc808ae8ec729ecfc79bddf8dbb783"},
+ {file = "ruff-0.11.5-py3-none-win32.whl", hash = "sha256:e268da7b40f56e3eca571508a7e567e794f9bfcc0f412c4b607931d3af9c4afe"},
+ {file = "ruff-0.11.5-py3-none-win_amd64.whl", hash = "sha256:6c6dc38af3cfe2863213ea25b6dc616d679205732dc0fb673356c2d69608f800"},
+ {file = "ruff-0.11.5-py3-none-win_arm64.whl", hash = "sha256:67e241b4314f4eacf14a601d586026a962f4002a475aa702c69980a38087aa4e"},
+ {file = "ruff-0.11.5.tar.gz", hash = "sha256:cae2e2439cb88853e421901ec040a758960b576126dab520fa08e9de431d1bef"},
]
[[package]]
@@ -2291,4 +2319,4 @@ type = ["pytest-mypy"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.9,<4"
-content-hash = "6b18fb6088ede49c2e52a1103a46481d57959171b5f2f6ee13cc3089a3804f5d"
+content-hash = "87939a81a476edadf123c149e55430106c951aa853aa78fc4a54b7eeae7df7c5"
diff --git a/pyproject.toml b/pyproject.toml
index 9dddf812..b86cae56 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -3,7 +3,7 @@ name = "humanloop"
[tool.poetry]
name = "humanloop"
-version = "0.8.36b1"
+version = "0.8.36b2"
description = ""
readme = "README.md"
authors = []
@@ -54,8 +54,8 @@ pydantic = ">= 1.9.2"
pydantic-core = "^2.18.2"
typing_extensions = ">= 4.0.0"
-[tool.poetry.dev-dependencies]
-mypy = "1.0.1"
+[tool.poetry.group.dev.dependencies]
+mypy = "==1.13.0"
pytest = "^7.4.0"
pytest-asyncio = "^0.23.5"
python-dateutil = "^2.9.0"
@@ -72,7 +72,7 @@ pyarrow = "^19.0.0"
pytest-retry = "^1.6.3"
python-dotenv = "^1.0.1"
replicate = "^1.0.3"
-ruff = "^0.5.6"
+ruff = "==0.11.5"
types-jsonschema = "^4.23.0.20240813"
types-protobuf = "^5.29.1.20250208"
@@ -86,6 +86,26 @@ plugins = ["pydantic.mypy"]
[tool.ruff]
line-length = 120
+[tool.ruff.lint]
+select = [
+ "E", # pycodestyle errors
+ "F", # pyflakes
+ "I", # isort
+]
+ignore = [
+ "E402", # Module level import not at top of file
+ "E501", # Line too long
+ "E711", # Comparison to `None` should be `cond is not None`
+ "E712", # Avoid equality comparisons to `True`; use `if ...:` checks
+ "E721", # Use `is` and `is not` for type comparisons, or `isinstance()` for insinstance checks
+ "E722", # Do not use bare `except`
+ "E731", # Do not assign a `lambda` expression, use a `def`
+ "F821", # Undefined name
+ "F841" # Local variable ... is assigned to but never used
+]
+
+[tool.ruff.lint.isort]
+section-order = ["future", "standard-library", "third-party", "first-party"]
[build-system]
requires = ["poetry-core"]
diff --git a/reference.md b/reference.md
index 26f361d0..3e7a27b8 100644
--- a/reference.md
+++ b/reference.md
@@ -35,40 +35,11 @@ in the case where you are storing or deriving your Prompt details in code.
```python
-import datetime
-
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.prompts.log(
- path="persona",
- prompt={
- "model": "gpt-4",
- "template": [
- {
- "role": "system",
- "content": "You are {{person}}. Answer questions as this person. Do not break character.",
- }
- ],
- },
- messages=[{"role": "user", "content": "What really happened at Roswell?"}],
- inputs={"person": "Trump"},
- created_at=datetime.datetime.fromisoformat(
- "2024-07-18 23:29:35.178000+00:00",
- ),
- provider_latency=6.5931549072265625,
- output_message={
- "content": "Well, you know, there is so much secrecy involved in government, folks, it's unbelievable. They don't want to tell you everything. They don't tell me everything! But about Roswell, it's a very popular question. I know, I just know, that something very, very peculiar happened there. Was it a weather balloon? Maybe. Was it something extraterrestrial? Could be. I'd love to go down and open up all the classified documents, believe me, I would. But they don't let that happen. The Deep State, folks, the Deep State. They're unbelievable. They want to keep everything a secret. But whatever the truth is, I can tell you this: it's something big, very very big. Tremendous, in fact.",
- "role": "assistant",
- },
- prompt_tokens=100,
- output_tokens=220,
- prompt_cost=1e-05,
- output_cost=0.0002,
- finish_reason="stop",
-)
+import datetime
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.prompts.log(path='persona', prompt={'model': 'gpt-4', 'template': [{'role': "system", 'content': 'You are {{person}}. Answer questions as this person. Do not break character.'}]}, messages=[{'role': "user", 'content': 'What really happened at Roswell?'}], inputs={'person': 'Trump'
+}, created_at=datetime.datetime.fromisoformat("2024-07-19 00:29:35.178000+00:00", ), provider_latency=6.5931549072265625, output_message={'content': "Well, you know, there is so much secrecy involved in government, folks, it's unbelievable. They don't want to tell you everything. They don't tell me everything! But about Roswell, it's a very popular question. I know, I just know, that something very, very peculiar happened there. Was it a weather balloon? Maybe. Was it something extraterrestrial? Could be. I'd love to go down and open up all the classified documents, believe me, I would. But they don't let that happen. The Deep State, folks, the Deep State. They're unbelievable. They want to keep everything a secret. But whatever the truth is, I can tell you this: it's something big, very very big. Tremendous, in fact.", 'role': "assistant"}, prompt_tokens=100, output_tokens=220, prompt_cost=1e-05, output_cost=0.0002, finish_reason='stop', )
```
@@ -202,7 +173,12 @@ Controls how the model uses tools. The following options are supported:
-
-**prompt:** `typing.Optional[PromptKernelRequestParams]` — Details of your Prompt. A new Prompt version will be created if the provided details are new.
+**prompt:** `typing.Optional[PromptLogRequestPromptParams]`
+
+The Prompt configuration to use. Two formats are supported:
+- An object representing the details of the Prompt configuration
+- A string representing the raw contents of a .prompt file
+A new Prompt version will be created if the provided details do not match any existing version.
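+
+A minimal sketch of both formats (illustrative values only; `persona.prompt` is a hypothetical file assumed to already exist on disk, and its contents are not shown here):
+
+```python
+# Object form: describe the Prompt configuration inline.
+client.prompts.log(path="persona", prompt={"model": "gpt-4"})
+
+# String form: pass the raw contents of a .prompt file.
+with open("persona.prompt") as f:
+    client.prompts.log(path="persona", prompt=f.read())
+```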
@@ -404,14 +380,8 @@ Update the details of a Log with the given ID.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.prompts.update_log(
- id="id",
- log_id="log_id",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.prompts.update_log(id='id', log_id='log_id', )
```
@@ -676,10 +646,7 @@ in the case where you are storing or deriving your Prompt details in code.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
response = client.prompts.call_stream()
for chunk in response.data:
yield chunk
@@ -752,7 +719,12 @@ Controls how the model uses tools. The following options are supported:
-
-**prompt:** `typing.Optional[PromptKernelRequestParams]` — Details of your Prompt. A new Prompt version will be created if the provided details are new.
+**prompt:** `typing.Optional[PromptsCallStreamRequestPromptParams]`
+
+The Prompt configuration to use. Two formats are supported:
+- An object representing the details of the Prompt configuration
+- A string representing the raw contents of a .prompt file
+A new Prompt version will be created if the provided details do not match any existing version.
@@ -947,16 +919,9 @@ in the case where you are storing or deriving your Prompt details in code.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.prompts.call(
- version_id="prv_Wu6zx1lAWJRqOyL8nWuZk",
- path="persona",
- messages=[{"role": "user", "content": "What really happened at Roswell?"}],
- inputs={"person": "Trump"},
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.prompts.call(
+    version_id='prv_Wu6zx1lAWJRqOyL8nWuZk',
+    path='persona',
+    messages=[{'role': "user", 'content': 'What really happened at Roswell?'}],
+    inputs={'person': 'Trump'},
+)
```
@@ -1026,7 +991,12 @@ Controls how the model uses tools. The following options are supported:
-
-**prompt:** `typing.Optional[PromptKernelRequestParams]` — Details of your Prompt. A new Prompt version will be created if the provided details are new.
+**prompt:** `typing.Optional[PromptsCallRequestPromptParams]`
+
+The Prompt configuration to use. Two formats are supported:
+- An object representing the details of the Prompt configuration
+- A string representing the raw contents of a .prompt file
+A new Prompt version will be created if the provided details do not match any existing version.
@@ -1210,13 +1180,8 @@ Get a list of all Prompts.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-response = client.prompts.list(
- size=1,
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+response = client.prompts.list(size=1, )
for item in response:
yield item
# alternatively, you can paginate page-by-page
@@ -1269,7 +1234,7 @@ for page in response.iter_pages():
-
-**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Prompts by
+**sort_by:** `typing.Optional[FileSortBy]` — Field to sort Prompts by
@@ -1331,26 +1296,8 @@ that already exists will result in a 409 Conflict error.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.prompts.upsert(
- path="Personal Projects/Coding Assistant",
- model="gpt-4o",
- endpoint="chat",
- template=[
- {
- "content": "You are a helpful coding assistant specialising in {{language}}",
- "role": "system",
- }
- ],
- provider="openai",
- max_tokens=-1,
- temperature=0.7,
- version_name="coding-assistant-v1",
- version_description="Initial version",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.prompts.upsert(path='Personal Projects/Coding Assistant', model='gpt-4o', endpoint="chat", template=[{'content': 'You are a helpful coding assistant specialising in {{language}}', 'role': "system"}], provider="openai", max_tokens=-1, temperature=0.7, version_name='coding-assistant-v1', version_description='Initial version', )
```
@@ -1616,13 +1563,8 @@ By default, the deployed version of the Prompt is returned. Use the query parame
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.prompts.get(
- id="pr_30gco7dx6JDq4200GVOHa",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.prompts.get(id='pr_30gco7dx6JDq4200GVOHa', )
```
@@ -1702,13 +1644,8 @@ Delete the Prompt with the given ID.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.prompts.delete(
- id="pr_30gco7dx6JDq4200GVOHa",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.prompts.delete(id='pr_30gco7dx6JDq4200GVOHa', )
```
@@ -1772,14 +1709,8 @@ Move the Prompt to a different path or change the name.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.prompts.move(
- id="pr_30gco7dx6JDq4200GVOHa",
- path="new directory/new name",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.prompts.move(id='pr_30gco7dx6JDq4200GVOHa', path='new directory/new name', )
```
@@ -1862,14 +1793,9 @@ By default, the deployed version of the Prompt is returned. Use the query parame
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.prompts.populate(
- id="id",
- request={"key": "value"},
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.prompts.populate(id='id', request={'key': 'value'})
```
@@ -1957,13 +1883,8 @@ Get a list of all the versions of a Prompt.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.prompts.list_versions(
- id="pr_30gco7dx6JDq4200GVOHa",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.prompts.list_versions(id='pr_30gco7dx6JDq4200GVOHa', )
```
@@ -2035,14 +1956,8 @@ Delete a version of the Prompt.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.prompts.delete_prompt_version(
- id="id",
- version_id="version_id",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.prompts.delete_prompt_version(id='id', version_id='version_id', )
```
@@ -2114,14 +2029,8 @@ Update the name or description of the Prompt version.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.prompts.patch_prompt_version(
- id="id",
- version_id="version_id",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.prompts.patch_prompt_version(id='id', version_id='version_id', )
```
@@ -2212,15 +2121,8 @@ will be used for calls made to the Prompt in this Environment.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.prompts.set_deployment(
- id="id",
- environment_id="environment_id",
- version_id="version_id",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.prompts.set_deployment(id='id', environment_id='environment_id', version_id='version_id', )
```
@@ -2303,14 +2205,8 @@ will no longer be used for calls made to the Prompt in this Environment.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.prompts.remove_deployment(
- id="id",
- environment_id="environment_id",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.prompts.remove_deployment(id='id', environment_id='environment_id', )
```
@@ -2382,13 +2278,8 @@ List all Environments and their deployed versions for the Prompt.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.prompts.list_environments(
- id="pr_30gco7dx6JDq4200GVOHa",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.prompts.list_environments(id='pr_30gco7dx6JDq4200GVOHa', )
```
@@ -2455,14 +2346,8 @@ within the Prompt for monitoring purposes.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.prompts.update_monitoring(
- id="pr_30gco7dx6JDq4200GVOHa",
- activate=[{"evaluator_version_id": "evv_1abc4308abd"}],
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.prompts.update_monitoring(id='pr_30gco7dx6JDq4200GVOHa', activate=[{'evaluator_version_id': 'evv_1abc4308abd'}], )
```
@@ -2486,9 +2371,7 @@ client.prompts.update_monitoring(
-
-**activate:** `typing.Optional[
- typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]
-]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+**activate:** `typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
@@ -2496,9 +2379,7 @@ client.prompts.update_monitoring(
-
-**deactivate:** `typing.Optional[
- typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]
-]` — Evaluators to deactivate. These will not be run on new Logs.
+**deactivate:** `typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]]` — Evaluators to deactivate. These will not be run on new Logs.
@@ -2552,13 +2433,8 @@ By default, the deployed version of the Prompt is returned. Use the query parame
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.prompts.serialize(
- id="id",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.prompts.serialize(id='id', )
```
@@ -2641,13 +2517,8 @@ This subset is the bit that defines the Prompt version (e.g. with `model` and `t
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.prompts.deserialize(
- prompt="prompt",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.prompts.deserialize(prompt='prompt', )
```
@@ -2722,10 +2593,7 @@ in the case where you are storing or deriving your Tool details in code.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
client.tools.call()
```
@@ -2926,29 +2794,13 @@ in the case where you are storing or deriving your Tool details in code.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.tools.log(
- path="math-tool",
- tool={
- "function": {
- "name": "multiply",
- "description": "Multiply two numbers",
- "parameters": {
- "type": "object",
- "properties": {
- "a": {"type": "number"},
- "b": {"type": "number"},
- },
- "required": ["a", "b"],
- },
- }
- },
- inputs={"a": 5, "b": 7},
- output="35",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.tools.log(
+    path='math-tool',
+    tool={
+        'function': {
+            'name': 'multiply',
+            'description': 'Multiply two numbers',
+            'parameters': {
+                'type': 'object',
+                'properties': {'a': {'type': 'number'}, 'b': {'type': 'number'}},
+                'required': ['a', 'b'],
+            },
+        }
+    },
+    inputs={'a': 5, 'b': 7},
+    output='35',
+)
```
@@ -3198,14 +3050,8 @@ Update the details of a Log with the given ID.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.tools.update(
- id="id",
- log_id="log_id",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.tools.update(id='id', log_id='log_id', )
```
@@ -3381,13 +3227,8 @@ Get a list of all Tools.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-response = client.tools.list(
- size=1,
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+response = client.tools.list(size=1, )
for item in response:
yield item
# alternatively, you can paginate page-by-page
@@ -3440,7 +3281,7 @@ for page in response.iter_pages():
-
-**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Tools by
+**sort_by:** `typing.Optional[FileSortBy]` — Field to sort Tools by
@@ -3502,24 +3343,11 @@ that already exists will result in a 409 Conflict error.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.tools.upsert(
- path="math-tool",
- function={
- "name": "multiply",
- "description": "Multiply two numbers",
- "parameters": {
- "type": "object",
- "properties": {"a": {"type": "number"}, "b": {"type": "number"}},
- "required": ["a", "b"],
- },
- },
- version_name="math-tool-v1",
- version_description="Simple math tool that multiplies two numbers",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.tools.upsert(
+    path='math-tool',
+    function={
+        'name': 'multiply',
+        'description': 'Multiply two numbers',
+        'parameters': {
+            'type': 'object',
+            'properties': {'a': {'type': 'number'}, 'b': {'type': 'number'}},
+            'required': ['a', 'b'],
+        },
+    },
+    version_name='math-tool-v1',
+    version_description='Simple math tool that multiplies two numbers',
+)
```
@@ -3650,13 +3478,8 @@ By default, the deployed version of the Tool is returned. Use the query paramete
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.tools.get(
- id="tl_789ghi",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.tools.get(id='tl_789ghi', )
```
@@ -3736,13 +3559,8 @@ Delete the Tool with the given ID.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.tools.delete(
- id="tl_789ghi",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.tools.delete(id='tl_789ghi', )
```
@@ -3806,14 +3624,8 @@ Move the Tool to a different path or change the name.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.tools.move(
- id="tl_789ghi",
- path="new directory/new name",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.tools.move(id='tl_789ghi', path='new directory/new name', )
```
@@ -3893,13 +3705,8 @@ Get a list of all the versions of a Tool.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.tools.list_versions(
- id="tl_789ghi",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.tools.list_versions(id='tl_789ghi', )
```
@@ -3971,14 +3778,8 @@ Delete a version of the Tool.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.tools.delete_tool_version(
- id="id",
- version_id="version_id",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.tools.delete_tool_version(id='id', version_id='version_id', )
```
@@ -4050,14 +3851,8 @@ Update the name or description of the Tool version.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.tools.update_tool_version(
- id="id",
- version_id="version_id",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.tools.update_tool_version(id='id', version_id='version_id', )
```
@@ -4148,15 +3943,8 @@ will be used for calls made to the Tool in this Environment.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.tools.set_deployment(
- id="tl_789ghi",
- environment_id="staging",
- version_id="tv_012jkl",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.tools.set_deployment(id='tl_789ghi', environment_id='staging', version_id='tv_012jkl', )
```
@@ -4239,14 +4027,8 @@ will no longer be used for calls made to the Tool in this Environment.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.tools.remove_deployment(
- id="tl_789ghi",
- environment_id="staging",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.tools.remove_deployment(id='tl_789ghi', environment_id='staging', )
```
@@ -4318,13 +4100,8 @@ List all Environments and their deployed versions for the Tool.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.tools.list_environments(
- id="tl_789ghi",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.tools.list_environments(id='tl_789ghi', )
```
@@ -4391,14 +4168,8 @@ within the Tool for monitoring purposes.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.tools.update_monitoring(
- id="tl_789ghi",
- activate=[{"evaluator_version_id": "evv_1abc4308abd"}],
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.tools.update_monitoring(id='tl_789ghi', activate=[{'evaluator_version_id': 'evv_1abc4308abd'}], )
```
@@ -4422,9 +4193,7 @@ client.tools.update_monitoring(
-
-**activate:** `typing.Optional[
- typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]
-]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+**activate:** `typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
@@ -4432,9 +4201,7 @@ client.tools.update_monitoring(
-
-**deactivate:** `typing.Optional[
- typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]
-]` — Evaluators to deactivate. These will not be run on new Logs.
+**deactivate:** `typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]]` — Evaluators to deactivate. These will not be run on new Logs.
@@ -4468,13 +4235,8 @@ client.tools.update_monitoring(
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.tools.get_environment_variables(
- id="id",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.tools.get_environment_variables(id='id', )
```
@@ -4538,14 +4300,8 @@ Add an environment variable to a Tool.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.tools.add_environment_variable(
- id="id",
- request=[{"name": "name", "value": "value"}],
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.tools.add_environment_variable(id='id', request=[{'name': 'name', 'value': 'value'}], )
```
@@ -4603,14 +4359,8 @@ client.tools.add_environment_variable(
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.tools.delete_environment_variable(
- id="id",
- name="name",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.tools.delete_environment_variable(id='id', name='name', )
```
@@ -4683,13 +4433,8 @@ List all Datasets.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-response = client.datasets.list(
- size=1,
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+response = client.datasets.list(size=1, )
for item in response:
yield item
# alternatively, you can paginate page-by-page
@@ -4742,7 +4487,7 @@ for page in response.iter_pages():
-
-**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Datasets by
+**sort_by:** `typing.Optional[FileSortBy]` — Field to sort Datasets by
@@ -4814,39 +4559,8 @@ you can add a unique identifier to the Datapoint's inputs such as `{_dedupe_id:
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.datasets.upsert(
- path="datasets/support-queries",
- datapoints=[
- {
- "messages": [
- {
- "role": "user",
- "content": "How do i manage my organizations API keys?\n",
- }
- ],
- "target": {
- "response": 'Hey, thanks for your questions. Here are steps for how to achieve: 1. Log in to the Humanloop Dashboard \n\n2. Click on "Organization Settings."\n If you do not see this option, you might need to contact your organization admin to gain the necessary permissions.\n\n3. Within the settings or organization settings, select the option labeled "API Keys" on the left. Here you will be able to view and manage your API keys.\n\n4. You will see a list of existing API keys. You can perform various actions, such as:\n - **Generate New API Key:** Click on the "Generate New Key" button if you need a new API key.\n - **Revoke an API Key:** If you need to disable an existing key, find the key in the list and click the "Revoke" or "Delete" button.\n - **Copy an API Key:** If you need to use an existing key, you can copy it to your clipboard by clicking the "Copy" button next to the key.\n\n5. **Save and Secure API Keys:** Make sure to securely store any new or existing API keys you are using. Treat them like passwords and do not share them publicly.\n\nIf you encounter any issues or need further assistance, it might be helpful to engage with an engineer or your IT department to ensure you have the necessary permissions and support.\n\nWould you need help with anything else?'
- },
- },
- {
- "messages": [
- {
- "role": "user",
- "content": "Hey, can do I use my code evaluator for monitoring my legal-copilot prompt?",
- }
- ],
- "target": {
- "response": "Hey, thanks for your questions. Here are steps for how to achieve: 1. Navigate to your Prompt dashboard. \n 2. Select the `Monitoring` button on the top right of the Prompt dashboard \n 3. Within the model select the Version of the Evaluator you want to turn on for monitoring. \n\nWould you need help with anything else?"
- },
- },
- ],
- version_name="Initial version",
- version_description="Add two new questions and answers",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.datasets.upsert(path='datasets/support-queries', datapoints=[{'messages': [{'role': "user", 'content': 'How do i manage my organizations API keys?\n'}], 'target': {'response': 'Hey, thanks for your questions. Here are steps for how to achieve: 1. Log in to the Humanloop Dashboard \n\n2. Click on "Organization Settings."\n If you do not see this option, you might need to contact your organization admin to gain the necessary permissions.\n\n3. Within the settings or organization settings, select the option labeled "API Keys" on the left. Here you will be able to view and manage your API keys.\n\n4. You will see a list of existing API keys. You can perform various actions, such as:\n - **Generate New API Key:** Click on the "Generate New Key" button if you need a new API key.\n - **Revoke an API Key:** If you need to disable an existing key, find the key in the list and click the "Revoke" or "Delete" button.\n - **Copy an API Key:** If you need to use an existing key, you can copy it to your clipboard by clicking the "Copy" button next to the key.\n\n5. **Save and Secure API Keys:** Make sure to securely store any new or existing API keys you are using. Treat them like passwords and do not share them publicly.\n\nIf you encounter any issues or need further assistance, it might be helpful to engage with an engineer or your IT department to ensure you have the necessary permissions and support.\n\nWould you need help with anything else?'}}, {'messages': [{'role': "user", 'content': 'Hey, can do I use my code evaluator for monitoring my legal-copilot prompt?'}], 'target': {'response': 'Hey, thanks for your questions. Here are steps for how to achieve: 1. Navigate to your Prompt dashboard. \n 2. Select the `Monitoring` button on the top right of the Prompt dashboard \n 3. Within the model select the Version of the Evaluator you want to turn on for monitoring. \n\nWould you need help with anything else?'}}], version_name='Initial version', version_description='Add two new questions and answers', )
```
@@ -4998,15 +4712,8 @@ By default, the deployed version of the Dataset is returned. Use the query param
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.datasets.get(
- id="ds_b0baF1ca7652",
- version_id="dsv_6L78pqrdFi2xa",
- include_datapoints=True,
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.datasets.get(id='ds_b0baF1ca7652', version_id='dsv_6L78pqrdFi2xa', include_datapoints=True, )
```
@@ -5094,13 +4801,8 @@ Delete the Dataset with the given ID.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.datasets.delete(
- id="id",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.datasets.delete(id='id', )
```
@@ -5164,13 +4866,8 @@ Move the Dataset to a different path or change the name.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.datasets.move(
- id="id",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.datasets.move(id='id', )
```
@@ -5250,14 +4947,8 @@ List all Datapoints for the Dataset with the given ID.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-response = client.datasets.list_datapoints(
- id="ds_b0baF1ca7652",
- size=1,
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+response = client.datasets.list_datapoints(id='ds_b0baF1ca7652', size=1, )
for item in response:
yield item
# alternatively, you can paginate page-by-page
@@ -5358,13 +5049,8 @@ Get a list of the versions for a Dataset.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.datasets.list_versions(
- id="ds_b0baF1ca7652",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.datasets.list_versions(id='ds_b0baF1ca7652', )
```
@@ -5436,14 +5122,8 @@ Delete a version of the Dataset.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.datasets.delete_dataset_version(
- id="id",
- version_id="version_id",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.datasets.delete_dataset_version(id='id', version_id='version_id', )
```
@@ -5515,14 +5195,8 @@ Update the name or description of the Dataset version.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.datasets.update_dataset_version(
- id="id",
- version_id="version_id",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.datasets.update_dataset_version(id='id', version_id='version_id', )
```
@@ -5620,13 +5294,8 @@ and `version_description` parameters.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.datasets.upload_csv(
- id="id",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.datasets.upload_csv(id='id', )
```
@@ -5651,7 +5320,6 @@ client.datasets.upload_csv(
**file:** `from __future__ import annotations
-
core.File` — See core.File for more documentation
@@ -5734,15 +5402,8 @@ Set the deployed version for the specified Environment.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.datasets.set_deployment(
- id="ds_b0baF1ca7652",
- environment_id="staging",
- version_id="dsv_6L78pqrdFi2xa",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.datasets.set_deployment(id='ds_b0baF1ca7652', environment_id='staging', version_id='dsv_6L78pqrdFi2xa', )
```
@@ -5824,14 +5485,8 @@ Remove the deployed version for the specified Environment.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.datasets.remove_deployment(
- id="ds_b0baF1ca7652",
- environment_id="staging",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.datasets.remove_deployment(id='ds_b0baF1ca7652', environment_id='staging', )
```
@@ -5903,13 +5558,8 @@ List all Environments and their deployed versions for the Dataset.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.datasets.list_environments(
- id="id",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.datasets.list_environments(id='id', )
```
@@ -5976,13 +5626,8 @@ Creates a new Log. The evaluated Log will be set as the parent of the created Lo
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.evaluators.log(
- parent_id="parent_id",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.evaluators.log(parent_id='parent_id', )
```
@@ -6262,13 +5907,8 @@ Get a list of all Evaluators.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-response = client.evaluators.list(
- size=1,
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+response = client.evaluators.list(size=1, )
for item in response:
yield item
# alternatively, you can paginate page-by-page
@@ -6321,7 +5961,7 @@ for page in response.iter_pages():
-
-**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Evaluators by
+**sort_by:** `typing.Optional[FileSortBy]` — Field to sort Evaluators by
@@ -6383,21 +6023,8 @@ that already exists will result in a 409 Conflict error.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.evaluators.upsert(
- path="Shared Evaluators/Accuracy Evaluator",
- spec={
- "arguments_type": "target_required",
- "return_type": "number",
- "evaluator_type": "python",
- "code": "def evaluate(answer, target):\n return 0.5",
- },
- version_name="simple-evaluator",
- version_description="Simple evaluator that returns 0.5",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.evaluators.upsert(path='Shared Evaluators/Accuracy Evaluator', spec={'arguments_type': "target_required", 'return_type': "number", 'evaluator_type': 'python', 'code': 'def evaluate(answer, target):\n return 0.5'}, version_name='simple-evaluator', version_description='Simple evaluator that returns 0.5', )
```
@@ -6496,13 +6123,8 @@ By default, the deployed version of the Evaluator is returned. Use the query par
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.evaluators.get(
- id="ev_890bcd",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.evaluators.get(id='ev_890bcd', )
```
@@ -6582,13 +6204,8 @@ Delete the Evaluator with the given ID.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.evaluators.delete(
- id="ev_890bcd",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.evaluators.delete(id='ev_890bcd', )
```
@@ -6652,14 +6269,8 @@ Move the Evaluator to a different path or change the name.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.evaluators.move(
- id="ev_890bcd",
- path="new directory/new name",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.evaluators.move(id='ev_890bcd', path='new directory/new name', )
```
@@ -6739,13 +6350,8 @@ Get a list of all the versions of an Evaluator.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.evaluators.list_versions(
- id="ev_890bcd",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.evaluators.list_versions(id='ev_890bcd', )
```
@@ -6817,14 +6423,8 @@ Delete a version of the Evaluator.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.evaluators.delete_evaluator_version(
- id="id",
- version_id="version_id",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.evaluators.delete_evaluator_version(id='id', version_id='version_id', )
```
@@ -6896,14 +6496,8 @@ Update the name or description of the Evaluator version.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.evaluators.update_evaluator_version(
- id="id",
- version_id="version_id",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.evaluators.update_evaluator_version(id='id', version_id='version_id', )
```
@@ -6994,15 +6588,8 @@ will be used for calls made to the Evaluator in this Environment.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.evaluators.set_deployment(
- id="ev_890bcd",
- environment_id="staging",
- version_id="evv_012def",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.evaluators.set_deployment(id='ev_890bcd', environment_id='staging', version_id='evv_012def', )
```
@@ -7085,14 +6672,8 @@ will no longer be used for calls made to the Evaluator in this Environment.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.evaluators.remove_deployment(
- id="ev_890bcd",
- environment_id="staging",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.evaluators.remove_deployment(id='ev_890bcd', environment_id='staging', )
```
@@ -7164,13 +6745,8 @@ List all Environments and their deployed versions for the Evaluator.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.evaluators.list_environments(
- id="ev_890bcd",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.evaluators.list_environments(id='ev_890bcd', )
```
@@ -7237,13 +6813,8 @@ within the Evaluator for monitoring purposes.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.evaluators.update_monitoring(
- id="id",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.evaluators.update_monitoring(id='id', )
```
@@ -7267,9 +6838,7 @@ client.evaluators.update_monitoring(
-
-**activate:** `typing.Optional[
- typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]
-]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+**activate:** `typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
@@ -7277,9 +6846,7 @@ client.evaluators.update_monitoring(
-
-**deactivate:** `typing.Optional[
- typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]
-]` — Evaluators to deactivate. These will not be run on new Logs.
+**deactivate:** `typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]]` — Evaluators to deactivate. These will not be run on new Logs.
@@ -7333,41 +6900,13 @@ in order to trigger Evaluators.
```python
-import datetime
-
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.flows.log(
- id="fl_6o701g4jmcanPVHxdqD0O",
- flow={
- "attributes": {
- "prompt": {
- "template": "You are a helpful assistant helping with medical anamnesis",
- "model": "gpt-4o",
- "temperature": 0.8,
- },
- "tool": {
- "name": "retrieval_tool_v3",
- "description": "Retrieval tool for MedQA.",
- "source_code": "def retrieval_tool(question: str) -> str:\n pass\n",
- },
- }
- },
- inputs={
- "question": "Patient with a history of diabetes and hypertension presents with chest pain and shortness of breath."
- },
- output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.",
- log_status="incomplete",
- start_time=datetime.datetime.fromisoformat(
- "2024-07-08 21:40:35+00:00",
- ),
- end_time=datetime.datetime.fromisoformat(
- "2024-07-08 21:40:39+00:00",
- ),
-)
+import datetime
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.flows.log(id='fl_6o701g4jmcanPVHxdqD0O', flow={'attributes': {'prompt': {'template': 'You are a helpful assistant helping with medical anamnesis', 'model': 'gpt-4o', 'temperature': 0.8}, 'tool': {'name': 'retrieval_tool_v3', 'description': 'Retrieval tool for MedQA.', 'source_code': 'def retrieval_tool(question: str) -> str:\n    pass\n'}}}, inputs={'question': 'Patient with a history of diabetes and hypertension presents with chest pain and shortness of breath.'}, output='The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.', log_status="incomplete", start_time=datetime.datetime.fromisoformat("2024-07-08 22:40:35+00:00", ), end_time=datetime.datetime.fromisoformat("2024-07-08 22:40:39+00:00", ), )
```
@@ -7644,18 +7183,9 @@ The end_time log attribute will be set to match the time the log is marked as co
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.flows.update_log(
- log_id="medqa_experiment_0001",
- inputs={
- "question": "Patient with a history of diabetes and normal tension presents with chest pain and shortness of breath."
- },
- output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.",
- log_status="complete",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.flows.update_log(log_id='medqa_experiment_0001', inputs={'question': 'Patient with a history of diabetes and normal tension presents with chest pain and shortness of breath.'}, output='The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.', log_status="complete", )
```
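A minimal sketch of the incomplete-to-complete lifecycle implied by the two examples above, assuming the response from `flows.log` exposes the new Log's ID as `id` (the inputs and output below are placeholders):

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Open a trace as incomplete while the Flow is still running.
log = client.flows.log(
    id="fl_6o701g4jmcanPVHxdqD0O",
    inputs={"question": "..."},
    log_status="incomplete",
)

# Later, mark the same Log as complete; end_time is set when the Log completes.
client.flows.update_log(
    log_id=log.id,  # assumption: the create response exposes the Log ID as `.id`
    output="...",
    log_status="complete",
)
```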
@@ -7770,13 +7300,8 @@ By default, the deployed version of the Flow is returned. Use the query paramete
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.flows.get(
- id="fl_6o701g4jmcanPVHxdqD0O",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.flows.get(id='fl_6o701g4jmcanPVHxdqD0O', )
```
@@ -7856,13 +7381,8 @@ Delete the Flow with the given ID.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.flows.delete(
- id="fl_6o701g4jmcanPVHxdqD0O",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.flows.delete(id='fl_6o701g4jmcanPVHxdqD0O', )
```
@@ -7926,14 +7446,8 @@ Move the Flow to a different path or change the name.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.flows.move(
- id="fl_6o701g4jmcanPVHxdqD0O",
- path="new directory/new name",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.flows.move(id='fl_6o701g4jmcanPVHxdqD0O', path='new directory/new name', )
```
@@ -8021,13 +7535,8 @@ Get a list of Flows.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-response = client.flows.list(
- size=1,
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+response = client.flows.list(size=1, )
for item in response:
yield item
# alternatively, you can paginate page-by-page
@@ -8080,7 +7589,7 @@ for page in response.iter_pages():
-
-**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Flows by
+**sort_by:** `typing.Optional[FileSortBy]` — Field to sort Flows by
@@ -8142,27 +7651,12 @@ that already exists will result in a 409 Conflict error.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.flows.upsert(
- path="Personal Projects/MedQA Flow",
- attributes={
- "prompt": {
- "template": "You are a helpful medical assistant helping with medical anamnesis. Answer {{question}}",
- "model": "gpt-4o",
- "temperature": 0.8,
- },
- "tool": {
- "name": "retrieval_tool_v3",
- "description": "Retrieval tool for MedQA.",
- "source_code": "def retrieval_tool(question: str) -> str:\n pass\n",
- },
- "version_name": "medqa-flow-v1",
- "version_description": "Initial version",
- },
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.flows.upsert(path='Personal Projects/MedQA Flow', attributes={'prompt': {'template': 'You are a helpful medical assistant helping with medical anamnesis. Answer {{question}}', 'model': 'gpt-4o', 'temperature': 0.8}, 'tool': {'name': 'retrieval_tool_v3', 'description': 'Retrieval tool for MedQA.', 'source_code': 'def retrieval_tool(question: str) -> str:\n    pass\n'}, 'version_name': 'medqa-flow-v1', 'version_description': 'Initial version'}, )
```
@@ -8258,13 +7752,8 @@ Get a list of all the versions of a Flow.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.flows.list_versions(
- id="fl_6o701g4jmcanPVHxdqD0O",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.flows.list_versions(id='fl_6o701g4jmcanPVHxdqD0O', )
```
@@ -8336,14 +7825,8 @@ Delete a version of the Flow.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.flows.delete_flow_version(
- id="id",
- version_id="version_id",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.flows.delete_flow_version(id='id', version_id='version_id', )
```
@@ -8415,14 +7898,8 @@ Update the name or description of the Flow version.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.flows.update_flow_version(
- id="id",
- version_id="version_id",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.flows.update_flow_version(id='id', version_id='version_id', )
```
@@ -8513,15 +7990,8 @@ will be used for calls made to the Flow in this Environment.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.flows.set_deployment(
- id="fl_6o701g4jmcanPVHxdqD0O",
- environment_id="staging",
- version_id="flv_6o701g4jmcanPVHxdqD0O",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.flows.set_deployment(id='fl_6o701g4jmcanPVHxdqD0O', environment_id='staging', version_id='flv_6o701g4jmcanPVHxdqD0O', )
```
@@ -8604,14 +8074,8 @@ will no longer be used for calls made to the Flow in this Environment.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.flows.remove_deployment(
- id="fl_6o701g4jmcanPVHxdqD0O",
- environment_id="staging",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.flows.remove_deployment(id='fl_6o701g4jmcanPVHxdqD0O', environment_id='staging', )
```
@@ -8683,13 +8147,8 @@ List all Environments and their deployed versions for the Flow.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.flows.list_environments(
- id="fl_6o701g4jmcanPVHxdqD0O",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.flows.list_environments(id='fl_6o701g4jmcanPVHxdqD0O', )
```
@@ -8756,14 +8215,8 @@ within the Flow for monitoring purposes.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.flows.update_monitoring(
- id="fl_6o701g4jmcanPVHxdqD0O",
- activate=[{"evaluator_version_id": "evv_1abc4308abd"}],
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.flows.update_monitoring(id='fl_6o701g4jmcanPVHxdqD0O', activate=[{'evaluator_version_id': 'evv_1abc4308abd'}], )
```
@@ -8787,9 +8240,7 @@ client.flows.update_monitoring(
-
-**activate:** `typing.Optional[
- typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]
-]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+**activate:** `typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
@@ -8797,9 +8248,7 @@ client.flows.update_monitoring(
-
-**deactivate:** `typing.Optional[
- typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]
-]` — Evaluators to deactivate. These will not be run on new Logs.
+**deactivate:** `typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]]` — Evaluators to deactivate. These will not be run on new Logs.
@@ -8854,56 +8303,12 @@ in order to trigger Evaluators.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.agents.log(
- path="Banking/Teller Agent",
- agent={
- "provider": "anthropic",
- "endpoint": "chat",
- "model": "claude-3-7-sonnet-latest",
- "reasoning_effort": 1024,
- "template": [
- {
- "role": "system",
- "content": "You are a helpful digital assistant, helping users navigate our digital banking platform.",
- }
- ],
- "max_iterations": 3,
- "tools": [
- {
- "type": "file",
- "link": {
- "file_id": "pr_1234567890",
- "version_id": "prv_1234567890",
- },
- "on_agent_call": "continue",
- },
- {
- "type": "inline",
- "json_schema": {
- "name": "stop",
- "description": "Call this tool when you have finished your task.",
- "parameters": {
- "type": "object",
- "properties": {
- "output": {
- "type": "string",
- "description": "The final output to return to the user.",
- }
- },
- "additionalProperties": False,
- "required": ["output"],
- },
- "strict": True,
- },
- "on_agent_call": "stop",
- },
- ],
- },
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.agents.log(path='Banking/Teller Agent', agent={'provider': "anthropic", 'endpoint': "chat", 'model': 'claude-3-7-sonnet-latest', 'reasoning_effort': 1024, 'template': [{'role': "system", 'content': 'You are a helpful digital assistant, helping users navigate our digital banking platform.'}], 'max_iterations': 3, 'tools': [{'type': 'file', 'link': {'file_id': 'pr_1234567890', 'version_id': 'prv_1234567890'}, 'on_agent_call': "continue"}, {'type': 'inline', 'json_schema': {'name': 'stop', 'description': 'Call this tool when you have finished your task.', 'parameters': {'type': 'object', 'properties': {'output': {'type': 'string', 'description': 'The final output to return to the user.'}}, 'additionalProperties': False, 'required': ['output']}, 'strict': True}, 'on_agent_call': "stop"}]}, )
```
@@ -9037,7 +8442,12 @@ Controls how the model uses tools. The following options are supported:
-
-**agent:** `typing.Optional[AgentKernelRequestParams]` — Details of your Agent. A new Agent version will be created if the provided details are new.
+**agent:** `typing.Optional[AgentLogRequestAgentParams]`
+
+The Agent configuration to use. Two formats are supported:
+- An object representing the details of the Agent configuration
+- A string representing the raw contents of a .agent file
+A new Agent version will be created if the provided details do not match any existing version.
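The two accepted formats for `agent` can be illustrated with a minimal sketch (the object below is a pared-down configuration and the file contents are a placeholder, not values taken from this SDK):

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Format 1: an object describing the Agent configuration.
client.agents.log(
    path="Banking/Teller Agent",
    agent={"provider": "anthropic", "endpoint": "chat", "model": "claude-3-7-sonnet-latest"},
)

# Format 2: the raw contents of a .agent file, passed as a string.
raw_agent = open("teller_agent.agent").read()  # hypothetical local file
client.agents.log(path="Banking/Teller Agent", agent=raw_agent)
```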
@@ -9239,26 +8649,8 @@ Update the details of a Log with the given ID.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.agents.update_log(
- id="ag_1234567890",
- log_id="log_1234567890",
- messages=[
- {"role": "user", "content": "I need to withdraw $1000"},
- {
- "role": "assistant",
- "content": "Of course! Would you like to use your savings or checking account?",
- },
- ],
- output_message={
- "role": "assistant",
- "content": "I'm sorry, I can't help with that.",
- },
- log_status="complete",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.agents.update_log(id='ag_1234567890', log_id='log_1234567890', messages=[{'role': "user", 'content': 'I need to withdraw $1000'}, {'role': "assistant", 'content': 'Of course! Would you like to use your savings or checking account?'}], output_message={'role': "assistant", 'content': "I'm sorry, I can't help with that."}, log_status="complete", )
```
@@ -9392,10 +8784,7 @@ your Agent details in code.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
response = client.agents.call_stream()
for chunk in response.data:
yield chunk
@@ -9468,7 +8857,12 @@ Controls how the model uses tools. The following options are supported:
-
-**agent:** `typing.Optional[AgentKernelRequestParams]` — Details of your Agent. A new Agent version will be created if the provided details are new.
+**agent:** `typing.Optional[AgentsCallStreamRequestAgentParams]`
+
+The Agent configuration to use. Two formats are supported:
+- An object representing the details of the Agent configuration
+- A string representing the raw contents of a .agent file
+A new Agent version will be created if the provided details do not match any existing version.
@@ -9650,19 +9044,8 @@ your Agent details in code.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.agents.call(
- path="Banking/Teller Agent",
- messages=[
- {
- "role": "user",
- "content": "I'd like to deposit $1000 to my savings account from my checking account.",
- }
- ],
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.agents.call(path='Banking/Teller Agent', messages=[{'role': "user", 'content': "I'd like to deposit $1000 to my savings account from my checking account."}], )
```
@@ -9732,7 +9115,12 @@ Controls how the model uses tools. The following options are supported:
-
-**agent:** `typing.Optional[AgentKernelRequestParams]` — Details of your Agent. A new Agent version will be created if the provided details are new.
+**agent:** `typing.Optional[AgentsCallRequestAgentParams]`
+
+The Agent configuration to use. Two formats are supported:
+- An object representing the details of the Agent configuration
+- A string representing the raw contents of a .agent file
+A new Agent version will be created if the provided details do not match any existing version.
@@ -9908,14 +9296,8 @@ The original log must be in an incomplete state to be continued.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-response = client.agents.continue_call_stream(
- log_id="log_id",
- messages=[{"role": "user"}],
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+response = client.agents.continue_call_stream(log_id='log_id', messages=[{'role': "user"}], )
for chunk in response.data:
yield chunk
@@ -10013,20 +9395,8 @@ The original log must be in an incomplete state to be continued.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.agents.continue_call(
- log_id="log_1234567890",
- messages=[
- {
- "role": "tool",
- "content": '{"type": "checking", "balance": 5200}',
- "tool_call_id": "tc_1234567890",
- }
- ],
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.agents.continue_call(log_id='log_1234567890', messages=[{'role': "tool", 'content': '{"type": "checking", "balance": 5200}', 'tool_call_id': 'tc_1234567890'}], )
```
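A minimal sketch of continuing an incomplete call with a tool result, assuming the call response exposes the Log's ID as `log_id` (the user message is illustrative):

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Call the Agent; if it pauses on a tool call, the Log remains incomplete.
response = client.agents.call(
    path="Banking/Teller Agent",
    messages=[{"role": "user", "content": "What is my checking balance?"}],
)

# Resume the same Log by supplying the tool result.
client.agents.continue_call(
    log_id=response.log_id,  # assumption: the response exposes the Log ID as `.log_id`
    messages=[{"role": "tool", "content": '{"type": "checking", "balance": 5200}', "tool_call_id": "tc_1234567890"}],
)
```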
@@ -10114,13 +9484,8 @@ Get a list of all Agents.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-response = client.agents.list(
- size=1,
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+response = client.agents.list(size=1, )
for item in response:
yield item
# alternatively, you can paginate page-by-page
@@ -10173,7 +9538,7 @@ for page in response.iter_pages():
-
-**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Agents by
+**sort_by:** `typing.Optional[FileSortBy]` — Field to sort Agents by
@@ -10236,48 +9601,12 @@ that already exists will result in a 409 Conflict error.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.agents.upsert(
- path="Banking/Teller Agent",
- provider="anthropic",
- endpoint="chat",
- model="claude-3-7-sonnet-latest",
- reasoning_effort=1024,
- template=[
- {
- "role": "system",
- "content": "You are a helpful digital assistant, helping users navigate our digital banking platform.",
- }
- ],
- max_iterations=3,
- tools=[
- {
- "type": "inline",
- "json_schema": {
- "name": "stop",
- "description": "Call this tool when you have finished your task.",
- "parameters": {
- "type": "object",
- "properties": {
- "output": {
- "type": "string",
- "description": "The final output to return to the user.",
- }
- },
- "additionalProperties": False,
- "required": ["output"],
- },
- "strict": True,
- },
- "on_agent_call": "stop",
- }
- ],
- version_name="teller-agent-v1",
- version_description="Initial version",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.agents.upsert(path='Banking/Teller Agent', provider="anthropic", endpoint="chat", model='claude-3-7-sonnet-latest', reasoning_effort=1024, template=[{'role': "system", 'content': 'You are a helpful digital assistant, helping users navigate our digital banking platform.'}], max_iterations=3, tools=[{'type': 'inline', 'json_schema': {'name': 'stop', 'description': 'Call this tool when you have finished your task.', 'parameters': {'type': 'object', 'properties': {'output': {'type': 'string', 'description': 'The final output to return to the user.'}}, 'additionalProperties': False, 'required': ['output']}, 'strict': True}, 'on_agent_call': "stop"}], version_name='teller-agent-v1', version_description='Initial version', )
```
@@ -10540,14 +9869,8 @@ Delete a version of the Agent.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.agents.delete_agent_version(
- id="ag_1234567890",
- version_id="agv_1234567890",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.agents.delete_agent_version(id='ag_1234567890', version_id='agv_1234567890', )
```
@@ -10619,16 +9942,8 @@ Update the name or description of the Agent version.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.agents.patch_agent_version(
- id="ag_1234567890",
- version_id="agv_1234567890",
- name="teller-agent-v2",
- description="Updated version",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.agents.patch_agent_version(id='ag_1234567890', version_id='agv_1234567890', name='teller-agent-v2', description='Updated version', )
```
@@ -10719,13 +10034,8 @@ By default, the deployed version of the Agent is returned. Use the query paramet
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.agents.get(
- id="ag_1234567890",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.agents.get(id='ag_1234567890', )
```
@@ -10805,13 +10115,8 @@ Delete the Agent with the given ID.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.agents.delete(
- id="ag_1234567890",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.agents.delete(id='ag_1234567890', )
```
@@ -10875,14 +10180,8 @@ Move the Agent to a different path or change the name.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.agents.move(
- id="ag_1234567890",
- path="new directory/new name",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.agents.move(id='ag_1234567890', path='new directory/new name', )
```
@@ -10970,13 +10269,8 @@ Get a list of all the versions of an Agent.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.agents.list_versions(
- id="ag_1234567890",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.agents.list_versions(id='ag_1234567890', )
```
@@ -11051,15 +10345,8 @@ will be used for calls made to the Agent in this Environment.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.agents.set_deployment(
- id="id",
- environment_id="environment_id",
- version_id="version_id",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.agents.set_deployment(id='id', environment_id='environment_id', version_id='version_id', )
```
@@ -11142,14 +10429,8 @@ will no longer be used for calls made to the Agent in this Environment.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.agents.remove_deployment(
- id="id",
- environment_id="environment_id",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.agents.remove_deployment(id='id', environment_id='environment_id', )
```
@@ -11221,13 +10502,8 @@ List all Environments and their deployed versions for the Agent.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.agents.list_environments(
- id="ag_1234567890",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.agents.list_environments(id='ag_1234567890', )
```
@@ -11294,18 +10570,8 @@ within the Agent for monitoring purposes.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.agents.update_monitoring(
- id="ag_1234567890",
- activate=[
- {"evaluator_version_id": "ev_1234567890"},
- {"evaluator_id": "ev_2345678901", "environment_id": "env_1234567890"},
- ],
- deactivate=[{"evaluator_version_id": "ev_0987654321"}],
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.agents.update_monitoring(id='ag_1234567890', activate=[{'evaluator_version_id': 'ev_1234567890'}, {'evaluator_id': 'ev_2345678901', 'environment_id': 'env_1234567890'}], deactivate=[{'evaluator_version_id': 'ev_0987654321'}], )
```
@@ -11329,9 +10595,7 @@ client.agents.update_monitoring(
-
-**activate:** `typing.Optional[
- typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]
-]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
+**activate:** `typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]]` — Evaluators to activate for Monitoring. These will be automatically run on new Logs.
@@ -11339,9 +10603,7 @@ client.agents.update_monitoring(
-
-**deactivate:** `typing.Optional[
- typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]
-]` — Evaluators to deactivate. These will not be run on new Logs.
+**deactivate:** `typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]]` — Evaluators to deactivate. These will not be run on new Logs.
@@ -11395,13 +10657,8 @@ By default, the deployed version of the Agent is returned. Use the query paramet
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.agents.serialize(
- id="id",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.agents.serialize(id='id', )
```
@@ -11484,13 +10741,8 @@ This subset is the bit that defines the Agent version (e.g. with `model` and `te
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.agents.deserialize(
- agent="agent",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.agents.deserialize(agent='agent', )
```
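A hedged round-trip sketch, assuming `serialize` returns the raw `.agent` text that `deserialize` accepts:

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Serialize a deployed Agent version to its raw .agent representation,
# then parse that text back into the version-defining subset of fields.
raw_agent = client.agents.serialize(id="ag_1234567890")
parsed = client.agents.deserialize(agent=raw_agent)
```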
@@ -11555,10 +10807,7 @@ Retrieve a list of all Directories.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
client.directories.list()
```
@@ -11615,10 +10864,7 @@ Creates a Directory.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
client.directories.create()
```
@@ -11699,13 +10945,8 @@ Fetches a directory by ID.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.directories.get(
- id="id",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.directories.get(id='id', )
```
@@ -11771,13 +11012,8 @@ The Directory must be empty (i.e. contain no Directories or Files).
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.directories.delete(
- id="id",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.directories.delete(id='id', )
```
@@ -11841,13 +11077,8 @@ Update the Directory with the given ID.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.directories.update(
- id="id",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.directories.update(id='id', )
```
@@ -11936,10 +11167,7 @@ Get a paginated list of files.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
client.files.list_files()
```
@@ -11980,6 +11208,14 @@ client.files.list_files()
-
+**path:** `typing.Optional[str]` — Path of the directory to filter for. Returns files in this directory and all its subdirectories.
+
+
+
+
+
+-
+
**template:** `typing.Optional[bool]` — Filter to include only template files.
@@ -12004,7 +11240,7 @@ client.files.list_files()
-
-**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort files by
+**sort_by:** `typing.Optional[FileSortBy]` — Field to sort files by
@@ -12020,6 +11256,14 @@ client.files.list_files()
-
+**include_raw_file_content:** `typing.Optional[bool]` — Whether to include the raw file content in the response. Currently only supported for Agents and Prompts.
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
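A minimal sketch combining the new `path` and `include_raw_file_content` parameters (the directory name is a placeholder):

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# List Files under a directory subtree and include raw file contents
# where supported (currently Agents and Prompts).
client.files.list_files(
    path="Banking",
    include_raw_file_content=True,
)
```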
@@ -12060,13 +11304,8 @@ Retrieve a File by path.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.files.retrieve_by_path(
- path="path",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.files.retrieve_by_path(path='path', )
```
@@ -12098,6 +11337,14 @@ client.files.retrieve_by_path(
-
+**include_raw_file_content:** `typing.Optional[bool]` — Whether to include the raw file content in the response. Currently only supported for Agents and Prompts.
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -12139,14 +11386,8 @@ Retrieve a list of Evaluations for the specified File.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-response = client.evaluations.list(
- file_id="pr_30gco7dx6JDq4200GVOHa",
- size=1,
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+response = client.evaluations.list(file_id='pr_30gco7dx6JDq4200GVOHa', size=1, )
for item in response:
yield item
# alternatively, you can paginate page-by-page
@@ -12235,13 +11476,8 @@ You can then add Runs to this Evaluation using the `POST /evaluations/{id}/runs`
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.evaluations.create(
- evaluators=[{"version_id": "version_id"}],
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.evaluations.create(evaluators=[{'version_id': 'version_id'}], )
```
@@ -12323,14 +11559,8 @@ The Evaluators will be run on the Logs generated for the Evaluation.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.evaluations.add_evaluators(
- id="id",
- evaluators=[{"version_id": "version_id"}],
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.evaluations.add_evaluators(id='id', evaluators=[{'version_id': 'version_id'}], )
```
@@ -12404,14 +11634,8 @@ The Evaluator will no longer be run on the Logs in the Evaluation.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.evaluations.remove_evaluator(
- id="id",
- evaluator_version_id="evaluator_version_id",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.evaluations.remove_evaluator(id='id', evaluator_version_id='evaluator_version_id', )
```
@@ -12489,13 +11713,8 @@ To retrieve stats for the Evaluation, use the `GET /evaluations/{id}/stats` endp
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.evaluations.get(
- id="ev_567yza",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.evaluations.get(id='ev_567yza', )
```
@@ -12561,13 +11780,8 @@ The Runs and Evaluators in the Evaluation will not be deleted.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.evaluations.delete(
- id="ev_567yza",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.evaluations.delete(id='ev_567yza', )
```
@@ -12631,13 +11845,8 @@ List all Runs for an Evaluation.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.evaluations.list_runs_for_evaluation(
- id="id",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.evaluations.list_runs_for_evaluation(id='id', )
```
@@ -12714,13 +11923,8 @@ the `GET /evaluations/{id}/runs` endpoint and check its status.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.evaluations.create_run(
- id="id",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.evaluations.create_run(id='id', )
```
@@ -12819,14 +12023,8 @@ that exists within another Evaluation.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.evaluations.add_existing_run(
- id="id",
- run_id="run_id",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.evaluations.add_existing_run(id='id', run_id='run_id', )
```
@@ -12901,14 +12099,8 @@ If this Run is used in any other Evaluations, it will still be available in thos
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.evaluations.remove_run(
- id="id",
- run_id="run_id",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.evaluations.remove_run(id='id', run_id='run_id', )
```
@@ -12983,14 +12175,8 @@ You can cancel a running/pending Run, or mark a Run that uses external or human
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.evaluations.update_evaluation_run(
- id="id",
- run_id="run_id",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.evaluations.update_evaluation_run(id='id', run_id='run_id', )
```
@@ -13078,15 +12264,8 @@ Add the specified Logs to a Run.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.evaluations.add_logs_to_run(
- id="id",
- run_id="run_id",
- log_ids=["log_ids"],
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.evaluations.add_logs_to_run(id='id', run_id='run_id', log_ids=['log_ids'], )
```
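A minimal sketch chaining these endpoints, assuming the response from `create_run` exposes the new Run's ID as `id` (the Log ID below is a placeholder):

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Create a Run on an Evaluation, then attach existing Logs to it.
run = client.evaluations.create_run(id="ev_567yza")
client.evaluations.add_logs_to_run(
    id="ev_567yza",
    run_id=run.id,  # assumption: the create response exposes the Run ID as `.id`
    log_ids=["prv_Wu6zx1lAWJRqOyL8nWuZk"],
)
```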
@@ -13169,13 +12348,8 @@ corresponding Evaluator statistics (such as the mean and percentiles).
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.evaluations.get_stats(
- id="id",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.evaluations.get_stats(id='id', )
```
@@ -13241,13 +12415,8 @@ This returns the Logs associated with all Runs within the Evaluation.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.evaluations.get_logs(
- id="id",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.evaluations.get_logs(id='id', )
```
@@ -13336,14 +12505,8 @@ List all Logs for the given filter criteria.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-response = client.logs.list(
- file_id="file_123abc",
- size=1,
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+response = client.logs.list(file_id='file_123abc', size=1, )
for item in response:
yield item
# alternatively, you can paginate page-by-page
@@ -13508,13 +12671,8 @@ Delete Logs with the given IDs.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.logs.delete(
- id="prv_Wu6zx1lAWJRqOyL8nWuZk",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.logs.delete(id='prv_Wu6zx1lAWJRqOyL8nWuZk', )
```
@@ -13578,13 +12736,8 @@ Retrieve the Log with the given ID.
```python
from humanloop import Humanloop
-
-client = Humanloop(
- api_key="YOUR_API_KEY",
-)
-client.logs.get(
- id="prv_Wu6zx1lAWJRqOyL8nWuZk",
-)
+client = Humanloop(api_key="YOUR_API_KEY", )
+client.logs.get(id='prv_Wu6zx1lAWJRqOyL8nWuZk', )
```
diff --git a/requirements.txt b/requirements.txt
index cd56cd01..4b75b9bd 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,7 +1,7 @@
-deepdiff==^8.2.0
+deepdiff==8.2.0
httpx>=0.21.2
httpx-sse==0.4.0
-mmh3==^5.1.0
+mmh3==5.1.0
opentelemetry-api>=1.27.0
opentelemetry-instrumentation-anthropic>=0.20
opentelemetry-instrumentation-bedrock>=0.15
@@ -14,5 +14,5 @@ opentelemetry-sdk>=1.27.0
parse>=1
protobuf>=5.29.3
pydantic>= 1.9.2
-pydantic-core==^2.18.2
+pydantic-core==2.18.2
typing_extensions>= 4.0.0
diff --git a/src/humanloop/__init__.py b/src/humanloop/__init__.py
index 8485d75c..407e3fb6 100644
--- a/src/humanloop/__init__.py
+++ b/src/humanloop/__init__.py
@@ -1,5 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
+# isort: skip_file
+
from .types import (
AgentCallResponse,
AgentCallResponseToolChoice,
@@ -93,6 +95,7 @@
FileId,
FilePath,
FileRequest,
+ FileSortBy,
FileType,
FilesToolType,
FlowKernelRequest,
@@ -150,7 +153,6 @@
PopulateTemplateResponseReasoningEffort,
PopulateTemplateResponseStop,
PopulateTemplateResponseTemplate,
- ProjectSortBy,
PromptCallLogResponse,
PromptCallResponse,
PromptCallResponseToolChoice,
@@ -204,6 +206,8 @@
from .errors import UnprocessableEntityError
from . import agents, datasets, directories, evaluations, evaluators, files, flows, logs, prompts, tools
from .agents import (
+ AgentLogRequestAgent,
+ AgentLogRequestAgentParams,
AgentLogRequestToolChoice,
AgentLogRequestToolChoiceParams,
AgentRequestReasoningEffort,
@@ -214,8 +218,12 @@
AgentRequestTemplateParams,
AgentRequestToolsItem,
AgentRequestToolsItemParams,
+ AgentsCallRequestAgent,
+ AgentsCallRequestAgentParams,
AgentsCallRequestToolChoice,
AgentsCallRequestToolChoiceParams,
+ AgentsCallStreamRequestAgent,
+ AgentsCallStreamRequestAgentParams,
AgentsCallStreamRequestToolChoice,
AgentsCallStreamRequestToolChoiceParams,
)
@@ -242,6 +250,8 @@
)
from .files import RetrieveByPathFilesRetrieveByPathPostResponse, RetrieveByPathFilesRetrieveByPathPostResponseParams
from .prompts import (
+ PromptLogRequestPrompt,
+ PromptLogRequestPromptParams,
PromptLogRequestToolChoice,
PromptLogRequestToolChoiceParams,
PromptLogUpdateRequestToolChoice,
@@ -252,8 +262,12 @@
PromptRequestStopParams,
PromptRequestTemplate,
PromptRequestTemplateParams,
+ PromptsCallRequestPrompt,
+ PromptsCallRequestPromptParams,
PromptsCallRequestToolChoice,
PromptsCallRequestToolChoiceParams,
+ PromptsCallStreamRequestPrompt,
+ PromptsCallStreamRequestPromptParams,
PromptsCallStreamRequestToolChoice,
PromptsCallStreamRequestToolChoiceParams,
)
@@ -461,6 +475,8 @@
"AgentLinkedFileResponseFile",
"AgentLinkedFileResponseFileParams",
"AgentLinkedFileResponseParams",
+ "AgentLogRequestAgent",
+ "AgentLogRequestAgentParams",
"AgentLogRequestToolChoice",
"AgentLogRequestToolChoiceParams",
"AgentLogResponse",
@@ -487,8 +503,12 @@
"AgentResponseTemplateParams",
"AgentResponseToolsItem",
"AgentResponseToolsItemParams",
+ "AgentsCallRequestAgent",
+ "AgentsCallRequestAgentParams",
"AgentsCallRequestToolChoice",
"AgentsCallRequestToolChoiceParams",
+ "AgentsCallStreamRequestAgent",
+ "AgentsCallStreamRequestAgentParams",
"AgentsCallStreamRequestToolChoice",
"AgentsCallStreamRequestToolChoiceParams",
"AnthropicRedactedThinkingContent",
@@ -622,6 +642,7 @@
"FilePathParams",
"FileRequest",
"FileRequestParams",
+ "FileSortBy",
"FileType",
"FilesToolType",
"FlowKernelRequest",
@@ -725,7 +746,6 @@
"PopulateTemplateResponseStopParams",
"PopulateTemplateResponseTemplate",
"PopulateTemplateResponseTemplateParams",
- "ProjectSortBy",
"PromptCallLogResponse",
"PromptCallLogResponseParams",
"PromptCallResponse",
@@ -742,6 +762,8 @@
"PromptKernelRequestStopParams",
"PromptKernelRequestTemplate",
"PromptKernelRequestTemplateParams",
+ "PromptLogRequestPrompt",
+ "PromptLogRequestPromptParams",
"PromptLogRequestToolChoice",
"PromptLogRequestToolChoiceParams",
"PromptLogResponse",
@@ -764,8 +786,12 @@
"PromptResponseStopParams",
"PromptResponseTemplate",
"PromptResponseTemplateParams",
+ "PromptsCallRequestPrompt",
+ "PromptsCallRequestPromptParams",
"PromptsCallRequestToolChoice",
"PromptsCallRequestToolChoiceParams",
+ "PromptsCallStreamRequestPrompt",
+ "PromptsCallStreamRequestPromptParams",
"PromptsCallStreamRequestToolChoice",
"PromptsCallStreamRequestToolChoiceParams",
"ProviderApiKeys",
diff --git a/src/humanloop/agents/__init__.py b/src/humanloop/agents/__init__.py
index 04260714..e8a63fd6 100644
--- a/src/humanloop/agents/__init__.py
+++ b/src/humanloop/agents/__init__.py
@@ -1,25 +1,35 @@
# This file was auto-generated by Fern from our API Definition.
+# isort: skip_file
+
from .types import (
+ AgentLogRequestAgent,
AgentLogRequestToolChoice,
AgentRequestReasoningEffort,
AgentRequestStop,
AgentRequestTemplate,
AgentRequestToolsItem,
+ AgentsCallRequestAgent,
AgentsCallRequestToolChoice,
+ AgentsCallStreamRequestAgent,
AgentsCallStreamRequestToolChoice,
)
from .requests import (
+ AgentLogRequestAgentParams,
AgentLogRequestToolChoiceParams,
AgentRequestReasoningEffortParams,
AgentRequestStopParams,
AgentRequestTemplateParams,
AgentRequestToolsItemParams,
+ AgentsCallRequestAgentParams,
AgentsCallRequestToolChoiceParams,
+ AgentsCallStreamRequestAgentParams,
AgentsCallStreamRequestToolChoiceParams,
)
__all__ = [
+ "AgentLogRequestAgent",
+ "AgentLogRequestAgentParams",
"AgentLogRequestToolChoice",
"AgentLogRequestToolChoiceParams",
"AgentRequestReasoningEffort",
@@ -30,8 +40,12 @@
"AgentRequestTemplateParams",
"AgentRequestToolsItem",
"AgentRequestToolsItemParams",
+ "AgentsCallRequestAgent",
+ "AgentsCallRequestAgentParams",
"AgentsCallRequestToolChoice",
"AgentsCallRequestToolChoiceParams",
+ "AgentsCallStreamRequestAgent",
+ "AgentsCallStreamRequestAgentParams",
"AgentsCallStreamRequestToolChoice",
"AgentsCallStreamRequestToolChoiceParams",
]
diff --git a/src/humanloop/agents/client.py b/src/humanloop/agents/client.py
index 64f3de62..ab7b887c 100644
--- a/src/humanloop/agents/client.py
+++ b/src/humanloop/agents/client.py
@@ -1,53 +1,47 @@
# This file was auto-generated by Fern from our API Definition.
-import typing
-from ..core.client_wrapper import SyncClientWrapper
-from .raw_client import RawAgentsClient
-from ..requests.chat_message import ChatMessageParams
-from .requests.agent_log_request_tool_choice import AgentLogRequestToolChoiceParams
-from ..requests.agent_kernel_request import AgentKernelRequestParams
import datetime as dt
-from ..types.log_status import LogStatus
+import typing
+
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pagination import AsyncPager, SyncPager
from ..core.request_options import RequestOptions
-from ..types.create_agent_log_response import CreateAgentLogResponse
-from ..types.agent_log_response import AgentLogResponse
-from .requests.agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoiceParams
+from ..requests.chat_message import ChatMessageParams
+from ..requests.evaluator_activation_deactivation_request_activate_item import (
+ EvaluatorActivationDeactivationRequestActivateItemParams,
+)
+from ..requests.evaluator_activation_deactivation_request_deactivate_item import (
+ EvaluatorActivationDeactivationRequestDeactivateItemParams,
+)
from ..requests.provider_api_keys import ProviderApiKeysParams
-from ..types.agent_call_stream_response import AgentCallStreamResponse
-from .requests.agents_call_request_tool_choice import AgentsCallRequestToolChoiceParams
+from ..requests.response_format import ResponseFormatParams
from ..types.agent_call_response import AgentCallResponse
-from ..types.agent_continue_call_stream_response import AgentContinueCallStreamResponse
+from ..types.agent_call_stream_response import AgentCallStreamResponse
from ..types.agent_continue_call_response import AgentContinueCallResponse
-from ..types.project_sort_by import ProjectSortBy
-from ..types.sort_order import SortOrder
-from ..core.pagination import SyncPager
+from ..types.agent_continue_call_stream_response import AgentContinueCallStreamResponse
+from ..types.agent_kernel_request import AgentKernelRequest
+from ..types.agent_log_response import AgentLogResponse
from ..types.agent_response import AgentResponse
-from ..types.paginated_data_agent_response import PaginatedDataAgentResponse
-from ..core.unchecked_base_model import construct_type
-from ..errors.unprocessable_entity_error import UnprocessableEntityError
-from ..types.http_validation_error import HttpValidationError
-from json.decoder import JSONDecodeError
-from ..core.api_error import ApiError
+from ..types.create_agent_log_response import CreateAgentLogResponse
+from ..types.file_environment_response import FileEnvironmentResponse
+from ..types.file_sort_by import FileSortBy
+from ..types.list_agents import ListAgents
+from ..types.log_status import LogStatus
from ..types.model_endpoints import ModelEndpoints
-from .requests.agent_request_template import AgentRequestTemplateParams
-from ..types.template_language import TemplateLanguage
from ..types.model_providers import ModelProviders
-from .requests.agent_request_stop import AgentRequestStopParams
-from ..requests.response_format import ResponseFormatParams
+from ..types.sort_order import SortOrder
+from ..types.template_language import TemplateLanguage
+from .raw_client import AsyncRawAgentsClient, RawAgentsClient
+from .requests.agent_log_request_agent import AgentLogRequestAgentParams
+from .requests.agent_log_request_tool_choice import AgentLogRequestToolChoiceParams
from .requests.agent_request_reasoning_effort import AgentRequestReasoningEffortParams
+from .requests.agent_request_stop import AgentRequestStopParams
+from .requests.agent_request_template import AgentRequestTemplateParams
from .requests.agent_request_tools_item import AgentRequestToolsItemParams
-from ..types.list_agents import ListAgents
-from ..types.file_environment_response import FileEnvironmentResponse
-from ..requests.evaluator_activation_deactivation_request_activate_item import (
- EvaluatorActivationDeactivationRequestActivateItemParams,
-)
-from ..requests.evaluator_activation_deactivation_request_deactivate_item import (
- EvaluatorActivationDeactivationRequestDeactivateItemParams,
-)
-from ..types.agent_kernel_request import AgentKernelRequest
-from ..core.client_wrapper import AsyncClientWrapper
-from .raw_client import AsyncRawAgentsClient
-from ..core.pagination import AsyncPager
+from .requests.agents_call_request_agent import AgentsCallRequestAgentParams
+from .requests.agents_call_request_tool_choice import AgentsCallRequestToolChoiceParams
+from .requests.agents_call_stream_request_agent import AgentsCallStreamRequestAgentParams
+from .requests.agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoiceParams
# this is used as the default value for optional parameters
OMIT = typing.cast(typing.Any, ...)
@@ -85,7 +79,7 @@ def log(
finish_reason: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT,
- agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ agent: typing.Optional[AgentLogRequestAgentParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -164,8 +158,11 @@ def log(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- agent : typing.Optional[AgentKernelRequestParams]
- Details of your Agent. A new Agent version will be created if the provided details are new.
+ agent : typing.Optional[AgentLogRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+ A new Agent version will be created if the provided details do not match any existing version.
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -235,58 +232,14 @@ def log(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.agents.log(
- path="Banking/Teller Agent",
- agent={
- "provider": "anthropic",
- "endpoint": "chat",
- "model": "claude-3-7-sonnet-latest",
- "reasoning_effort": 1024,
- "template": [
- {
- "role": "system",
- "content": "You are a helpful digital assistant, helping users navigate our digital banking platform.",
- }
- ],
- "max_iterations": 3,
- "tools": [
- {
- "type": "file",
- "link": {
- "file_id": "pr_1234567890",
- "version_id": "prv_1234567890",
- },
- "on_agent_call": "continue",
- },
- {
- "type": "inline",
- "json_schema": {
- "name": "stop",
- "description": "Call this tool when you have finished your task.",
- "parameters": {
- "type": "object",
- "properties": {
- "output": {
- "type": "string",
- "description": "The final output to return to the user.",
- }
- },
- "additionalProperties": False,
- "required": ["output"],
- },
- "strict": True,
- },
- "on_agent_call": "stop",
- },
- ],
- },
- )
- """
- response = self._raw_client.log(
+ client = Humanloop(api_key="YOUR_API_KEY", )
+        client.agents.log(path='Banking/Teller Agent', agent={'provider': "anthropic", 'endpoint': "chat", 'model': 'claude-3-7-sonnet-latest', 'reasoning_effort': 1024, 'template': [{'role': "system", 'content': 'You are a helpful digital assistant, helping users navigate our digital banking platform.'}], 'max_iterations': 3, 'tools': [{'type': 'file', 'link': {'file_id': 'pr_1234567890', 'version_id': 'prv_1234567890'}, 'on_agent_call': "continue"}, {'type': 'inline', 'json_schema': {'name': 'stop', 'description': 'Call this tool when you have finished your task.', 'parameters': {'type': 'object', 'properties': {'output': {'type': 'string', 'description': 'The final output to return to the user.'}}, 'additionalProperties': False, 'required': ['output']}, 'strict': True}, 'on_agent_call': "stop"}]}, )
+ """
+ _response = self._raw_client.log(
version_id=version_id,
environment=environment,
run_id=run_id,
@@ -323,7 +276,7 @@ def log(
log_id=log_id,
request_options=request_options,
)
- return response.data
+ return _response.data
def update_log(
self,
@@ -380,28 +333,10 @@ def update_log(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.agents.update_log(
- id="ag_1234567890",
- log_id="log_1234567890",
- messages=[
- {"role": "user", "content": "I need to withdraw $1000"},
- {
- "role": "assistant",
- "content": "Of course! Would you like to use your savings or checking account?",
- },
- ],
- output_message={
- "role": "assistant",
- "content": "I'm sorry, I can't help with that.",
- },
- log_status="complete",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.agents.update_log(id='ag_1234567890', log_id='log_1234567890', messages=[{'role': "user", 'content': 'I need to withdraw $1000'}, {'role': "assistant", 'content': 'Of course! Would you like to use your savings or checking account?'}], output_message={'role': "assistant", 'content': "I'm sorry, I can't help with that."}, log_status="complete", )
"""
- response = self._raw_client.update_log(
+ _response = self._raw_client.update_log(
id,
log_id,
messages=messages,
@@ -412,7 +347,7 @@ def update_log(
log_status=log_status,
request_options=request_options,
)
- return response.data
+ return _response.data
def call_stream(
self,
@@ -423,7 +358,7 @@ def call_stream(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT,
- agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ agent: typing.Optional[AgentsCallStreamRequestAgentParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -482,8 +417,11 @@ def call_stream(
- `'required'` means the model must call one or more of the provided tools.
            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- agent : typing.Optional[AgentKernelRequestParams]
- Details of your Agent. A new Agent version will be created if the provided details are new.
+ agent : typing.Optional[AgentsCallStreamRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+ A new Agent version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -541,10 +479,7 @@ def call_stream(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
response = client.agents.call_stream()
for chunk in response:
yield chunk
@@ -585,7 +520,7 @@ def call(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT,
- agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ agent: typing.Optional[AgentsCallRequestAgentParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -644,8 +579,11 @@ def call(
- `'required'` means the model must call one or more of the provided tools.
            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- agent : typing.Optional[AgentKernelRequestParams]
- Details of your Agent. A new Agent version will be created if the provided details are new.
+ agent : typing.Optional[AgentsCallRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+ A new Agent version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -703,21 +641,10 @@ def call(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.agents.call(
- path="Banking/Teller Agent",
- messages=[
- {
- "role": "user",
- "content": "I'd like to deposit $1000 to my savings account from my checking account.",
- }
- ],
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.agents.call(path='Banking/Teller Agent', messages=[{'role': "user", 'content': "I'd like to deposit $1000 to my savings account from my checking account."}], )
"""
- response = self._raw_client.call(
+ _response = self._raw_client.call(
version_id=version_id,
environment=environment,
path=path,
@@ -742,7 +669,7 @@ def call(
include_trace_children=include_trace_children,
request_options=request_options,
)
- return response.data
+ return _response.data
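The `agent` parameter on these call endpoints now accepts either a configuration object or the raw contents of a .agent file, as the regenerated docstrings above describe. A minimal sketch of both forms, reusing the `client` from the examples above (identifiers and message text are illustrative only):

    # Object form: describe the Agent configuration inline.
    client.agents.call(
        path="Banking/Teller Agent",
        agent={"provider": "anthropic", "endpoint": "chat", "model": "claude-3-7-sonnet-latest"},
        messages=[{"role": "user", "content": "What is my checking balance?"}],
    )

    # String form: pass raw .agent file contents, e.g. whatever agents.serialize() returns.
    raw_agent = client.agents.serialize(id="ag_1234567890")
    client.agents.call(
        path="Banking/Teller Agent",
        agent=raw_agent,
        messages=[{"role": "user", "content": "What is my checking balance?"}],
    )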
def continue_call_stream(
self,
@@ -789,14 +716,8 @@ def continue_call_stream(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- response = client.agents.continue_call_stream(
- log_id="log_id",
- messages=[{"role": "user"}],
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ response = client.agents.continue_call_stream(log_id='log_id', messages=[{'role': "user"}], )
for chunk in response:
yield chunk
"""
@@ -854,29 +775,17 @@ def continue_call(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.agents.continue_call(
- log_id="log_1234567890",
- messages=[
- {
- "role": "tool",
- "content": '{"type": "checking", "balance": 5200}',
- "tool_call_id": "tc_1234567890",
- }
- ],
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.agents.continue_call(log_id='log_1234567890', messages=[{'role': "tool", 'content': '{"type": "checking", "balance": 5200}', 'tool_call_id': 'tc_1234567890'}], )
"""
- response = self._raw_client.continue_call(
+ _response = self._raw_client.continue_call(
log_id=log_id,
messages=messages,
provider_api_keys=provider_api_keys,
include_trace_children=include_trace_children,
request_options=request_options,
)
- return response.data
+ return _response.data
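Taken together, `call` and `continue_call` cover the pause/resume flow implied by `on_agent_call`: when the Agent stops on a tool the caller owns, the tool result is posted back against the paused Log. A hedged sketch reusing the example identifiers above (how the paused log id is obtained from the call response is not shown here and depends on the response model):

    client.agents.call(
        path="Banking/Teller Agent",
        messages=[{"role": "user", "content": "What is my checking balance?"}],
    )

    # Once the external tool has run, resume the paused Log with a tool message.
    client.agents.continue_call(
        log_id="log_1234567890",  # id of the paused Log from the call above
        messages=[{"role": "tool", "content": '{"type": "checking", "balance": 5200}', "tool_call_id": "tc_1234567890"}],
    )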
def list(
self,
@@ -885,7 +794,7 @@ def list(
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
user_filter: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> SyncPager[AgentResponse]:
@@ -906,7 +815,7 @@ def list(
user_filter : typing.Optional[str]
Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort Agents by
order : typing.Optional[SortOrder]
@@ -923,68 +832,23 @@ def list(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- response = client.agents.list(
- size=1,
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ response = client.agents.list(size=1, )
for item in response:
yield item
# alternatively, you can paginate page-by-page
for page in response.iter_pages():
yield page
"""
- page = page if page is not None else 1
- _response = self._raw_client._client_wrapper.httpx_client.request(
- "agents",
- method="GET",
- params={
- "page": page,
- "size": size,
- "name": name,
- "user_filter": user_filter,
- "sort_by": sort_by,
- "order": order,
- },
+ return self._raw_client.list(
+ page=page,
+ size=size,
+ name=name,
+ user_filter=user_filter,
+ sort_by=sort_by,
+ order=order,
request_options=request_options,
)
- try:
- if 200 <= _response.status_code < 300:
- _parsed_response = typing.cast(
- PaginatedDataAgentResponse,
- construct_type(
- type_=PaginatedDataAgentResponse, # type: ignore
- object_=_response.json(),
- ),
- )
- _has_next = True
- _get_next = lambda: self.list(
- page=page + 1,
- size=size,
- name=name,
- user_filter=user_filter,
- sort_by=sort_by,
- order=order,
- request_options=request_options,
- )
- _items = _parsed_response.records
- return SyncPager(has_next=_has_next, items=_items, get_next=_get_next)
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(
- HttpValidationError,
- construct_type(
- type_=HttpValidationError, # type: ignore
- object_=_response.json(),
- ),
- )
- )
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
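`list` now delegates to the raw client but still returns a `SyncPager[AgentResponse]` built from `PaginatedDataAgentResponse.records`, so callers can iterate items across pages or walk pages explicitly. A small sketch (the `id` attribute on `AgentResponse` is assumed purely for illustration):

    pager = client.agents.list(size=10)

    # Iterating the pager fetches page 2, 3, ... lazily via get_next.
    for agent in pager:
        print(agent.id)

    # Page-by-page access; each page carries its items list.
    for page in client.agents.list(size=10).iter_pages():
        print(len(page.items or []))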
def upsert(
self,
@@ -1118,50 +982,14 @@ def upsert(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.agents.upsert(
- path="Banking/Teller Agent",
- provider="anthropic",
- endpoint="chat",
- model="claude-3-7-sonnet-latest",
- reasoning_effort=1024,
- template=[
- {
- "role": "system",
- "content": "You are a helpful digital assistant, helping users navigate our digital banking platform.",
- }
- ],
- max_iterations=3,
- tools=[
- {
- "type": "inline",
- "json_schema": {
- "name": "stop",
- "description": "Call this tool when you have finished your task.",
- "parameters": {
- "type": "object",
- "properties": {
- "output": {
- "type": "string",
- "description": "The final output to return to the user.",
- }
- },
- "additionalProperties": False,
- "required": ["output"],
- },
- "strict": True,
- },
- "on_agent_call": "stop",
- }
- ],
- version_name="teller-agent-v1",
- version_description="Initial version",
- )
- """
- response = self._raw_client.upsert(
+ client = Humanloop(api_key="YOUR_API_KEY", )
+    client.agents.upsert(path='Banking/Teller Agent', provider="anthropic", endpoint="chat", model='claude-3-7-sonnet-latest', reasoning_effort=1024, template=[{'role': "system", 'content': 'You are a helpful digital assistant, helping users navigate our digital banking platform.'}], max_iterations=3, tools=[{'type': 'inline', 'json_schema': {'name': 'stop', 'description': 'Call this tool when you have finished your task.', 'parameters': {
+        'type': 'object',
+        'properties': {'output': {'type': 'string', 'description': 'The final output to return to the user.'}},
+        'additionalProperties': False,
+        'required': ['output'],
+    }, 'strict': True}, 'on_agent_call': "stop"}], version_name='teller-agent-v1', version_description='Initial version')
+ """
+ _response = self._raw_client.upsert(
model=model,
path=path,
id=id,
@@ -1189,7 +1017,7 @@ def upsert(
readme=readme,
request_options=request_options,
)
- return response.data
+ return _response.data
def delete_agent_version(
self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1215,17 +1043,11 @@ def delete_agent_version(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.agents.delete_agent_version(
- id="ag_1234567890",
- version_id="agv_1234567890",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.agents.delete_agent_version(id='ag_1234567890', version_id='agv_1234567890', )
"""
- response = self._raw_client.delete_agent_version(id, version_id, request_options=request_options)
- return response.data
+ _response = self._raw_client.delete_agent_version(id, version_id, request_options=request_options)
+ return _response.data
def patch_agent_version(
self,
@@ -1264,21 +1086,13 @@ def patch_agent_version(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.agents.patch_agent_version(
- id="ag_1234567890",
- version_id="agv_1234567890",
- name="teller-agent-v2",
- description="Updated version",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.agents.patch_agent_version(id='ag_1234567890', version_id='agv_1234567890', name='teller-agent-v2', description='Updated version', )
"""
- response = self._raw_client.patch_agent_version(
+ _response = self._raw_client.patch_agent_version(
id, version_id, name=name, description=description, request_options=request_options
)
- return response.data
+ return _response.data
def get(
self,
@@ -1316,18 +1130,13 @@ def get(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.agents.get(
- id="ag_1234567890",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.agents.get(id='ag_1234567890', )
"""
- response = self._raw_client.get(
+ _response = self._raw_client.get(
id, version_id=version_id, environment=environment, request_options=request_options
)
- return response.data
+ return _response.data
def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
"""
@@ -1348,16 +1157,11 @@ def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] =
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.agents.delete(
- id="ag_1234567890",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.agents.delete(id='ag_1234567890', )
"""
- response = self._raw_client.delete(id, request_options=request_options)
- return response.data
+ _response = self._raw_client.delete(id, request_options=request_options)
+ return _response.data
def move(
self,
@@ -1396,19 +1200,13 @@ def move(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.agents.move(
- id="ag_1234567890",
- path="new directory/new name",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.agents.move(id='ag_1234567890', path='new directory/new name', )
"""
- response = self._raw_client.move(
+ _response = self._raw_client.move(
id, path=path, name=name, directory_id=directory_id, request_options=request_options
)
- return response.data
+ return _response.data
def list_versions(
self,
@@ -1439,18 +1237,13 @@ def list_versions(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.agents.list_versions(
- id="ag_1234567890",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.agents.list_versions(id='ag_1234567890', )
"""
- response = self._raw_client.list_versions(
+ _response = self._raw_client.list_versions(
id, evaluator_aggregates=evaluator_aggregates, request_options=request_options
)
- return response.data
+ return _response.data
def set_deployment(
self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
@@ -1483,20 +1276,13 @@ def set_deployment(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.agents.set_deployment(
- id="id",
- environment_id="environment_id",
- version_id="version_id",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.agents.set_deployment(id='id', environment_id='environment_id', version_id='version_id', )
"""
- response = self._raw_client.set_deployment(
+ _response = self._raw_client.set_deployment(
id, environment_id, version_id=version_id, request_options=request_options
)
- return response.data
+ return _response.data
def remove_deployment(
self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1525,17 +1311,11 @@ def remove_deployment(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.agents.remove_deployment(
- id="id",
- environment_id="environment_id",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.agents.remove_deployment(id='id', environment_id='environment_id', )
"""
- response = self._raw_client.remove_deployment(id, environment_id, request_options=request_options)
- return response.data
+ _response = self._raw_client.remove_deployment(id, environment_id, request_options=request_options)
+ return _response.data
def list_environments(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1559,16 +1339,11 @@ def list_environments(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.agents.list_environments(
- id="ag_1234567890",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.agents.list_environments(id='ag_1234567890', )
"""
- response = self._raw_client.list_environments(id, request_options=request_options)
- return response.data
+ _response = self._raw_client.list_environments(id, request_options=request_options)
+ return _response.data
def update_monitoring(
self,
@@ -1605,23 +1380,13 @@ def update_monitoring(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.agents.update_monitoring(
- id="ag_1234567890",
- activate=[
- {"evaluator_version_id": "ev_1234567890"},
- {"evaluator_id": "ev_2345678901", "environment_id": "env_1234567890"},
- ],
- deactivate=[{"evaluator_version_id": "ev_0987654321"}],
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.agents.update_monitoring(id='ag_1234567890', activate=[{'evaluator_version_id': 'ev_1234567890'}, {'evaluator_id': 'ev_2345678901', 'environment_id': 'env_1234567890'}], deactivate=[{'evaluator_version_id': 'ev_0987654321'}], )
"""
- response = self._raw_client.update_monitoring(
+ _response = self._raw_client.update_monitoring(
id, activate=activate, deactivate=deactivate, request_options=request_options
)
- return response.data
+ return _response.data
def serialize(
self,
@@ -1630,7 +1395,7 @@ def serialize(
version_id: typing.Optional[str] = None,
environment: typing.Optional[str] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> None:
+ ) -> str:
"""
Serialize an Agent to the .agent file format.
@@ -1656,23 +1421,19 @@ def serialize(
Returns
-------
- None
+ str
+ Successful Response
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.agents.serialize(
- id="id",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.agents.serialize(id='id', )
"""
- response = self._raw_client.serialize(
+ _response = self._raw_client.serialize(
id, version_id=version_id, environment=environment, request_options=request_options
)
- return response.data
+ return _response.data
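Because `serialize` now returns the .agent file contents as a `str` instead of `None`, the result can be written to disk or round-tripped through `deserialize`. A brief sketch (the filename is arbitrary):

    raw = client.agents.serialize(id="ag_1234567890")

    # Persist the serialized Agent; the .agent extension mirrors the file format name.
    with open("teller_agent.agent", "w") as f:
        f.write(raw)

    # Round-trip the same contents back into an AgentKernelRequest.
    kernel = client.agents.deserialize(agent=raw)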
def deserialize(self, *, agent: str, request_options: typing.Optional[RequestOptions] = None) -> AgentKernelRequest:
"""
@@ -1696,16 +1457,11 @@ def deserialize(self, *, agent: str, request_options: typing.Optional[RequestOpt
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.agents.deserialize(
- agent="agent",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.agents.deserialize(agent='agent', )
"""
- response = self._raw_client.deserialize(agent=agent, request_options=request_options)
- return response.data
+ _response = self._raw_client.deserialize(agent=agent, request_options=request_options)
+ return _response.data
class AsyncAgentsClient:
@@ -1740,7 +1496,7 @@ async def log(
finish_reason: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT,
- agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ agent: typing.Optional[AgentLogRequestAgentParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -1819,8 +1575,11 @@ async def log(
- `'required'` means the model must call one or more of the provided tools.
            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- agent : typing.Optional[AgentKernelRequestParams]
- Details of your Agent. A new Agent version will be created if the provided details are new.
+ agent : typing.Optional[AgentLogRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+ A new Agent version will be created if the provided details do not match any existing version.
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -1889,67 +1648,18 @@ async def log(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.agents.log(
- path="Banking/Teller Agent",
- agent={
- "provider": "anthropic",
- "endpoint": "chat",
- "model": "claude-3-7-sonnet-latest",
- "reasoning_effort": 1024,
- "template": [
- {
- "role": "system",
- "content": "You are a helpful digital assistant, helping users navigate our digital banking platform.",
- }
- ],
- "max_iterations": 3,
- "tools": [
- {
- "type": "file",
- "link": {
- "file_id": "pr_1234567890",
- "version_id": "prv_1234567890",
- },
- "on_agent_call": "continue",
- },
- {
- "type": "inline",
- "json_schema": {
- "name": "stop",
- "description": "Call this tool when you have finished your task.",
- "parameters": {
- "type": "object",
- "properties": {
- "output": {
- "type": "string",
- "description": "The final output to return to the user.",
- }
- },
- "additionalProperties": False,
- "required": ["output"],
- },
- "strict": True,
- },
- "on_agent_call": "stop",
- },
- ],
- },
- )
-
-
+        await client.agents.log(path='Banking/Teller Agent', agent={'provider': "anthropic", 'endpoint': "chat", 'model': 'claude-3-7-sonnet-latest', 'reasoning_effort': 1024, 'template': [{'role': "system", 'content': 'You are a helpful digital assistant, helping users navigate our digital banking platform.'}], 'max_iterations': 3, 'tools': [{'type': 'file', 'link': {'file_id': 'pr_1234567890', 'version_id': 'prv_1234567890'}, 'on_agent_call': "continue"}, {'type': 'inline', 'json_schema': {'name': 'stop', 'description': 'Call this tool when you have finished your task.', 'parameters': {
+            'type': 'object',
+            'properties': {'output': {'type': 'string', 'description': 'The final output to return to the user.'}},
+            'additionalProperties': False,
+            'required': ['output'],
+        }, 'strict': True}, 'on_agent_call': "stop"}]})
asyncio.run(main())
"""
- response = await self._raw_client.log(
+ _response = await self._raw_client.log(
version_id=version_id,
environment=environment,
run_id=run_id,
@@ -1986,7 +1696,7 @@ async def main() -> None:
log_id=log_id,
request_options=request_options,
)
- return response.data
+ return _response.data
async def update_log(
self,
@@ -2042,37 +1752,14 @@ async def update_log(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.agents.update_log(
- id="ag_1234567890",
- log_id="log_1234567890",
- messages=[
- {"role": "user", "content": "I need to withdraw $1000"},
- {
- "role": "assistant",
- "content": "Of course! Would you like to use your savings or checking account?",
- },
- ],
- output_message={
- "role": "assistant",
- "content": "I'm sorry, I can't help with that.",
- },
- log_status="complete",
- )
-
-
+ await client.agents.update_log(id='ag_1234567890', log_id='log_1234567890', messages=[{'role': "user", 'content': 'I need to withdraw $1000'}, {'role': "assistant", 'content': 'Of course! Would you like to use your savings or checking account?'}], output_message={'role': "assistant", 'content': "I'm sorry, I can't help with that."}, log_status="complete", )
asyncio.run(main())
"""
- response = await self._raw_client.update_log(
+ _response = await self._raw_client.update_log(
id,
log_id,
messages=messages,
@@ -2083,7 +1770,7 @@ async def main() -> None:
log_status=log_status,
request_options=request_options,
)
- return response.data
+ return _response.data
async def call_stream(
self,
@@ -2094,7 +1781,7 @@ async def call_stream(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT,
- agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ agent: typing.Optional[AgentsCallStreamRequestAgentParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2153,8 +1840,11 @@ async def call_stream(
- `'required'` means the model must call one or more of the provided tools.
            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- agent : typing.Optional[AgentKernelRequestParams]
- Details of your Agent. A new Agent version will be created if the provided details are new.
+ agent : typing.Optional[AgentsCallStreamRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+ A new Agent version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -2211,21 +1901,13 @@ async def call_stream(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
response = await client.agents.call_stream()
async for chunk in response:
yield chunk
-
-
asyncio.run(main())
"""
async with self._raw_client.call_stream(
@@ -2265,7 +1947,7 @@ async def call(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT,
- agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ agent: typing.Optional[AgentsCallRequestAgentParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2324,8 +2006,11 @@ async def call(
- `'required'` means the model must call one or more of the provided tools.
            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- agent : typing.Optional[AgentKernelRequestParams]
- Details of your Agent. A new Agent version will be created if the provided details are new.
+ agent : typing.Optional[AgentsCallRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+ A new Agent version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -2382,30 +2067,14 @@ async def call(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.agents.call(
- path="Banking/Teller Agent",
- messages=[
- {
- "role": "user",
- "content": "I'd like to deposit $1000 to my savings account from my checking account.",
- }
- ],
- )
-
-
+ await client.agents.call(path='Banking/Teller Agent', messages=[{'role': "user", 'content': "I'd like to deposit $1000 to my savings account from my checking account."}], )
asyncio.run(main())
"""
- response = await self._raw_client.call(
+ _response = await self._raw_client.call(
version_id=version_id,
environment=environment,
path=path,
@@ -2430,7 +2099,7 @@ async def main() -> None:
include_trace_children=include_trace_children,
request_options=request_options,
)
- return response.data
+ return _response.data
async def continue_call_stream(
self,
@@ -2476,24 +2145,13 @@ async def continue_call_stream(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- response = await client.agents.continue_call_stream(
- log_id="log_id",
- messages=[{"role": "user"}],
- )
+ response = await client.agents.continue_call_stream(log_id='log_id', messages=[{'role': "user"}], )
async for chunk in response:
yield chunk
-
-
asyncio.run(main())
"""
async with self._raw_client.continue_call_stream(
@@ -2550,38 +2208,21 @@ async def continue_call(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.agents.continue_call(
- log_id="log_1234567890",
- messages=[
- {
- "role": "tool",
- "content": '{"type": "checking", "balance": 5200}',
- "tool_call_id": "tc_1234567890",
- }
- ],
- )
-
-
+ await client.agents.continue_call(log_id='log_1234567890', messages=[{'role': "tool", 'content': '{"type": "checking", "balance": 5200}', 'tool_call_id': 'tc_1234567890'}], )
asyncio.run(main())
"""
- response = await self._raw_client.continue_call(
+ _response = await self._raw_client.continue_call(
log_id=log_id,
messages=messages,
provider_api_keys=provider_api_keys,
include_trace_children=include_trace_children,
request_options=request_options,
)
- return response.data
+ return _response.data
async def list(
self,
@@ -2590,7 +2231,7 @@ async def list(
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
user_filter: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncPager[AgentResponse]:
@@ -2611,7 +2252,7 @@ async def list(
user_filter : typing.Optional[str]
Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort Agents by
order : typing.Optional[SortOrder]
@@ -2627,77 +2268,28 @@ async def list(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- response = await client.agents.list(
- size=1,
- )
+ response = await client.agents.list(size=1, )
async for item in response:
yield item
+
# alternatively, you can paginate page-by-page
async for page in response.iter_pages():
yield page
-
-
asyncio.run(main())
"""
- page = page if page is not None else 1
- _response = await self._raw_client._client_wrapper.httpx_client.request(
- "agents",
- method="GET",
- params={
- "page": page,
- "size": size,
- "name": name,
- "user_filter": user_filter,
- "sort_by": sort_by,
- "order": order,
- },
+ return await self._raw_client.list(
+ page=page,
+ size=size,
+ name=name,
+ user_filter=user_filter,
+ sort_by=sort_by,
+ order=order,
request_options=request_options,
)
- try:
- if 200 <= _response.status_code < 300:
- _parsed_response = typing.cast(
- PaginatedDataAgentResponse,
- construct_type(
- type_=PaginatedDataAgentResponse, # type: ignore
- object_=_response.json(),
- ),
- )
- _has_next = True
- _get_next = lambda: self.list(
- page=page + 1,
- size=size,
- name=name,
- user_filter=user_filter,
- sort_by=sort_by,
- order=order,
- request_options=request_options,
- )
- _items = _parsed_response.records
- return AsyncPager(has_next=_has_next, items=_items, get_next=_get_next)
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(
- HttpValidationError,
- construct_type(
- type_=HttpValidationError, # type: ignore
- object_=_response.json(),
- ),
- )
- )
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
async def upsert(
self,
@@ -2830,59 +2422,18 @@ async def upsert(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.agents.upsert(
- path="Banking/Teller Agent",
- provider="anthropic",
- endpoint="chat",
- model="claude-3-7-sonnet-latest",
- reasoning_effort=1024,
- template=[
- {
- "role": "system",
- "content": "You are a helpful digital assistant, helping users navigate our digital banking platform.",
- }
- ],
- max_iterations=3,
- tools=[
- {
- "type": "inline",
- "json_schema": {
- "name": "stop",
- "description": "Call this tool when you have finished your task.",
- "parameters": {
- "type": "object",
- "properties": {
- "output": {
- "type": "string",
- "description": "The final output to return to the user.",
- }
- },
- "additionalProperties": False,
- "required": ["output"],
- },
- "strict": True,
- },
- "on_agent_call": "stop",
- }
- ],
- version_name="teller-agent-v1",
- version_description="Initial version",
- )
-
-
+        await client.agents.upsert(path='Banking/Teller Agent', provider="anthropic", endpoint="chat", model='claude-3-7-sonnet-latest', reasoning_effort=1024, template=[{'role': "system", 'content': 'You are a helpful digital assistant, helping users navigate our digital banking platform.'}], max_iterations=3, tools=[{'type': 'inline', 'json_schema': {'name': 'stop', 'description': 'Call this tool when you have finished your task.', 'parameters': {
+            'type': 'object',
+            'properties': {'output': {'type': 'string', 'description': 'The final output to return to the user.'}},
+            'additionalProperties': False,
+            'required': ['output'],
+        }, 'strict': True}, 'on_agent_call': "stop"}], version_name='teller-agent-v1', version_description='Initial version')
asyncio.run(main())
"""
- response = await self._raw_client.upsert(
+ _response = await self._raw_client.upsert(
model=model,
path=path,
id=id,
@@ -2910,7 +2461,7 @@ async def main() -> None:
readme=readme,
request_options=request_options,
)
- return response.data
+ return _response.data
async def delete_agent_version(
self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -2935,26 +2486,15 @@ async def delete_agent_version(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.agents.delete_agent_version(
- id="ag_1234567890",
- version_id="agv_1234567890",
- )
-
-
+ await client.agents.delete_agent_version(id='ag_1234567890', version_id='agv_1234567890', )
asyncio.run(main())
"""
- response = await self._raw_client.delete_agent_version(id, version_id, request_options=request_options)
- return response.data
+ _response = await self._raw_client.delete_agent_version(id, version_id, request_options=request_options)
+ return _response.data
async def patch_agent_version(
self,
@@ -2992,30 +2532,17 @@ async def patch_agent_version(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.agents.patch_agent_version(
- id="ag_1234567890",
- version_id="agv_1234567890",
- name="teller-agent-v2",
- description="Updated version",
- )
-
-
+ await client.agents.patch_agent_version(id='ag_1234567890', version_id='agv_1234567890', name='teller-agent-v2', description='Updated version', )
asyncio.run(main())
"""
- response = await self._raw_client.patch_agent_version(
+ _response = await self._raw_client.patch_agent_version(
id, version_id, name=name, description=description, request_options=request_options
)
- return response.data
+ return _response.data
async def get(
self,
@@ -3052,27 +2579,17 @@ async def get(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.agents.get(
- id="ag_1234567890",
- )
-
-
+ await client.agents.get(id='ag_1234567890', )
asyncio.run(main())
"""
- response = await self._raw_client.get(
+ _response = await self._raw_client.get(
id, version_id=version_id, environment=environment, request_options=request_options
)
- return response.data
+ return _response.data
async def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
"""
@@ -3092,25 +2609,15 @@ async def delete(self, id: str, *, request_options: typing.Optional[RequestOptio
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.agents.delete(
- id="ag_1234567890",
- )
-
-
+ await client.agents.delete(id='ag_1234567890', )
asyncio.run(main())
"""
- response = await self._raw_client.delete(id, request_options=request_options)
- return response.data
+ _response = await self._raw_client.delete(id, request_options=request_options)
+ return _response.data
async def move(
self,
@@ -3148,28 +2655,17 @@ async def move(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.agents.move(
- id="ag_1234567890",
- path="new directory/new name",
- )
-
-
+ await client.agents.move(id='ag_1234567890', path='new directory/new name', )
asyncio.run(main())
"""
- response = await self._raw_client.move(
+ _response = await self._raw_client.move(
id, path=path, name=name, directory_id=directory_id, request_options=request_options
)
- return response.data
+ return _response.data
async def list_versions(
self,
@@ -3199,27 +2695,17 @@ async def list_versions(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.agents.list_versions(
- id="ag_1234567890",
- )
-
-
+ await client.agents.list_versions(id='ag_1234567890', )
asyncio.run(main())
"""
- response = await self._raw_client.list_versions(
+ _response = await self._raw_client.list_versions(
id, evaluator_aggregates=evaluator_aggregates, request_options=request_options
)
- return response.data
+ return _response.data
async def set_deployment(
self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
@@ -3251,29 +2737,17 @@ async def set_deployment(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.agents.set_deployment(
- id="id",
- environment_id="environment_id",
- version_id="version_id",
- )
-
-
+ await client.agents.set_deployment(id='id', environment_id='environment_id', version_id='version_id', )
asyncio.run(main())
"""
- response = await self._raw_client.set_deployment(
+ _response = await self._raw_client.set_deployment(
id, environment_id, version_id=version_id, request_options=request_options
)
- return response.data
+ return _response.data
async def remove_deployment(
self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -3301,26 +2775,15 @@ async def remove_deployment(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.agents.remove_deployment(
- id="id",
- environment_id="environment_id",
- )
-
-
+ await client.agents.remove_deployment(id='id', environment_id='environment_id', )
asyncio.run(main())
"""
- response = await self._raw_client.remove_deployment(id, environment_id, request_options=request_options)
- return response.data
+ _response = await self._raw_client.remove_deployment(id, environment_id, request_options=request_options)
+ return _response.data
async def list_environments(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -3343,25 +2806,15 @@ async def list_environments(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.agents.list_environments(
- id="ag_1234567890",
- )
-
-
+ await client.agents.list_environments(id='ag_1234567890', )
asyncio.run(main())
"""
- response = await self._raw_client.list_environments(id, request_options=request_options)
- return response.data
+ _response = await self._raw_client.list_environments(id, request_options=request_options)
+ return _response.data
async def update_monitoring(
self,
@@ -3397,35 +2850,17 @@ async def update_monitoring(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.agents.update_monitoring(
- id="ag_1234567890",
- activate=[
- {"evaluator_version_id": "ev_1234567890"},
- {
- "evaluator_id": "ev_2345678901",
- "environment_id": "env_1234567890",
- },
- ],
- deactivate=[{"evaluator_version_id": "ev_0987654321"}],
- )
-
-
+ await client.agents.update_monitoring(id='ag_1234567890', activate=[{'evaluator_version_id': 'ev_1234567890'}, {'evaluator_id': 'ev_2345678901', 'environment_id': 'env_1234567890'}], deactivate=[{'evaluator_version_id': 'ev_0987654321'}], )
asyncio.run(main())
"""
- response = await self._raw_client.update_monitoring(
+ _response = await self._raw_client.update_monitoring(
id, activate=activate, deactivate=deactivate, request_options=request_options
)
- return response.data
+ return _response.data
async def serialize(
self,
@@ -3434,7 +2869,7 @@ async def serialize(
version_id: typing.Optional[str] = None,
environment: typing.Optional[str] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> None:
+ ) -> str:
"""
Serialize an Agent to the .agent file format.
@@ -3460,31 +2895,22 @@ async def serialize(
Returns
-------
- None
+ str
+ Successful Response
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.agents.serialize(
- id="id",
- )
-
-
+ await client.agents.serialize(id='id', )
asyncio.run(main())
"""
- response = await self._raw_client.serialize(
+ _response = await self._raw_client.serialize(
id, version_id=version_id, environment=environment, request_options=request_options
)
- return response.data
+ return _response.data
async def deserialize(
self, *, agent: str, request_options: typing.Optional[RequestOptions] = None
@@ -3509,22 +2935,12 @@ async def deserialize(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.agents.deserialize(
- agent="agent",
- )
-
-
+ await client.agents.deserialize(agent='agent', )
asyncio.run(main())
"""
- response = await self._raw_client.deserialize(agent=agent, request_options=request_options)
- return response.data
+ _response = await self._raw_client.deserialize(agent=agent, request_options=request_options)
+ return _response.data
diff --git a/src/humanloop/agents/raw_client.py b/src/humanloop/agents/raw_client.py
index b13491a6..03caae7d 100644
--- a/src/humanloop/agents/raw_client.py
+++ b/src/humanloop/agents/raw_client.py
@@ -1,52 +1,57 @@
# This file was auto-generated by Fern from our API Definition.
-import typing
-from ..core.client_wrapper import SyncClientWrapper
-from ..requests.chat_message import ChatMessageParams
-from .requests.agent_log_request_tool_choice import AgentLogRequestToolChoiceParams
-from ..requests.agent_kernel_request import AgentKernelRequestParams
+import contextlib
import datetime as dt
-from ..types.log_status import LogStatus
+import typing
+from json.decoder import JSONDecodeError
+
+import httpx_sse
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.http_response import AsyncHttpResponse, HttpResponse
+from ..core.jsonable_encoder import jsonable_encoder
+from ..core.pagination import AsyncPager, BaseHttpResponse, SyncPager
from ..core.request_options import RequestOptions
-from ..core.http_response import HttpResponse
-from ..types.create_agent_log_response import CreateAgentLogResponse
from ..core.serialization import convert_and_respect_annotation_metadata
from ..core.unchecked_base_model import construct_type
from ..errors.unprocessable_entity_error import UnprocessableEntityError
-from ..types.http_validation_error import HttpValidationError
-from json.decoder import JSONDecodeError
-from ..core.api_error import ApiError
-from ..types.agent_log_response import AgentLogResponse
-from ..core.jsonable_encoder import jsonable_encoder
-from .requests.agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoiceParams
-from ..requests.provider_api_keys import ProviderApiKeysParams
-from ..types.agent_call_stream_response import AgentCallStreamResponse
-import httpx_sse
-import contextlib
-from .requests.agents_call_request_tool_choice import AgentsCallRequestToolChoiceParams
-from ..types.agent_call_response import AgentCallResponse
-from ..types.agent_continue_call_stream_response import AgentContinueCallStreamResponse
-from ..types.agent_continue_call_response import AgentContinueCallResponse
-from ..types.model_endpoints import ModelEndpoints
-from .requests.agent_request_template import AgentRequestTemplateParams
-from ..types.template_language import TemplateLanguage
-from ..types.model_providers import ModelProviders
-from .requests.agent_request_stop import AgentRequestStopParams
-from ..requests.response_format import ResponseFormatParams
-from .requests.agent_request_reasoning_effort import AgentRequestReasoningEffortParams
-from .requests.agent_request_tools_item import AgentRequestToolsItemParams
-from ..types.agent_response import AgentResponse
-from ..types.list_agents import ListAgents
-from ..types.file_environment_response import FileEnvironmentResponse
+from ..requests.chat_message import ChatMessageParams
from ..requests.evaluator_activation_deactivation_request_activate_item import (
EvaluatorActivationDeactivationRequestActivateItemParams,
)
from ..requests.evaluator_activation_deactivation_request_deactivate_item import (
EvaluatorActivationDeactivationRequestDeactivateItemParams,
)
+from ..requests.provider_api_keys import ProviderApiKeysParams
+from ..requests.response_format import ResponseFormatParams
+from ..types.agent_call_response import AgentCallResponse
+from ..types.agent_call_stream_response import AgentCallStreamResponse
+from ..types.agent_continue_call_response import AgentContinueCallResponse
+from ..types.agent_continue_call_stream_response import AgentContinueCallStreamResponse
from ..types.agent_kernel_request import AgentKernelRequest
-from ..core.client_wrapper import AsyncClientWrapper
-from ..core.http_response import AsyncHttpResponse
+from ..types.agent_log_response import AgentLogResponse
+from ..types.agent_response import AgentResponse
+from ..types.create_agent_log_response import CreateAgentLogResponse
+from ..types.file_environment_response import FileEnvironmentResponse
+from ..types.file_sort_by import FileSortBy
+from ..types.http_validation_error import HttpValidationError
+from ..types.list_agents import ListAgents
+from ..types.log_status import LogStatus
+from ..types.model_endpoints import ModelEndpoints
+from ..types.model_providers import ModelProviders
+from ..types.paginated_data_agent_response import PaginatedDataAgentResponse
+from ..types.sort_order import SortOrder
+from ..types.template_language import TemplateLanguage
+from .requests.agent_log_request_agent import AgentLogRequestAgentParams
+from .requests.agent_log_request_tool_choice import AgentLogRequestToolChoiceParams
+from .requests.agent_request_reasoning_effort import AgentRequestReasoningEffortParams
+from .requests.agent_request_stop import AgentRequestStopParams
+from .requests.agent_request_template import AgentRequestTemplateParams
+from .requests.agent_request_tools_item import AgentRequestToolsItemParams
+from .requests.agents_call_request_agent import AgentsCallRequestAgentParams
+from .requests.agents_call_request_tool_choice import AgentsCallRequestToolChoiceParams
+from .requests.agents_call_stream_request_agent import AgentsCallStreamRequestAgentParams
+from .requests.agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoiceParams
# this is used as the default value for optional parameters
OMIT = typing.cast(typing.Any, ...)
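`OMIT` is the usual sentinel trick: an `Ellipsis` cast to `Any` marks "argument not supplied", so the client can keep an explicit `None` in the request body while dropping parameters the caller never passed. A standalone illustration of the pattern:

    import typing

    OMIT = typing.cast(typing.Any, ...)

    def build_body(**kwargs: typing.Any) -> dict:
        # Keep explicit None values, drop anything still at the sentinel.
        return {key: value for key, value in kwargs.items() if value is not OMIT}

    build_body(output=None, user=OMIT)  # -> {'output': None}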
@@ -73,7 +78,7 @@ def log(
finish_reason: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT,
- agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ agent: typing.Optional[AgentLogRequestAgentParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -152,8 +157,11 @@ def log(
- `'required'` means the model must call one or more of the provided tools.
            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- agent : typing.Optional[AgentKernelRequestParams]
- Details of your Agent. A new Agent version will be created if the provided details are new.
+ agent : typing.Optional[AgentLogRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+ A new Agent version will be created if the provided details do not match any existing version.
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -247,7 +255,7 @@ def log(
object_=tool_choice, annotation=AgentLogRequestToolChoiceParams, direction="write"
),
"agent": convert_and_respect_annotation_metadata(
- object_=agent, annotation=AgentKernelRequestParams, direction="write"
+ object_=agent, annotation=AgentLogRequestAgentParams, direction="write"
),
"start_time": start_time,
"end_time": end_time,
@@ -287,18 +295,19 @@ def log(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
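`UnprocessableEntityError` and `ApiError` are now raised with the response headers as well as the parsed body. Assuming the constructor arguments are exposed as attributes of the same names (they are passed as `status_code`, `headers`, and `body` above), a caller can inspect them like this:

    from humanloop.core.api_error import ApiError

    try:
        client.agents.get(id="ag_1234567890")
    except ApiError as exc:
        # Status, body and now headers (e.g. request ids or rate-limit info) are available.
        print(exc.status_code, exc.body)
        print(dict(exc.headers or {}))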
def update_log(
self,
@@ -385,18 +394,19 @@ def update_log(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
@contextlib.contextmanager
def call_stream(
@@ -408,7 +418,7 @@ def call_stream(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT,
- agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ agent: typing.Optional[AgentsCallStreamRequestAgentParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -467,8 +477,11 @@ def call_stream(
- `'required'` means the model must call one or more of the provided tools.
            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- agent : typing.Optional[AgentKernelRequestParams]
- Details of your Agent. A new Agent version will be created if the provided details are new.
+ agent : typing.Optional[AgentsCallStreamRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+ A new Agent version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -540,7 +553,7 @@ def call_stream(
object_=tool_choice, annotation=AgentsCallStreamRequestToolChoiceParams, direction="write"
),
"agent": convert_and_respect_annotation_metadata(
- object_=agent, annotation=AgentKernelRequestParams, direction="write"
+ object_=agent, annotation=AgentsCallStreamRequestAgentParams, direction="write"
),
"inputs": inputs,
"source": source,
@@ -578,7 +591,7 @@ def _iter():
if _sse.data == None:
return
try:
- yield _sse.data()
+ yield _sse.data
except Exception:
pass
return
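The switch from `_sse.data()` to `_sse.data` matters because `httpx_sse` exposes the event payload as a plain `str` attribute; calling it raised `TypeError` for every event, which the surrounding `except Exception: pass` then swallowed, so no chunks were yielded. A standalone sketch of the attribute access (the URL is illustrative):

    import httpx
    import httpx_sse

    with httpx.Client() as http:
        with httpx_sse.connect_sse(http, "GET", "https://example.com/stream") as source:
            for sse in source.iter_sse():
                print(sse.data)      # correct: `data` is a str attribute
                # sse.data()         # would raise TypeError: 'str' object is not callable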
@@ -587,18 +600,21 @@ def _iter():
_response.read()
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(
+ status_code=_response.status_code, headers=dict(_response.headers), body=_response.text
+ )
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
yield stream()
@@ -611,7 +627,7 @@ def call(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT,
- agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ agent: typing.Optional[AgentsCallRequestAgentParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -670,8 +686,11 @@ def call(
- `'required'` means the model must call one or more of the provided tools.
            - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- agent : typing.Optional[AgentKernelRequestParams]
- Details of your Agent. A new Agent version will be created if the provided details are new.
+ agent : typing.Optional[AgentsCallRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+ A new Agent version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -743,7 +762,7 @@ def call(
object_=tool_choice, annotation=AgentsCallRequestToolChoiceParams, direction="write"
),
"agent": convert_and_respect_annotation_metadata(
- object_=agent, annotation=AgentKernelRequestParams, direction="write"
+ object_=agent, annotation=AgentsCallRequestAgentParams, direction="write"
),
"inputs": inputs,
"source": source,
@@ -782,18 +801,19 @@ def call(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
@contextlib.contextmanager
def continue_call_stream(
@@ -869,7 +889,7 @@ def _iter():
if _sse.data == None:
return
try:
- yield _sse.data()
+ yield _sse.data
except Exception:
pass
return
@@ -878,18 +898,21 @@ def _iter():
_response.read()
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(
+ status_code=_response.status_code, headers=dict(_response.headers), body=_response.text
+ )
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
yield stream()
@@ -967,18 +990,115 @@ def continue_call(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+ def list(
+ self,
+ *,
+ page: typing.Optional[int] = None,
+ size: typing.Optional[int] = None,
+ name: typing.Optional[str] = None,
+ user_filter: typing.Optional[str] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
+ order: typing.Optional[SortOrder] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> SyncPager[AgentResponse]:
+ """
+ Get a list of all Agents.
+
+ Parameters
+ ----------
+ page : typing.Optional[int]
+ Page number for pagination.
+
+ size : typing.Optional[int]
+ Page size for pagination. Number of Agents to fetch.
+
+ name : typing.Optional[str]
+ Case-insensitive filter for Agent name.
+
+ user_filter : typing.Optional[str]
+ Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users.
+
+ sort_by : typing.Optional[FileSortBy]
+ Field to sort Agents by
+
+ order : typing.Optional[SortOrder]
+ Direction to sort by.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ SyncPager[AgentResponse]
+ Successful Response
+ """
+ page = page if page is not None else 1
+
+ _response = self._client_wrapper.httpx_client.request(
+ "agents",
+ method="GET",
+ params={
+ "page": page,
+ "size": size,
+ "name": name,
+ "user_filter": user_filter,
+ "sort_by": sort_by,
+ "order": order,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _parsed_response = typing.cast(
+ PaginatedDataAgentResponse,
+ construct_type(
+ type_=PaginatedDataAgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ _items = _parsed_response.records
+ _has_next = True
+ _get_next = lambda: self.list(
+ page=page + 1,
+ size=size,
+ name=name,
+ user_filter=user_filter,
+ sort_by=sort_by,
+ order=order,
+ request_options=request_options,
+ )
+ return SyncPager(
+ has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response)
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ headers=dict(_response.headers),
+ body=typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ ),
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
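The new `list` endpoint wraps the paginated response in a `SyncPager`, with `get_next` re-issuing the request for the following page; note that `has_next` is hard-coded to `True` here, so termination relies on the pager's handling of a page with no records. A minimal sketch, assuming the wrapped `AgentsClient` exposes the same signature and that the pager is directly iterable item-by-item, as Fern-generated pagers usually are:

from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Pages are fetched lazily as iteration advances.
for agent in client.agents.list(size=50):
    print(agent.id, agent.name)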
def upsert(
self,
@@ -1167,18 +1287,19 @@ def upsert(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def delete_agent_version(
self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1211,18 +1332,19 @@ def delete_agent_version(
return HttpResponse(response=_response, data=None)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def patch_agent_version(
self,
@@ -1265,6 +1387,9 @@ def patch_agent_version(
"name": name,
"description": description,
},
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
)
@@ -1280,18 +1405,19 @@ def patch_agent_version(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def get(
self,
@@ -1347,18 +1473,19 @@ def get(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> HttpResponse[None]:
"""
@@ -1386,18 +1513,19 @@ def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] =
return HttpResponse(response=_response, data=None)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def move(
self,
@@ -1459,18 +1587,19 @@ def move(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def list_versions(
self,
@@ -1518,18 +1647,19 @@ def list_versions(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def set_deployment(
self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
@@ -1579,18 +1709,19 @@ def set_deployment(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def remove_deployment(
self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1626,18 +1757,19 @@ def remove_deployment(
return HttpResponse(response=_response, data=None)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def list_environments(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1675,18 +1807,19 @@ def list_environments(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def update_monitoring(
self,
@@ -1735,6 +1868,9 @@ def update_monitoring(
direction="write",
),
},
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
)
@@ -1750,18 +1886,19 @@ def update_monitoring(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def serialize(
self,
@@ -1770,7 +1907,7 @@ def serialize(
version_id: typing.Optional[str] = None,
environment: typing.Optional[str] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> HttpResponse[None]:
+ ) -> HttpResponse[str]:
"""
Serialize an Agent to the .agent file format.
@@ -1796,7 +1933,8 @@ def serialize(
Returns
-------
- HttpResponse[None]
+ HttpResponse[str]
+ Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
f"agents/{jsonable_encoder(id)}/serialize",
@@ -1809,21 +1947,22 @@ def serialize(
)
try:
if 200 <= _response.status_code < 300:
- return HttpResponse(response=_response, data=None)
+ return HttpResponse(response=_response, data=_response.text) # type: ignore
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
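With the return type corrected to `HttpResponse[str]`, `serialize` now hands back the raw `.agent` text rather than `None`, which pairs naturally with `deserialize` below. A hypothetical round trip at the raw-client layer shown in this file (`agents_raw` stands in for however the SDK exposes this raw client; the wrapped client may unwrap `.data` and return the string directly):

response = agents_raw.serialize(id="<agent-id>", environment="production")
agent_text = response.data                      # the .agent file contents as a string
restored = agents_raw.deserialize(agent=agent_text)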
def deserialize(
self, *, agent: str, request_options: typing.Optional[RequestOptions] = None
@@ -1870,18 +2009,19 @@ def deserialize(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
class AsyncRawAgentsClient:
@@ -1905,7 +2045,7 @@ async def log(
finish_reason: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT,
- agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ agent: typing.Optional[AgentLogRequestAgentParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -1984,8 +2124,11 @@ async def log(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': }}` forces the model to use the named function.
- agent : typing.Optional[AgentKernelRequestParams]
- Details of your Agent. A new Agent version will be created if the provided details are new.
+ agent : typing.Optional[AgentLogRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+ A new Agent version will be created if the provided details do not match any existing version.
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -2079,7 +2222,7 @@ async def log(
object_=tool_choice, annotation=AgentLogRequestToolChoiceParams, direction="write"
),
"agent": convert_and_respect_annotation_metadata(
- object_=agent, annotation=AgentKernelRequestParams, direction="write"
+ object_=agent, annotation=AgentLogRequestAgentParams, direction="write"
),
"start_time": start_time,
"end_time": end_time,
@@ -2119,18 +2262,19 @@ async def log(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def update_log(
self,
@@ -2217,18 +2361,19 @@ async def update_log(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
@contextlib.asynccontextmanager
async def call_stream(
@@ -2240,7 +2385,7 @@ async def call_stream(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT,
- agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ agent: typing.Optional[AgentsCallStreamRequestAgentParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2299,8 +2444,11 @@ async def call_stream(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': }}` forces the model to use the named function.
- agent : typing.Optional[AgentKernelRequestParams]
- Details of your Agent. A new Agent version will be created if the provided details are new.
+ agent : typing.Optional[AgentsCallStreamRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+ A new Agent version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -2372,7 +2520,7 @@ async def call_stream(
object_=tool_choice, annotation=AgentsCallStreamRequestToolChoiceParams, direction="write"
),
"agent": convert_and_respect_annotation_metadata(
- object_=agent, annotation=AgentKernelRequestParams, direction="write"
+ object_=agent, annotation=AgentsCallStreamRequestAgentParams, direction="write"
),
"inputs": inputs,
"source": source,
@@ -2410,7 +2558,7 @@ async def _iter():
if _sse.data == None:
return
try:
- yield _sse.data()
+ yield _sse.data
except Exception:
pass
return
@@ -2419,18 +2567,21 @@ async def _iter():
await _response.aread()
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(
+ status_code=_response.status_code, headers=dict(_response.headers), body=_response.text
+ )
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
yield await stream()
@@ -2443,7 +2594,7 @@ async def call(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT,
- agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ agent: typing.Optional[AgentsCallRequestAgentParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2502,8 +2653,11 @@ async def call(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': }}` forces the model to use the named function.
- agent : typing.Optional[AgentKernelRequestParams]
- Details of your Agent. A new Agent version will be created if the provided details are new.
+ agent : typing.Optional[AgentsCallRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+ A new Agent version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -2575,7 +2729,7 @@ async def call(
object_=tool_choice, annotation=AgentsCallRequestToolChoiceParams, direction="write"
),
"agent": convert_and_respect_annotation_metadata(
- object_=agent, annotation=AgentKernelRequestParams, direction="write"
+ object_=agent, annotation=AgentsCallRequestAgentParams, direction="write"
),
"inputs": inputs,
"source": source,
@@ -2614,18 +2768,19 @@ async def call(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
@contextlib.asynccontextmanager
async def continue_call_stream(
@@ -2701,7 +2856,7 @@ async def _iter():
if _sse.data == None:
return
try:
- yield _sse.data()
+ yield _sse.data
except Exception:
pass
return
@@ -2710,18 +2865,21 @@ async def _iter():
await _response.aread()
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(
+ status_code=_response.status_code, headers=dict(_response.headers), body=_response.text
+ )
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
yield await stream()
@@ -2799,18 +2957,118 @@ async def continue_call(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
+ ),
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+ async def list(
+ self,
+ *,
+ page: typing.Optional[int] = None,
+ size: typing.Optional[int] = None,
+ name: typing.Optional[str] = None,
+ user_filter: typing.Optional[str] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
+ order: typing.Optional[SortOrder] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncPager[AgentResponse]:
+ """
+ Get a list of all Agents.
+
+ Parameters
+ ----------
+ page : typing.Optional[int]
+ Page number for pagination.
+
+ size : typing.Optional[int]
+ Page size for pagination. Number of Agents to fetch.
+
+ name : typing.Optional[str]
+ Case-insensitive filter for Agent name.
+
+ user_filter : typing.Optional[str]
+ Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users.
+
+ sort_by : typing.Optional[FileSortBy]
+ Field to sort Agents by
+
+ order : typing.Optional[SortOrder]
+ Direction to sort by.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncPager[AgentResponse]
+ Successful Response
+ """
+ page = page if page is not None else 1
+
+ _response = await self._client_wrapper.httpx_client.request(
+ "agents",
+ method="GET",
+ params={
+ "page": page,
+ "size": size,
+ "name": name,
+ "user_filter": user_filter,
+ "sort_by": sort_by,
+ "order": order,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _parsed_response = typing.cast(
+ PaginatedDataAgentResponse,
+ construct_type(
+ type_=PaginatedDataAgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ _items = _parsed_response.records
+ _has_next = True
+
+ async def _get_next():
+ return await self.list(
+ page=page + 1,
+ size=size,
+ name=name,
+ user_filter=user_filter,
+ sort_by=sort_by,
+ order=order,
+ request_options=request_options,
)
+
+ return AsyncPager(
+ has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response)
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ headers=dict(_response.headers),
+ body=typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
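The asynchronous counterpart returns an `AsyncPager`, with `_get_next` defined as a coroutine that awaits the next page. A sketch, assuming the wrapped async client mirrors this signature and that the pager supports `async for`, as is conventional for these generated pagers:

import asyncio

from humanloop import AsyncHumanloop

async def main() -> None:
    client = AsyncHumanloop(api_key="YOUR_API_KEY")
    pager = await client.agents.list(size=50)
    # Subsequent pages are awaited lazily as iteration advances.
    async for agent in pager:
        print(agent.id, agent.name)

asyncio.run(main())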
async def upsert(
self,
@@ -2999,18 +3257,19 @@ async def upsert(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def delete_agent_version(
self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -3043,18 +3302,19 @@ async def delete_agent_version(
return AsyncHttpResponse(response=_response, data=None)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def patch_agent_version(
self,
@@ -3097,6 +3357,9 @@ async def patch_agent_version(
"name": name,
"description": description,
},
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
)
@@ -3112,18 +3375,19 @@ async def patch_agent_version(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def get(
self,
@@ -3179,18 +3443,19 @@ async def get(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def delete(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -3220,18 +3485,19 @@ async def delete(
return AsyncHttpResponse(response=_response, data=None)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def move(
self,
@@ -3293,18 +3559,19 @@ async def move(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def list_versions(
self,
@@ -3352,18 +3619,19 @@ async def list_versions(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def set_deployment(
self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
@@ -3413,18 +3681,19 @@ async def set_deployment(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def remove_deployment(
self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -3460,18 +3729,19 @@ async def remove_deployment(
return AsyncHttpResponse(response=_response, data=None)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def list_environments(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -3509,18 +3779,19 @@ async def list_environments(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def update_monitoring(
self,
@@ -3569,6 +3840,9 @@ async def update_monitoring(
direction="write",
),
},
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
)
@@ -3584,18 +3858,19 @@ async def update_monitoring(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def serialize(
self,
@@ -3604,7 +3879,7 @@ async def serialize(
version_id: typing.Optional[str] = None,
environment: typing.Optional[str] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> AsyncHttpResponse[None]:
+ ) -> AsyncHttpResponse[str]:
"""
Serialize an Agent to the .agent file format.
@@ -3630,7 +3905,8 @@ async def serialize(
Returns
-------
- AsyncHttpResponse[None]
+ AsyncHttpResponse[str]
+ Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
f"agents/{jsonable_encoder(id)}/serialize",
@@ -3643,21 +3919,22 @@ async def serialize(
)
try:
if 200 <= _response.status_code < 300:
- return AsyncHttpResponse(response=_response, data=None)
+ return AsyncHttpResponse(response=_response, data=_response.text) # type: ignore
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def deserialize(
self, *, agent: str, request_options: typing.Optional[RequestOptions] = None
@@ -3704,15 +3981,16 @@ async def deserialize(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
diff --git a/src/humanloop/agents/requests/__init__.py b/src/humanloop/agents/requests/__init__.py
index 78a8f9ec..e02cfc67 100644
--- a/src/humanloop/agents/requests/__init__.py
+++ b/src/humanloop/agents/requests/__init__.py
@@ -1,19 +1,27 @@
# This file was auto-generated by Fern from our API Definition.
+# isort: skip_file
+
+from .agent_log_request_agent import AgentLogRequestAgentParams
from .agent_log_request_tool_choice import AgentLogRequestToolChoiceParams
from .agent_request_reasoning_effort import AgentRequestReasoningEffortParams
from .agent_request_stop import AgentRequestStopParams
from .agent_request_template import AgentRequestTemplateParams
from .agent_request_tools_item import AgentRequestToolsItemParams
+from .agents_call_request_agent import AgentsCallRequestAgentParams
from .agents_call_request_tool_choice import AgentsCallRequestToolChoiceParams
+from .agents_call_stream_request_agent import AgentsCallStreamRequestAgentParams
from .agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoiceParams
__all__ = [
+ "AgentLogRequestAgentParams",
"AgentLogRequestToolChoiceParams",
"AgentRequestReasoningEffortParams",
"AgentRequestStopParams",
"AgentRequestTemplateParams",
"AgentRequestToolsItemParams",
+ "AgentsCallRequestAgentParams",
"AgentsCallRequestToolChoiceParams",
+ "AgentsCallStreamRequestAgentParams",
"AgentsCallStreamRequestToolChoiceParams",
]
diff --git a/src/humanloop/agents/requests/agent_log_request_agent.py b/src/humanloop/agents/requests/agent_log_request_agent.py
new file mode 100644
index 00000000..38a5adc4
--- /dev/null
+++ b/src/humanloop/agents/requests/agent_log_request_agent.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from ...requests.agent_kernel_request import AgentKernelRequestParams
+
+AgentLogRequestAgentParams = typing.Union[AgentKernelRequestParams, str]
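Each of these new aliases is simply a union of the structured `AgentKernelRequest(.Params)` shape with `str`, widening the accepted input to either an inline configuration or serialized `.agent` text. An illustrative annotation using the params alias (the dict key is an assumption about `AgentKernelRequestParams`, and the string is a placeholder rather than real `.agent` syntax):

from humanloop.agents.requests import AgentLogRequestAgentParams

inline_agent: AgentLogRequestAgentParams = {"model": "gpt-4o"}
file_agent: AgentLogRequestAgentParams = "<raw contents of a .agent file>"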
diff --git a/src/humanloop/agents/requests/agent_log_request_tool_choice.py b/src/humanloop/agents/requests/agent_log_request_tool_choice.py
index 584112aa..02255e30 100644
--- a/src/humanloop/agents/requests/agent_log_request_tool_choice.py
+++ b/src/humanloop/agents/requests/agent_log_request_tool_choice.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from ...requests.tool_choice import ToolChoiceParams
AgentLogRequestToolChoiceParams = typing.Union[
diff --git a/src/humanloop/agents/requests/agent_request_reasoning_effort.py b/src/humanloop/agents/requests/agent_request_reasoning_effort.py
index 98a991cd..dfc8de95 100644
--- a/src/humanloop/agents/requests/agent_request_reasoning_effort.py
+++ b/src/humanloop/agents/requests/agent_request_reasoning_effort.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from ...types.open_ai_reasoning_effort import OpenAiReasoningEffort
AgentRequestReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/agents/requests/agent_request_template.py b/src/humanloop/agents/requests/agent_request_template.py
index c251ce8e..3b9c8c1f 100644
--- a/src/humanloop/agents/requests/agent_request_template.py
+++ b/src/humanloop/agents/requests/agent_request_template.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from ...requests.chat_message import ChatMessageParams
AgentRequestTemplateParams = typing.Union[str, typing.Sequence[ChatMessageParams]]
diff --git a/src/humanloop/agents/requests/agent_request_tools_item.py b/src/humanloop/agents/requests/agent_request_tools_item.py
index 20cde136..3bf06108 100644
--- a/src/humanloop/agents/requests/agent_request_tools_item.py
+++ b/src/humanloop/agents/requests/agent_request_tools_item.py
@@ -1,7 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from ...requests.agent_linked_file_request import AgentLinkedFileRequestParams
+
from ...requests.agent_inline_tool import AgentInlineToolParams
+from ...requests.agent_linked_file_request import AgentLinkedFileRequestParams
AgentRequestToolsItemParams = typing.Union[AgentLinkedFileRequestParams, AgentInlineToolParams]
diff --git a/src/humanloop/agents/requests/agents_call_request_agent.py b/src/humanloop/agents/requests/agents_call_request_agent.py
new file mode 100644
index 00000000..0123488f
--- /dev/null
+++ b/src/humanloop/agents/requests/agents_call_request_agent.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from ...requests.agent_kernel_request import AgentKernelRequestParams
+
+AgentsCallRequestAgentParams = typing.Union[AgentKernelRequestParams, str]
diff --git a/src/humanloop/agents/requests/agents_call_request_tool_choice.py b/src/humanloop/agents/requests/agents_call_request_tool_choice.py
index 1e468fa0..9ebb0f75 100644
--- a/src/humanloop/agents/requests/agents_call_request_tool_choice.py
+++ b/src/humanloop/agents/requests/agents_call_request_tool_choice.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from ...requests.tool_choice import ToolChoiceParams
AgentsCallRequestToolChoiceParams = typing.Union[
diff --git a/src/humanloop/agents/requests/agents_call_stream_request_agent.py b/src/humanloop/agents/requests/agents_call_stream_request_agent.py
new file mode 100644
index 00000000..eab2c55c
--- /dev/null
+++ b/src/humanloop/agents/requests/agents_call_stream_request_agent.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from ...requests.agent_kernel_request import AgentKernelRequestParams
+
+AgentsCallStreamRequestAgentParams = typing.Union[AgentKernelRequestParams, str]
diff --git a/src/humanloop/agents/requests/agents_call_stream_request_tool_choice.py b/src/humanloop/agents/requests/agents_call_stream_request_tool_choice.py
index bd068b6f..40ad08c2 100644
--- a/src/humanloop/agents/requests/agents_call_stream_request_tool_choice.py
+++ b/src/humanloop/agents/requests/agents_call_stream_request_tool_choice.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from ...requests.tool_choice import ToolChoiceParams
AgentsCallStreamRequestToolChoiceParams = typing.Union[
diff --git a/src/humanloop/agents/types/__init__.py b/src/humanloop/agents/types/__init__.py
index 73d98669..0d9bf871 100644
--- a/src/humanloop/agents/types/__init__.py
+++ b/src/humanloop/agents/types/__init__.py
@@ -1,19 +1,27 @@
# This file was auto-generated by Fern from our API Definition.
+# isort: skip_file
+
+from .agent_log_request_agent import AgentLogRequestAgent
from .agent_log_request_tool_choice import AgentLogRequestToolChoice
from .agent_request_reasoning_effort import AgentRequestReasoningEffort
from .agent_request_stop import AgentRequestStop
from .agent_request_template import AgentRequestTemplate
from .agent_request_tools_item import AgentRequestToolsItem
+from .agents_call_request_agent import AgentsCallRequestAgent
from .agents_call_request_tool_choice import AgentsCallRequestToolChoice
+from .agents_call_stream_request_agent import AgentsCallStreamRequestAgent
from .agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoice
__all__ = [
+ "AgentLogRequestAgent",
"AgentLogRequestToolChoice",
"AgentRequestReasoningEffort",
"AgentRequestStop",
"AgentRequestTemplate",
"AgentRequestToolsItem",
+ "AgentsCallRequestAgent",
"AgentsCallRequestToolChoice",
+ "AgentsCallStreamRequestAgent",
"AgentsCallStreamRequestToolChoice",
]
diff --git a/src/humanloop/agents/types/agent_log_request_agent.py b/src/humanloop/agents/types/agent_log_request_agent.py
new file mode 100644
index 00000000..b0e52d93
--- /dev/null
+++ b/src/humanloop/agents/types/agent_log_request_agent.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from ...types.agent_kernel_request import AgentKernelRequest
+
+AgentLogRequestAgent = typing.Union[AgentKernelRequest, str]
diff --git a/src/humanloop/agents/types/agent_log_request_tool_choice.py b/src/humanloop/agents/types/agent_log_request_tool_choice.py
index bfb576c2..b1d79f3a 100644
--- a/src/humanloop/agents/types/agent_log_request_tool_choice.py
+++ b/src/humanloop/agents/types/agent_log_request_tool_choice.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from ...types.tool_choice import ToolChoice
AgentLogRequestToolChoice = typing.Union[
diff --git a/src/humanloop/agents/types/agent_request_reasoning_effort.py b/src/humanloop/agents/types/agent_request_reasoning_effort.py
index b4267202..3af67155 100644
--- a/src/humanloop/agents/types/agent_request_reasoning_effort.py
+++ b/src/humanloop/agents/types/agent_request_reasoning_effort.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from ...types.open_ai_reasoning_effort import OpenAiReasoningEffort
AgentRequestReasoningEffort = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/agents/types/agent_request_template.py b/src/humanloop/agents/types/agent_request_template.py
index f6474824..c4da3e69 100644
--- a/src/humanloop/agents/types/agent_request_template.py
+++ b/src/humanloop/agents/types/agent_request_template.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from ...types.chat_message import ChatMessage
AgentRequestTemplate = typing.Union[str, typing.List[ChatMessage]]
diff --git a/src/humanloop/agents/types/agent_request_tools_item.py b/src/humanloop/agents/types/agent_request_tools_item.py
index e6c54b88..a43d160e 100644
--- a/src/humanloop/agents/types/agent_request_tools_item.py
+++ b/src/humanloop/agents/types/agent_request_tools_item.py
@@ -1,7 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from ...types.agent_linked_file_request import AgentLinkedFileRequest
+
from ...types.agent_inline_tool import AgentInlineTool
+from ...types.agent_linked_file_request import AgentLinkedFileRequest
AgentRequestToolsItem = typing.Union[AgentLinkedFileRequest, AgentInlineTool]
diff --git a/src/humanloop/agents/types/agents_call_request_agent.py b/src/humanloop/agents/types/agents_call_request_agent.py
new file mode 100644
index 00000000..5cfbc669
--- /dev/null
+++ b/src/humanloop/agents/types/agents_call_request_agent.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from ...types.agent_kernel_request import AgentKernelRequest
+
+AgentsCallRequestAgent = typing.Union[AgentKernelRequest, str]
diff --git a/src/humanloop/agents/types/agents_call_request_tool_choice.py b/src/humanloop/agents/types/agents_call_request_tool_choice.py
index 6dee5a04..aee291c9 100644
--- a/src/humanloop/agents/types/agents_call_request_tool_choice.py
+++ b/src/humanloop/agents/types/agents_call_request_tool_choice.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from ...types.tool_choice import ToolChoice
AgentsCallRequestToolChoice = typing.Union[
diff --git a/src/humanloop/agents/types/agents_call_stream_request_agent.py b/src/humanloop/agents/types/agents_call_stream_request_agent.py
new file mode 100644
index 00000000..c803d804
--- /dev/null
+++ b/src/humanloop/agents/types/agents_call_stream_request_agent.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from ...types.agent_kernel_request import AgentKernelRequest
+
+AgentsCallStreamRequestAgent = typing.Union[AgentKernelRequest, str]
diff --git a/src/humanloop/agents/types/agents_call_stream_request_tool_choice.py b/src/humanloop/agents/types/agents_call_stream_request_tool_choice.py
index 83d264f0..9e636efa 100644
--- a/src/humanloop/agents/types/agents_call_stream_request_tool_choice.py
+++ b/src/humanloop/agents/types/agents_call_stream_request_tool_choice.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from ...types.tool_choice import ToolChoice
AgentsCallStreamRequestToolChoice = typing.Union[
diff --git a/src/humanloop/base_client.py b/src/humanloop/base_client.py
index a11298b8..2234d799 100644
--- a/src/humanloop/base_client.py
+++ b/src/humanloop/base_client.py
@@ -1,32 +1,22 @@
# This file was auto-generated by Fern from our API Definition.
-import typing
-from .environment import HumanloopEnvironment
import os
+import typing
+
import httpx
+from .agents.client import AgentsClient, AsyncAgentsClient
from .core.api_error import ApiError
-from .core.client_wrapper import SyncClientWrapper
-from .prompts.client import PromptsClient
-from .tools.client import ToolsClient
-from .datasets.client import DatasetsClient
-from .evaluators.client import EvaluatorsClient
-from .flows.client import FlowsClient
-from .agents.client import AgentsClient
-from .directories.client import DirectoriesClient
-from .files.client import FilesClient
-from .evaluations.client import EvaluationsClient
-from .logs.client import LogsClient
-from .core.client_wrapper import AsyncClientWrapper
-from .prompts.client import AsyncPromptsClient
-from .tools.client import AsyncToolsClient
-from .datasets.client import AsyncDatasetsClient
-from .evaluators.client import AsyncEvaluatorsClient
-from .flows.client import AsyncFlowsClient
-from .agents.client import AsyncAgentsClient
-from .directories.client import AsyncDirectoriesClient
-from .files.client import AsyncFilesClient
-from .evaluations.client import AsyncEvaluationsClient
-from .logs.client import AsyncLogsClient
+from .core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from .datasets.client import AsyncDatasetsClient, DatasetsClient
+from .directories.client import AsyncDirectoriesClient, DirectoriesClient
+from .environment import HumanloopEnvironment
+from .evaluations.client import AsyncEvaluationsClient, EvaluationsClient
+from .evaluators.client import AsyncEvaluatorsClient, EvaluatorsClient
+from .files.client import AsyncFilesClient, FilesClient
+from .flows.client import AsyncFlowsClient, FlowsClient
+from .logs.client import AsyncLogsClient, LogsClient
+from .prompts.client import AsyncPromptsClient, PromptsClient
+from .tools.client import AsyncToolsClient, ToolsClient
class BaseHumanloop:
@@ -41,8 +31,6 @@ class BaseHumanloop:
environment : HumanloopEnvironment
The environment to use for requests from the client. from .environment import HumanloopEnvironment
-
-
Defaults to HumanloopEnvironment.DEFAULT
@@ -60,10 +48,7 @@ class BaseHumanloop:
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
"""
def __init__(
@@ -117,8 +102,6 @@ class AsyncBaseHumanloop:
environment : HumanloopEnvironment
The environment to use for requests from the client. from .environment import HumanloopEnvironment
-
-
Defaults to HumanloopEnvironment.DEFAULT
@@ -136,10 +119,7 @@ class AsyncBaseHumanloop:
Examples
--------
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
"""
def __init__(
diff --git a/src/humanloop/core/__init__.py b/src/humanloop/core/__init__.py
index d3eb2a8f..48f3afaa 100644
--- a/src/humanloop/core/__init__.py
+++ b/src/humanloop/core/__init__.py
@@ -1,5 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
+# isort: skip_file
+
from .api_error import ApiError
from .client_wrapper import AsyncClientWrapper, BaseClientWrapper, SyncClientWrapper
from .datetime_utils import serialize_datetime
diff --git a/src/humanloop/core/api_error.py b/src/humanloop/core/api_error.py
index 2e9fc543..6f850a60 100644
--- a/src/humanloop/core/api_error.py
+++ b/src/humanloop/core/api_error.py
@@ -1,15 +1,23 @@
# This file was auto-generated by Fern from our API Definition.
-import typing
+from typing import Any, Dict, Optional
class ApiError(Exception):
- status_code: typing.Optional[int]
- body: typing.Any
+ headers: Optional[Dict[str, str]]
+ status_code: Optional[int]
+ body: Any
- def __init__(self, *, status_code: typing.Optional[int] = None, body: typing.Any = None):
+ def __init__(
+ self,
+ *,
+ headers: Optional[Dict[str, str]] = None,
+ status_code: Optional[int] = None,
+ body: Any = None,
+ ) -> None:
+ self.headers = headers
self.status_code = status_code
self.body = body
def __str__(self) -> str:
- return f"status_code: {self.status_code}, body: {self.body}"
+ return f"headers: {self.headers}, status_code: {self.status_code}, body: {self.body}"
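Calling code can now read response headers off a failure without reaching into private state. A small sketch of the widened ApiError surface; the handler and the header name are illustrative only:

from humanloop.core.api_error import ApiError

def describe(err: ApiError) -> str:
    # headers is new in this regeneration and may be None when the caller did not supply it.
    request_id = (err.headers or {}).get("x-request-id")
    return f"status={err.status_code} request_id={request_id} body={err.body}"

# __str__ now reports headers alongside status_code and body.
err = ApiError(status_code=422, headers={"x-request-id": "req_123"}, body={"detail": "invalid"})
print(describe(err))
print(str(err))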
diff --git a/src/humanloop/core/client_wrapper.py b/src/humanloop/core/client_wrapper.py
index 71036800..e577cdf5 100644
--- a/src/humanloop/core/client_wrapper.py
+++ b/src/humanloop/core/client_wrapper.py
@@ -1,9 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
import httpx
-from .http_client import HttpClient
-from .http_client import AsyncHttpClient
+from .http_client import AsyncHttpClient, HttpClient
class BaseClientWrapper:
@@ -14,10 +14,10 @@ def __init__(self, *, api_key: str, base_url: str, timeout: typing.Optional[floa
def get_headers(self) -> typing.Dict[str, str]:
headers: typing.Dict[str, str] = {
- "User-Agent": "humanloop/0.8.36b1",
+ "User-Agent": "humanloop/0.8.36b2",
"X-Fern-Language": "Python",
"X-Fern-SDK-Name": "humanloop",
- "X-Fern-SDK-Version": "0.8.36b1",
+ "X-Fern-SDK-Version": "0.8.36b2",
}
headers["X-API-KEY"] = self.api_key
return headers
diff --git a/src/humanloop/core/http_response.py b/src/humanloop/core/http_response.py
index c72a9130..48a1798a 100644
--- a/src/humanloop/core/http_response.py
+++ b/src/humanloop/core/http_response.py
@@ -5,20 +5,31 @@
import httpx
T = TypeVar("T")
+"""Generic to represent the underlying type of the data wrapped by the HTTP response."""
-class HttpResponse(Generic[T]):
+class BaseHttpResponse:
+ """Minimalist HTTP response wrapper that exposes response headers."""
+
_response: httpx.Response
- _data: T
- def __init__(self, response: httpx.Response, data: T):
+ def __init__(self, response: httpx.Response):
self._response = response
- self._data = data
@property
def headers(self) -> Dict[str, str]:
return dict(self._response.headers)
+
+class HttpResponse(Generic[T], BaseHttpResponse):
+ """HTTP response wrapper that exposes response headers and data."""
+
+ _data: T
+
+ def __init__(self, response: httpx.Response, data: T):
+ super().__init__(response)
+ self._data = data
+
@property
def data(self) -> T:
return self._data
@@ -27,18 +38,15 @@ def close(self) -> None:
self._response.close()
-class AsyncHttpResponse(Generic[T]):
- _response: httpx.Response
+class AsyncHttpResponse(Generic[T], BaseHttpResponse):
+ """HTTP response wrapper that exposes response headers and data."""
+
_data: T
def __init__(self, response: httpx.Response, data: T):
- self._response = response
+ super().__init__(response)
self._data = data
- @property
- def headers(self) -> Dict[str, str]:
- return dict(self._response.headers)
-
@property
def data(self) -> T:
return self._data
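To make the split concrete: BaseHttpResponse carries only the headers view, and HttpResponse/AsyncHttpResponse layer the typed payload on top via super().__init__. A sketch against a locally built httpx.Response so it runs without a network call; the header and payload values are illustrative:

import httpx

from humanloop.core.http_response import BaseHttpResponse, HttpResponse

raw = httpx.Response(200, headers={"x-request-id": "req_123"}, json={"id": "ds_1"})

base = BaseHttpResponse(response=raw)                   # headers only
wrapped = HttpResponse(response=raw, data=raw.json())   # headers plus typed data

print(base.headers["x-request-id"])   # headers exposed as a plain dict
print(wrapped.data)                   # {'id': 'ds_1'}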
diff --git a/src/humanloop/core/pagination.py b/src/humanloop/core/pagination.py
index e7e31291..209a1ff1 100644
--- a/src/humanloop/core/pagination.py
+++ b/src/humanloop/core/pagination.py
@@ -1,12 +1,14 @@
# This file was auto-generated by Fern from our API Definition.
-import typing
+from __future__ import annotations
-import pydantic
-from typing_extensions import Self
+from dataclasses import dataclass
+from typing import AsyncIterator, Awaitable, Callable, Generic, Iterator, List, Optional, TypeVar
-# Generic to represent the underlying type of the results within a page
-T = typing.TypeVar("T")
+from .http_response import BaseHttpResponse
+
+T = TypeVar("T")
+"""Generic to represent the underlying type of the results within a page"""
# SDKs implement a Page ABC per-pagination request, the endpoint then returns a pager that wraps this type
@@ -18,70 +20,63 @@
# # This should be the outer function that returns the SyncPager again
# get_next=lambda: list(..., cursor: response.cursor) (or list(..., offset: offset + 1))
# )
-class BasePage(pydantic.BaseModel, typing.Generic[T]):
- has_next: bool
- items: typing.Optional[typing.List[T]]
-
-
-class SyncPage(BasePage[T], typing.Generic[T]):
- get_next: typing.Optional[typing.Callable[[], typing.Optional[Self]]]
-
-class AsyncPage(BasePage[T], typing.Generic[T]):
- get_next: typing.Optional[typing.Callable[[], typing.Awaitable[typing.Optional[Self]]]]
-
-
-# ----------------------------
+@dataclass(frozen=True)
+class SyncPager(Generic[T]):
+ get_next: Optional[Callable[[], Optional[SyncPager[T]]]]
+ has_next: bool
+ items: Optional[List[T]]
+ response: Optional[BaseHttpResponse]
-class SyncPager(SyncPage[T], typing.Generic[T]):
# Here we type ignore the iterator to avoid a mypy error
     # caused by the type conflict with Pydantic's __iter__ method
# brought in by extending the base model
- def __iter__(self) -> typing.Iterator[T]: # type: ignore
+ def __iter__(self) -> Iterator[T]: # type: ignore[override]
for page in self.iter_pages():
if page.items is not None:
- for item in page.items:
- yield item
+ yield from page.items
+
+ def iter_pages(self) -> Iterator[SyncPager[T]]:
+ page: Optional[SyncPager[T]] = self
+ while page is not None:
+ yield page
- def iter_pages(self) -> typing.Iterator[SyncPage[T]]:
- page: typing.Union[SyncPager[T], None] = self
- while True:
- if page is not None:
- yield page
- if page.has_next and page.get_next is not None:
- page = page.get_next()
- if page is None or page.items is None or len(page.items) == 0:
- return
- else:
- return
- else:
+ if not page.has_next or page.get_next is None:
return
- def next_page(self) -> typing.Optional[SyncPage[T]]:
+ page = page.get_next()
+ if page is None or page.items is None or len(page.items) == 0:
+ return
+
+ def next_page(self) -> Optional[SyncPager[T]]:
return self.get_next() if self.get_next is not None else None
-class AsyncPager(AsyncPage[T], typing.Generic[T]):
- async def __aiter__(self) -> typing.AsyncIterator[T]: # type: ignore
+@dataclass(frozen=True)
+class AsyncPager(Generic[T]):
+ get_next: Optional[Callable[[], Awaitable[Optional[AsyncPager[T]]]]]
+ has_next: bool
+ items: Optional[List[T]]
+ response: Optional[BaseHttpResponse]
+
+ async def __aiter__(self) -> AsyncIterator[T]:
async for page in self.iter_pages():
if page.items is not None:
for item in page.items:
yield item
- async def iter_pages(self) -> typing.AsyncIterator[AsyncPage[T]]:
- page: typing.Union[AsyncPager[T], None] = self
- while True:
- if page is not None:
- yield page
- if page is not None and page.has_next and page.get_next is not None:
- page = await page.get_next()
- if page is None or page.items is None or len(page.items) == 0:
- return
- else:
- return
- else:
+ async def iter_pages(self) -> AsyncIterator[AsyncPager[T]]:
+ page: Optional[AsyncPager[T]] = self
+ while page is not None:
+ yield page
+
+ if not page.has_next or page.get_next is None:
+ return
+
+ page = await page.get_next()
+ if page is None or page.items is None or len(page.items) == 0:
return
- async def next_page(self) -> typing.Optional[AsyncPage[T]]:
+ async def next_page(self) -> Optional[AsyncPager[T]]:
return await self.get_next() if self.get_next is not None else None
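Since the pagers are now plain frozen dataclasses rather than Pydantic models, they can be built by hand, which also makes the iteration contract easy to see. A sketch with two hand-built pages of strings standing in for real API records:

from humanloop.core.pagination import SyncPager

second: SyncPager[str] = SyncPager(get_next=None, has_next=False, items=["c"], response=None)
first: SyncPager[str] = SyncPager(get_next=lambda: second, has_next=True, items=["a", "b"], response=None)

# Flat iteration walks every page through iter_pages and yields items in order.
print(list(first))                                  # ['a', 'b', 'c']
# Page-by-page iteration and explicit next_page() are available as before.
print([page.items for page in first.iter_pages()])  # [['a', 'b'], ['c']]
print(first.next_page() is second)                  # True

In the generated clients, get_next re-issues the list call with page + 1 and response carries the BaseHttpResponse of the page that was just fetched.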
diff --git a/src/humanloop/core/pydantic_utilities.py b/src/humanloop/core/pydantic_utilities.py
index f7467bcc..60a2c713 100644
--- a/src/humanloop/core/pydantic_utilities.py
+++ b/src/humanloop/core/pydantic_utilities.py
@@ -2,87 +2,65 @@
# nopycln: file
import datetime as dt
-import typing
from collections import defaultdict
+from typing import Any, Callable, ClassVar, Dict, List, Mapping, Optional, Set, Tuple, Type, TypeVar, Union, cast
import pydantic
-import typing_extensions
-from .datetime_utils import serialize_datetime
-from .serialization import convert_and_respect_annotation_metadata
IS_PYDANTIC_V2 = pydantic.VERSION.startswith("2.")
if IS_PYDANTIC_V2:
- # isort will try to reformat the comments on these imports, which breaks mypy
- # isort: off
- from pydantic.v1.datetime_parse import ( # type: ignore # pyright: ignore[reportMissingImports] # Pydantic v2
- parse_date as parse_date,
- )
- from pydantic.v1.datetime_parse import ( # pyright: ignore[reportMissingImports] # Pydantic v2
- parse_datetime as parse_datetime,
- )
- from pydantic.v1.json import ( # type: ignore # pyright: ignore[reportMissingImports] # Pydantic v2
- ENCODERS_BY_TYPE as encoders_by_type,
- )
- from pydantic.v1.typing import ( # type: ignore # pyright: ignore[reportMissingImports] # Pydantic v2
- get_args as get_args,
- )
- from pydantic.v1.typing import ( # pyright: ignore[reportMissingImports] # Pydantic v2
- get_origin as get_origin,
- )
- from pydantic.v1.typing import ( # pyright: ignore[reportMissingImports] # Pydantic v2
- is_literal_type as is_literal_type,
- )
- from pydantic.v1.typing import ( # pyright: ignore[reportMissingImports] # Pydantic v2
- is_union as is_union,
- )
- from pydantic.v1.fields import ModelField as ModelField # type: ignore # pyright: ignore[reportMissingImports] # Pydantic v2
+ from pydantic.v1.datetime_parse import parse_date as parse_date
+ from pydantic.v1.datetime_parse import parse_datetime as parse_datetime
+ from pydantic.v1.fields import ModelField as ModelField
+ from pydantic.v1.json import ENCODERS_BY_TYPE as encoders_by_type # type: ignore[attr-defined]
+ from pydantic.v1.typing import get_args as get_args
+ from pydantic.v1.typing import get_origin as get_origin
+ from pydantic.v1.typing import is_literal_type as is_literal_type
+ from pydantic.v1.typing import is_union as is_union
else:
- from pydantic.datetime_parse import parse_date as parse_date # type: ignore # Pydantic v1
- from pydantic.datetime_parse import parse_datetime as parse_datetime # type: ignore # Pydantic v1
- from pydantic.fields import ModelField as ModelField # type: ignore # Pydantic v1
- from pydantic.json import ENCODERS_BY_TYPE as encoders_by_type # type: ignore # Pydantic v1
- from pydantic.typing import get_args as get_args # type: ignore # Pydantic v1
- from pydantic.typing import get_origin as get_origin # type: ignore # Pydantic v1
- from pydantic.typing import is_literal_type as is_literal_type # type: ignore # Pydantic v1
- from pydantic.typing import is_union as is_union # type: ignore # Pydantic v1
-
- # isort: on
+ from pydantic.datetime_parse import parse_date as parse_date # type: ignore[no-redef]
+ from pydantic.datetime_parse import parse_datetime as parse_datetime # type: ignore[no-redef]
+ from pydantic.fields import ModelField as ModelField # type: ignore[attr-defined, no-redef]
+ from pydantic.json import ENCODERS_BY_TYPE as encoders_by_type # type: ignore[no-redef]
+ from pydantic.typing import get_args as get_args # type: ignore[no-redef]
+ from pydantic.typing import get_origin as get_origin # type: ignore[no-redef]
+ from pydantic.typing import is_literal_type as is_literal_type # type: ignore[no-redef]
+ from pydantic.typing import is_union as is_union # type: ignore[no-redef]
+from .datetime_utils import serialize_datetime
+from .serialization import convert_and_respect_annotation_metadata
+from typing_extensions import TypeAlias
-T = typing.TypeVar("T")
-Model = typing.TypeVar("Model", bound=pydantic.BaseModel)
+T = TypeVar("T")
+Model = TypeVar("Model", bound=pydantic.BaseModel)
-def parse_obj_as(type_: typing.Type[T], object_: typing.Any) -> T:
+def parse_obj_as(type_: Type[T], object_: Any) -> T:
dealiased_object = convert_and_respect_annotation_metadata(object_=object_, annotation=type_, direction="read")
if IS_PYDANTIC_V2:
- adapter = pydantic.TypeAdapter(type_) # type: ignore # Pydantic v2
+ adapter = pydantic.TypeAdapter(type_) # type: ignore[attr-defined]
return adapter.validate_python(dealiased_object)
- else:
- return pydantic.parse_obj_as(type_, dealiased_object)
+ return pydantic.parse_obj_as(type_, dealiased_object)
-def to_jsonable_with_fallback(
- obj: typing.Any, fallback_serializer: typing.Callable[[typing.Any], typing.Any]
-) -> typing.Any:
+def to_jsonable_with_fallback(obj: Any, fallback_serializer: Callable[[Any], Any]) -> Any:
if IS_PYDANTIC_V2:
from pydantic_core import to_jsonable_python
return to_jsonable_python(obj, fallback=fallback_serializer)
- else:
- return fallback_serializer(obj)
+ return fallback_serializer(obj)
class UniversalBaseModel(pydantic.BaseModel):
if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(
+ model_config: ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( # type: ignore[typeddict-unknown-key]
# Allow fields beginning with `model_` to be used in the model
protected_namespaces=(),
- ) # type: ignore # Pydantic v2
+ )
- @pydantic.model_serializer(mode="wrap", when_used="json") # type: ignore # Pydantic v2
- def serialize_model(self, handler: pydantic.SerializerFunctionWrapHandler) -> typing.Any: # type: ignore # Pydantic v2
+ @pydantic.model_serializer(mode="wrap", when_used="json") # type: ignore[attr-defined]
+ def serialize_model(self, handler: pydantic.SerializerFunctionWrapHandler) -> Any: # type: ignore[name-defined]
serialized = handler(self)
data = {k: serialize_datetime(v) if isinstance(v, dt.datetime) else v for k, v in serialized.items()}
return data
@@ -94,34 +72,28 @@ class Config:
json_encoders = {dt.datetime: serialize_datetime}
@classmethod
- def model_construct(
- cls: typing.Type["Model"], _fields_set: typing.Optional[typing.Set[str]] = None, **values: typing.Any
- ) -> "Model":
+ def model_construct(cls: Type["Model"], _fields_set: Optional[Set[str]] = None, **values: Any) -> "Model":
dealiased_object = convert_and_respect_annotation_metadata(object_=values, annotation=cls, direction="read")
return cls.construct(_fields_set, **dealiased_object)
@classmethod
- def construct(
- cls: typing.Type["Model"], _fields_set: typing.Optional[typing.Set[str]] = None, **values: typing.Any
- ) -> "Model":
+ def construct(cls: Type["Model"], _fields_set: Optional[Set[str]] = None, **values: Any) -> "Model":
dealiased_object = convert_and_respect_annotation_metadata(object_=values, annotation=cls, direction="read")
if IS_PYDANTIC_V2:
- return super().model_construct(_fields_set, **dealiased_object) # type: ignore # Pydantic v2
- else:
- return super().construct(_fields_set, **dealiased_object)
+ return super().model_construct(_fields_set, **dealiased_object) # type: ignore[misc]
+ return super().construct(_fields_set, **dealiased_object)
- def json(self, **kwargs: typing.Any) -> str:
- kwargs_with_defaults: typing.Any = {
+ def json(self, **kwargs: Any) -> str:
+ kwargs_with_defaults = {
"by_alias": True,
"exclude_unset": True,
**kwargs,
}
if IS_PYDANTIC_V2:
- return super().model_dump_json(**kwargs_with_defaults) # type: ignore # Pydantic v2
- else:
- return super().json(**kwargs_with_defaults)
+ return super().model_dump_json(**kwargs_with_defaults) # type: ignore[misc]
+ return super().json(**kwargs_with_defaults)
- def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ def dict(self, **kwargs: Any) -> Dict[str, Any]:
"""
Override the default dict method to `exclude_unset` by default. This function patches
         `exclude_unset` so that fields with non-None default values are still included.
@@ -132,21 +104,21 @@ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
# We'd ideally do the same for Pydantic V2, but it shells out to a library to serialize models
# that we have less control over, and this is less intrusive than custom serializers for now.
if IS_PYDANTIC_V2:
- kwargs_with_defaults_exclude_unset: typing.Any = {
+ kwargs_with_defaults_exclude_unset = {
**kwargs,
"by_alias": True,
"exclude_unset": True,
"exclude_none": False,
}
- kwargs_with_defaults_exclude_none: typing.Any = {
+ kwargs_with_defaults_exclude_none = {
**kwargs,
"by_alias": True,
"exclude_none": True,
"exclude_unset": False,
}
dict_dump = deep_union_pydantic_dicts(
- super().model_dump(**kwargs_with_defaults_exclude_unset), # type: ignore # Pydantic v2
- super().model_dump(**kwargs_with_defaults_exclude_none), # type: ignore # Pydantic v2
+ super().model_dump(**kwargs_with_defaults_exclude_unset), # type: ignore[misc]
+ super().model_dump(**kwargs_with_defaults_exclude_none), # type: ignore[misc]
)
else:
@@ -166,7 +138,7 @@ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
if default is not None:
self.__fields_set__.add(name)
- kwargs_with_defaults_exclude_unset_include_fields: typing.Any = {
+ kwargs_with_defaults_exclude_unset_include_fields = {
"by_alias": True,
"exclude_unset": True,
"include": _fields_set,
@@ -178,12 +150,10 @@ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
return convert_and_respect_annotation_metadata(object_=dict_dump, annotation=self.__class__, direction="write")
-def _union_list_of_pydantic_dicts(
- source: typing.List[typing.Any], destination: typing.List[typing.Any]
-) -> typing.List[typing.Any]:
- converted_list: typing.List[typing.Any] = []
+def _union_list_of_pydantic_dicts(source: List[Any], destination: List[Any]) -> List[Any]:
+ converted_list: List[Any] = []
for i, item in enumerate(source):
- destination_value = destination[i] # type: ignore
+ destination_value = destination[i]
if isinstance(item, dict):
converted_list.append(deep_union_pydantic_dicts(item, destination_value))
elif isinstance(item, list):
@@ -193,9 +163,7 @@ def _union_list_of_pydantic_dicts(
return converted_list
-def deep_union_pydantic_dicts(
- source: typing.Dict[str, typing.Any], destination: typing.Dict[str, typing.Any]
-) -> typing.Dict[str, typing.Any]:
+def deep_union_pydantic_dicts(source: Dict[str, Any], destination: Dict[str, Any]) -> Dict[str, Any]:
for key, value in source.items():
node = destination.setdefault(key, {})
if isinstance(value, dict):
@@ -213,18 +181,16 @@ def deep_union_pydantic_dicts(
if IS_PYDANTIC_V2:
- class V2RootModel(UniversalBaseModel, pydantic.RootModel): # type: ignore # Pydantic v2
+ class V2RootModel(UniversalBaseModel, pydantic.RootModel): # type: ignore[name-defined, type-arg]
pass
- UniversalRootModel: typing_extensions.TypeAlias = V2RootModel # type: ignore
+ UniversalRootModel: TypeAlias = V2RootModel # type: ignore[misc]
else:
- UniversalRootModel: typing_extensions.TypeAlias = UniversalBaseModel # type: ignore
+ UniversalRootModel: TypeAlias = UniversalBaseModel # type: ignore[misc, no-redef]
-def encode_by_type(o: typing.Any) -> typing.Any:
- encoders_by_class_tuples: typing.Dict[typing.Callable[[typing.Any], typing.Any], typing.Tuple[typing.Any, ...]] = (
- defaultdict(tuple)
- )
+def encode_by_type(o: Any) -> Any:
+ encoders_by_class_tuples: Dict[Callable[[Any], Any], Tuple[Any, ...]] = defaultdict(tuple)
for type_, encoder in encoders_by_type.items():
encoders_by_class_tuples[encoder] += (type_,)
@@ -235,54 +201,49 @@ def encode_by_type(o: typing.Any) -> typing.Any:
return encoder(o)
-def update_forward_refs(model: typing.Type["Model"], **localns: typing.Any) -> None:
+def update_forward_refs(model: Type["Model"], **localns: Any) -> None:
if IS_PYDANTIC_V2:
- model.model_rebuild(raise_errors=False) # type: ignore # Pydantic v2
+ model.model_rebuild(raise_errors=False) # type: ignore[attr-defined]
else:
model.update_forward_refs(**localns)
# Mirrors Pydantic's internal typing
-AnyCallable = typing.Callable[..., typing.Any]
+AnyCallable = Callable[..., Any]
def universal_root_validator(
pre: bool = False,
-) -> typing.Callable[[AnyCallable], AnyCallable]:
+) -> Callable[[AnyCallable], AnyCallable]:
def decorator(func: AnyCallable) -> AnyCallable:
if IS_PYDANTIC_V2:
- return pydantic.model_validator(mode="before" if pre else "after")(func) # type: ignore # Pydantic v2
- else:
- return pydantic.root_validator(pre=pre)(func) # type: ignore # Pydantic v1
+ return cast(AnyCallable, pydantic.model_validator(mode="before" if pre else "after")(func)) # type: ignore[attr-defined]
+ return cast(AnyCallable, pydantic.root_validator(pre=pre)(func)) # type: ignore[call-overload]
return decorator
-def universal_field_validator(field_name: str, pre: bool = False) -> typing.Callable[[AnyCallable], AnyCallable]:
+def universal_field_validator(field_name: str, pre: bool = False) -> Callable[[AnyCallable], AnyCallable]:
def decorator(func: AnyCallable) -> AnyCallable:
if IS_PYDANTIC_V2:
- return pydantic.field_validator(field_name, mode="before" if pre else "after")(func) # type: ignore # Pydantic v2
- else:
- return pydantic.validator(field_name, pre=pre)(func) # type: ignore # Pydantic v1
+ return cast(AnyCallable, pydantic.field_validator(field_name, mode="before" if pre else "after")(func)) # type: ignore[attr-defined]
+ return cast(AnyCallable, pydantic.validator(field_name, pre=pre)(func))
return decorator
-PydanticField = typing.Union[ModelField, pydantic.fields.FieldInfo]
+PydanticField = Union[ModelField, pydantic.fields.FieldInfo]
-def _get_model_fields(
- model: typing.Type["Model"],
-) -> typing.Mapping[str, PydanticField]:
+def _get_model_fields(model: Type["Model"]) -> Mapping[str, PydanticField]:
if IS_PYDANTIC_V2:
- return model.model_fields # type: ignore # Pydantic v2
- else:
- return model.__fields__ # type: ignore # Pydantic v1
+ return cast(Mapping[str, PydanticField], model.model_fields) # type: ignore[attr-defined]
+ return cast(Mapping[str, PydanticField], model.__fields__)
-def _get_field_default(field: PydanticField) -> typing.Any:
+def _get_field_default(field: PydanticField) -> Any:
try:
- value = field.get_default() # type: ignore # Pydantic < v1.10.15
+ value = field.get_default() # type: ignore[union-attr]
except:
value = field.default
if IS_PYDANTIC_V2:
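The v1/v2 bridging behaviour itself is unchanged by this cleanup; only the imports and type-ignore codes were tightened. A brief sketch of the two helpers most callers touch; the Example model below is hypothetical and exists only to show the defaults:

from typing import List, Optional

from humanloop.core.pydantic_utilities import UniversalBaseModel, parse_obj_as

class Example(UniversalBaseModel):
    name: str
    tags: Optional[List[str]] = None

# parse_obj_as routes to pydantic.TypeAdapter on v2 and pydantic.parse_obj_as on v1.
example = parse_obj_as(Example, {"name": "demo"})

# dict() keeps the SDK defaults of by_alias / exclude_unset, so the unset tags field is omitted.
print(example.dict())  # {'name': 'demo'}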
diff --git a/src/humanloop/datasets/__init__.py b/src/humanloop/datasets/__init__.py
index 5b47c541..ff5c1227 100644
--- a/src/humanloop/datasets/__init__.py
+++ b/src/humanloop/datasets/__init__.py
@@ -1,5 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
+# isort: skip_file
+
from .types import ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints
__all__ = ["ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints"]
diff --git a/src/humanloop/datasets/client.py b/src/humanloop/datasets/client.py
index 0e70c39c..30c7d310 100644
--- a/src/humanloop/datasets/client.py
+++ b/src/humanloop/datasets/client.py
@@ -1,33 +1,23 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from ..core.client_wrapper import SyncClientWrapper
-from .raw_client import RawDatasetsClient
-from ..types.project_sort_by import ProjectSortBy
-from ..types.sort_order import SortOrder
+
+from .. import core
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pagination import AsyncPager, SyncPager
from ..core.request_options import RequestOptions
-from ..core.pagination import SyncPager
-from ..types.dataset_response import DatasetResponse
-from ..types.paginated_dataset_response import PaginatedDatasetResponse
-from ..core.unchecked_base_model import construct_type
-from ..errors.unprocessable_entity_error import UnprocessableEntityError
-from ..types.http_validation_error import HttpValidationError
-from json.decoder import JSONDecodeError
-from ..core.api_error import ApiError
from ..requests.create_datapoint_request import CreateDatapointRequestParams
-from ..types.update_dateset_action import UpdateDatesetAction
from ..types.datapoint_response import DatapointResponse
-from ..core.jsonable_encoder import jsonable_encoder
-from ..types.paginated_datapoint_response import PaginatedDatapointResponse
+from ..types.dataset_response import DatasetResponse
+from ..types.file_environment_response import FileEnvironmentResponse
+from ..types.file_sort_by import FileSortBy
+from ..types.list_datasets import ListDatasets
+from ..types.sort_order import SortOrder
+from ..types.update_dateset_action import UpdateDatesetAction
+from .raw_client import AsyncRawDatasetsClient, RawDatasetsClient
from .types.list_versions_datasets_id_versions_get_request_include_datapoints import (
ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints,
)
-from ..types.list_datasets import ListDatasets
-from .. import core
-from ..types.file_environment_response import FileEnvironmentResponse
-from ..core.client_wrapper import AsyncClientWrapper
-from .raw_client import AsyncRawDatasetsClient
-from ..core.pagination import AsyncPager
# this is used as the default value for optional parameters
OMIT = typing.cast(typing.Any, ...)
@@ -55,7 +45,7 @@ def list(
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
user_filter: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> SyncPager[DatasetResponse]:
@@ -76,7 +66,7 @@ def list(
user_filter : typing.Optional[str]
Case-insensitive filter for users in the Dataset. This filter matches against both email address and name of users.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort Datasets by
order : typing.Optional[SortOrder]
@@ -93,68 +83,23 @@ def list(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- response = client.datasets.list(
- size=1,
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ response = client.datasets.list(size=1, )
for item in response:
yield item
# alternatively, you can paginate page-by-page
for page in response.iter_pages():
yield page
"""
- page = page if page is not None else 1
- _response = self._raw_client._client_wrapper.httpx_client.request(
- "datasets",
- method="GET",
- params={
- "page": page,
- "size": size,
- "name": name,
- "user_filter": user_filter,
- "sort_by": sort_by,
- "order": order,
- },
+ return self._raw_client.list(
+ page=page,
+ size=size,
+ name=name,
+ user_filter=user_filter,
+ sort_by=sort_by,
+ order=order,
request_options=request_options,
)
- try:
- if 200 <= _response.status_code < 300:
- _parsed_response = typing.cast(
- PaginatedDatasetResponse,
- construct_type(
- type_=PaginatedDatasetResponse, # type: ignore
- object_=_response.json(),
- ),
- )
- _has_next = True
- _get_next = lambda: self.list(
- page=page + 1,
- size=size,
- name=name,
- user_filter=user_filter,
- sort_by=sort_by,
- order=order,
- request_options=request_options,
- )
- _items = _parsed_response.records
- return SyncPager(has_next=_has_next, items=_items, get_next=_get_next)
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(
- HttpValidationError,
- construct_type(
- type_=HttpValidationError, # type: ignore
- object_=_response.json(),
- ),
- )
- )
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
def upsert(
self,
@@ -239,28 +184,10 @@ def upsert(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.datasets.upsert(
- path="test-questions",
- datapoints=[
- {
- "inputs": {"question": "What is the capital of France?"},
- "target": {"answer": "Paris"},
- },
- {
- "inputs": {"question": "Who wrote Hamlet?"},
- "target": {"answer": "William Shakespeare"},
- },
- ],
- action="set",
- version_name="test-questions-v1",
- version_description="Add two new questions and answers",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.datasets.upsert(path='test-questions', datapoints=[{'inputs': {'question': 'What is the capital of France?'}, 'target': {'answer': 'Paris'}}, {'inputs': {'question': 'Who wrote Hamlet?'}, 'target': {'answer': 'William Shakespeare'}}], action="set", version_name='test-questions-v1', version_description='Add two new questions and answers', )
"""
- response = self._raw_client.upsert(
+ _response = self._raw_client.upsert(
datapoints=datapoints,
version_id=version_id,
environment=environment,
@@ -273,7 +200,7 @@ def upsert(
version_description=version_description,
request_options=request_options,
)
- return response.data
+ return _response.data
def get(
self,
@@ -320,24 +247,17 @@ def get(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.datasets.get(
- id="ds_b0baF1ca7652",
- version_id="dsv_6L78pqrdFi2xa",
- include_datapoints=True,
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.datasets.get(id='ds_b0baF1ca7652', version_id='dsv_6L78pqrdFi2xa', include_datapoints=True, )
"""
- response = self._raw_client.get(
+ _response = self._raw_client.get(
id,
version_id=version_id,
environment=environment,
include_datapoints=include_datapoints,
request_options=request_options,
)
- return response.data
+ return _response.data
def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
"""
@@ -358,16 +278,11 @@ def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] =
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.datasets.delete(
- id="id",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.datasets.delete(id='id', )
"""
- response = self._raw_client.delete(id, request_options=request_options)
- return response.data
+ _response = self._raw_client.delete(id, request_options=request_options)
+ return _response.data
def move(
self,
@@ -402,16 +317,11 @@ def move(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.datasets.move(
- id="id",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.datasets.move(id='id', )
"""
- response = self._raw_client.move(id, path=path, name=name, request_options=request_options)
- return response.data
+ _response = self._raw_client.move(id, path=path, name=name, request_options=request_options)
+ return _response.data
def list_datapoints(
self,
@@ -454,66 +364,17 @@ def list_datapoints(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- response = client.datasets.list_datapoints(
- id="ds_b0baF1ca7652",
- size=1,
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ response = client.datasets.list_datapoints(id='ds_b0baF1ca7652', size=1, )
for item in response:
yield item
# alternatively, you can paginate page-by-page
for page in response.iter_pages():
yield page
"""
- page = page if page is not None else 1
- _response = self._raw_client._client_wrapper.httpx_client.request(
- f"datasets/{jsonable_encoder(id)}/datapoints",
- method="GET",
- params={
- "version_id": version_id,
- "environment": environment,
- "page": page,
- "size": size,
- },
- request_options=request_options,
+ return self._raw_client.list_datapoints(
+ id, version_id=version_id, environment=environment, page=page, size=size, request_options=request_options
)
- try:
- if 200 <= _response.status_code < 300:
- _parsed_response = typing.cast(
- PaginatedDatapointResponse,
- construct_type(
- type_=PaginatedDatapointResponse, # type: ignore
- object_=_response.json(),
- ),
- )
- _has_next = True
- _get_next = lambda: self.list_datapoints(
- id,
- version_id=version_id,
- environment=environment,
- page=page + 1,
- size=size,
- request_options=request_options,
- )
- _items = _parsed_response.records
- return SyncPager(has_next=_has_next, items=_items, get_next=_get_next)
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(
- HttpValidationError,
- construct_type(
- type_=HttpValidationError, # type: ignore
- object_=_response.json(),
- ),
- )
- )
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
def list_versions(
self,
@@ -544,18 +405,13 @@ def list_versions(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.datasets.list_versions(
- id="ds_b0baF1ca7652",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.datasets.list_versions(id='ds_b0baF1ca7652', )
"""
- response = self._raw_client.list_versions(
+ _response = self._raw_client.list_versions(
id, include_datapoints=include_datapoints, request_options=request_options
)
- return response.data
+ return _response.data
def delete_dataset_version(
self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -581,17 +437,11 @@ def delete_dataset_version(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.datasets.delete_dataset_version(
- id="id",
- version_id="version_id",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.datasets.delete_dataset_version(id='id', version_id='version_id', )
"""
- response = self._raw_client.delete_dataset_version(id, version_id, request_options=request_options)
- return response.data
+ _response = self._raw_client.delete_dataset_version(id, version_id, request_options=request_options)
+ return _response.data
def update_dataset_version(
self,
@@ -630,19 +480,13 @@ def update_dataset_version(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.datasets.update_dataset_version(
- id="id",
- version_id="version_id",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.datasets.update_dataset_version(id='id', version_id='version_id', )
"""
- response = self._raw_client.update_dataset_version(
+ _response = self._raw_client.update_dataset_version(
id, version_id, name=name, description=description, request_options=request_options
)
- return response.data
+ return _response.data
def upload_csv(
self,
@@ -699,15 +543,10 @@ def upload_csv(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.datasets.upload_csv(
- id="id",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.datasets.upload_csv(id='id', )
"""
- response = self._raw_client.upload_csv(
+ _response = self._raw_client.upload_csv(
id,
file=file,
version_id=version_id,
@@ -716,7 +555,7 @@ def upload_csv(
version_description=version_description,
request_options=request_options,
)
- return response.data
+ return _response.data
def set_deployment(
self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
@@ -748,20 +587,13 @@ def set_deployment(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.datasets.set_deployment(
- id="ds_b0baF1ca7652",
- environment_id="staging",
- version_id="dsv_6L78pqrdFi2xa",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.datasets.set_deployment(id='ds_b0baF1ca7652', environment_id='staging', version_id='dsv_6L78pqrdFi2xa', )
"""
- response = self._raw_client.set_deployment(
+ _response = self._raw_client.set_deployment(
id, environment_id, version_id=version_id, request_options=request_options
)
- return response.data
+ return _response.data
def remove_deployment(
self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -789,17 +621,11 @@ def remove_deployment(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.datasets.remove_deployment(
- id="ds_b0baF1ca7652",
- environment_id="staging",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.datasets.remove_deployment(id='ds_b0baF1ca7652', environment_id='staging', )
"""
- response = self._raw_client.remove_deployment(id, environment_id, request_options=request_options)
- return response.data
+ _response = self._raw_client.remove_deployment(id, environment_id, request_options=request_options)
+ return _response.data
def list_environments(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -823,16 +649,11 @@ def list_environments(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.datasets.list_environments(
- id="id",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.datasets.list_environments(id='id', )
"""
- response = self._raw_client.list_environments(id, request_options=request_options)
- return response.data
+ _response = self._raw_client.list_environments(id, request_options=request_options)
+ return _response.data
class AsyncDatasetsClient:
@@ -857,7 +678,7 @@ async def list(
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
user_filter: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncPager[DatasetResponse]:
@@ -878,7 +699,7 @@ async def list(
user_filter : typing.Optional[str]
Case-insensitive filter for users in the Dataset. This filter matches against both email address and name of users.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort Datasets by
order : typing.Optional[SortOrder]
@@ -894,77 +715,28 @@ async def list(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- response = await client.datasets.list(
- size=1,
- )
+ response = await client.datasets.list(size=1, )
async for item in response:
yield item
+
# alternatively, you can paginate page-by-page
async for page in response.iter_pages():
yield page
-
-
asyncio.run(main())
"""
- page = page if page is not None else 1
- _response = await self._raw_client._client_wrapper.httpx_client.request(
- "datasets",
- method="GET",
- params={
- "page": page,
- "size": size,
- "name": name,
- "user_filter": user_filter,
- "sort_by": sort_by,
- "order": order,
- },
+ return await self._raw_client.list(
+ page=page,
+ size=size,
+ name=name,
+ user_filter=user_filter,
+ sort_by=sort_by,
+ order=order,
request_options=request_options,
)
- try:
- if 200 <= _response.status_code < 300:
- _parsed_response = typing.cast(
- PaginatedDatasetResponse,
- construct_type(
- type_=PaginatedDatasetResponse, # type: ignore
- object_=_response.json(),
- ),
- )
- _has_next = True
- _get_next = lambda: self.list(
- page=page + 1,
- size=size,
- name=name,
- user_filter=user_filter,
- sort_by=sort_by,
- order=order,
- request_options=request_options,
- )
- _items = _parsed_response.records
- return AsyncPager(has_next=_has_next, items=_items, get_next=_get_next)
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(
- HttpValidationError,
- construct_type(
- type_=HttpValidationError, # type: ignore
- object_=_response.json(),
- ),
- )
- )
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
async def upsert(
self,
@@ -1048,37 +820,14 @@ async def upsert(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.datasets.upsert(
- path="test-questions",
- datapoints=[
- {
- "inputs": {"question": "What is the capital of France?"},
- "target": {"answer": "Paris"},
- },
- {
- "inputs": {"question": "Who wrote Hamlet?"},
- "target": {"answer": "William Shakespeare"},
- },
- ],
- action="set",
- version_name="test-questions-v1",
- version_description="Add two new questions and answers",
- )
-
-
+ await client.datasets.upsert(path='test-questions', datapoints=[{'inputs': {'question': 'What is the capital of France?'}, 'target': {'answer': 'Paris'}}, {'inputs': {'question': 'Who wrote Hamlet?'}, 'target': {'answer': 'William Shakespeare'}}], action="set", version_name='test-questions-v1', version_description='Add two new questions and answers', )
asyncio.run(main())
"""
- response = await self._raw_client.upsert(
+ _response = await self._raw_client.upsert(
datapoints=datapoints,
version_id=version_id,
environment=environment,
@@ -1091,7 +840,7 @@ async def main() -> None:
version_description=version_description,
request_options=request_options,
)
- return response.data
+ return _response.data
async def get(
self,
@@ -1137,33 +886,21 @@ async def get(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.datasets.get(
- id="ds_b0baF1ca7652",
- version_id="dsv_6L78pqrdFi2xa",
- include_datapoints=True,
- )
-
-
+ await client.datasets.get(id='ds_b0baF1ca7652', version_id='dsv_6L78pqrdFi2xa', include_datapoints=True, )
asyncio.run(main())
"""
- response = await self._raw_client.get(
+ _response = await self._raw_client.get(
id,
version_id=version_id,
environment=environment,
include_datapoints=include_datapoints,
request_options=request_options,
)
- return response.data
+ return _response.data
async def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
"""
@@ -1183,25 +920,15 @@ async def delete(self, id: str, *, request_options: typing.Optional[RequestOptio
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.datasets.delete(
- id="id",
- )
-
-
+ await client.datasets.delete(id='id', )
asyncio.run(main())
"""
- response = await self._raw_client.delete(id, request_options=request_options)
- return response.data
+ _response = await self._raw_client.delete(id, request_options=request_options)
+ return _response.data
async def move(
self,
@@ -1235,25 +962,15 @@ async def move(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.datasets.move(
- id="id",
- )
-
-
+ await client.datasets.move(id='id', )
asyncio.run(main())
"""
- response = await self._raw_client.move(id, path=path, name=name, request_options=request_options)
- return response.data
+ _response = await self._raw_client.move(id, path=path, name=name, request_options=request_options)
+ return _response.data
async def list_datapoints(
self,
@@ -1295,75 +1012,22 @@ async def list_datapoints(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- response = await client.datasets.list_datapoints(
- id="ds_b0baF1ca7652",
- size=1,
- )
+ response = await client.datasets.list_datapoints(id='ds_b0baF1ca7652', size=1, )
async for item in response:
yield item
+
# alternatively, you can paginate page-by-page
async for page in response.iter_pages():
yield page
-
-
asyncio.run(main())
"""
- page = page if page is not None else 1
- _response = await self._raw_client._client_wrapper.httpx_client.request(
- f"datasets/{jsonable_encoder(id)}/datapoints",
- method="GET",
- params={
- "version_id": version_id,
- "environment": environment,
- "page": page,
- "size": size,
- },
- request_options=request_options,
+ return await self._raw_client.list_datapoints(
+ id, version_id=version_id, environment=environment, page=page, size=size, request_options=request_options
)
- try:
- if 200 <= _response.status_code < 300:
- _parsed_response = typing.cast(
- PaginatedDatapointResponse,
- construct_type(
- type_=PaginatedDatapointResponse, # type: ignore
- object_=_response.json(),
- ),
- )
- _has_next = True
- _get_next = lambda: self.list_datapoints(
- id,
- version_id=version_id,
- environment=environment,
- page=page + 1,
- size=size,
- request_options=request_options,
- )
- _items = _parsed_response.records
- return AsyncPager(has_next=_has_next, items=_items, get_next=_get_next)
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(
- HttpValidationError,
- construct_type(
- type_=HttpValidationError, # type: ignore
- object_=_response.json(),
- ),
- )
- )
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
async def list_versions(
self,
@@ -1393,27 +1057,17 @@ async def list_versions(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.datasets.list_versions(
- id="ds_b0baF1ca7652",
- )
-
-
+ await client.datasets.list_versions(id='ds_b0baF1ca7652', )
asyncio.run(main())
"""
- response = await self._raw_client.list_versions(
+ _response = await self._raw_client.list_versions(
id, include_datapoints=include_datapoints, request_options=request_options
)
- return response.data
+ return _response.data
async def delete_dataset_version(
self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1438,26 +1092,15 @@ async def delete_dataset_version(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.datasets.delete_dataset_version(
- id="id",
- version_id="version_id",
- )
-
-
+ await client.datasets.delete_dataset_version(id='id', version_id='version_id', )
asyncio.run(main())
"""
- response = await self._raw_client.delete_dataset_version(id, version_id, request_options=request_options)
- return response.data
+ _response = await self._raw_client.delete_dataset_version(id, version_id, request_options=request_options)
+ return _response.data
async def update_dataset_version(
self,
@@ -1495,28 +1138,17 @@ async def update_dataset_version(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.datasets.update_dataset_version(
- id="id",
- version_id="version_id",
- )
-
-
+ await client.datasets.update_dataset_version(id='id', version_id='version_id', )
asyncio.run(main())
"""
- response = await self._raw_client.update_dataset_version(
+ _response = await self._raw_client.update_dataset_version(
id, version_id, name=name, description=description, request_options=request_options
)
- return response.data
+ return _response.data
async def upload_csv(
self,
@@ -1572,24 +1204,14 @@ async def upload_csv(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.datasets.upload_csv(
- id="id",
- )
-
-
+ await client.datasets.upload_csv(id='id', )
asyncio.run(main())
"""
- response = await self._raw_client.upload_csv(
+ _response = await self._raw_client.upload_csv(
id,
file=file,
version_id=version_id,
@@ -1598,7 +1220,7 @@ async def main() -> None:
version_description=version_description,
request_options=request_options,
)
- return response.data
+ return _response.data
async def set_deployment(
self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
@@ -1629,29 +1251,17 @@ async def set_deployment(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.datasets.set_deployment(
- id="ds_b0baF1ca7652",
- environment_id="staging",
- version_id="dsv_6L78pqrdFi2xa",
- )
-
-
+ await client.datasets.set_deployment(id='ds_b0baF1ca7652', environment_id='staging', version_id='dsv_6L78pqrdFi2xa', )
asyncio.run(main())
"""
- response = await self._raw_client.set_deployment(
+ _response = await self._raw_client.set_deployment(
id, environment_id, version_id=version_id, request_options=request_options
)
- return response.data
+ return _response.data
async def remove_deployment(
self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1678,26 +1288,15 @@ async def remove_deployment(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.datasets.remove_deployment(
- id="ds_b0baF1ca7652",
- environment_id="staging",
- )
-
-
+ await client.datasets.remove_deployment(id='ds_b0baF1ca7652', environment_id='staging', )
asyncio.run(main())
"""
- response = await self._raw_client.remove_deployment(id, environment_id, request_options=request_options)
- return response.data
+ _response = await self._raw_client.remove_deployment(id, environment_id, request_options=request_options)
+ return _response.data
async def list_environments(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1720,22 +1319,12 @@ async def list_environments(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.datasets.list_environments(
- id="id",
- )
-
-
+ await client.datasets.list_environments(id='id', )
asyncio.run(main())
"""
- response = await self._raw_client.list_environments(id, request_options=request_options)
- return response.data
+ _response = await self._raw_client.list_environments(id, request_options=request_options)
+ return _response.data
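With the request/parse/raise logic relocated into RawDatasetsClient, the facade methods reduce to delegation plus .data unwrapping, so end-user code keeps working unchanged. A usage sketch (a valid API key is required; the dataset id is the placeholder already used in the docstrings above):

from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# list() returns the SyncPager built by RawDatasetsClient.list; it supports flat iteration...
for dataset in client.datasets.list(size=1):
    print(dataset.id)

# ...as well as page-by-page iteration, mirroring the AsyncPager on the async client.
datapoints = client.datasets.list_datapoints(id="ds_b0baF1ca7652", size=1)
for page in datapoints.iter_pages():
    print(len(page.items or []))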
diff --git a/src/humanloop/datasets/raw_client.py b/src/humanloop/datasets/raw_client.py
index 5cc9785c..774f04fc 100644
--- a/src/humanloop/datasets/raw_client.py
+++ b/src/humanloop/datasets/raw_client.py
@@ -1,27 +1,32 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from ..core.client_wrapper import SyncClientWrapper
-from ..requests.create_datapoint_request import CreateDatapointRequestParams
-from ..types.update_dateset_action import UpdateDatesetAction
+from json.decoder import JSONDecodeError
+
+from .. import core
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.http_response import AsyncHttpResponse, HttpResponse
+from ..core.jsonable_encoder import jsonable_encoder
+from ..core.pagination import AsyncPager, BaseHttpResponse, SyncPager
from ..core.request_options import RequestOptions
-from ..core.http_response import HttpResponse
-from ..types.dataset_response import DatasetResponse
from ..core.serialization import convert_and_respect_annotation_metadata
from ..core.unchecked_base_model import construct_type
from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..requests.create_datapoint_request import CreateDatapointRequestParams
+from ..types.datapoint_response import DatapointResponse
+from ..types.dataset_response import DatasetResponse
+from ..types.file_environment_response import FileEnvironmentResponse
+from ..types.file_sort_by import FileSortBy
from ..types.http_validation_error import HttpValidationError
-from json.decoder import JSONDecodeError
-from ..core.api_error import ApiError
-from ..core.jsonable_encoder import jsonable_encoder
+from ..types.list_datasets import ListDatasets
+from ..types.paginated_datapoint_response import PaginatedDatapointResponse
+from ..types.paginated_dataset_response import PaginatedDatasetResponse
+from ..types.sort_order import SortOrder
+from ..types.update_dateset_action import UpdateDatesetAction
from .types.list_versions_datasets_id_versions_get_request_include_datapoints import (
ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints,
)
-from ..types.list_datasets import ListDatasets
-from .. import core
-from ..types.file_environment_response import FileEnvironmentResponse
-from ..core.client_wrapper import AsyncClientWrapper
-from ..core.http_response import AsyncHttpResponse
# this is used as the default value for optional parameters
OMIT = typing.cast(typing.Any, ...)
@@ -31,6 +36,102 @@ class RawDatasetsClient:
def __init__(self, *, client_wrapper: SyncClientWrapper):
self._client_wrapper = client_wrapper
+ def list(
+ self,
+ *,
+ page: typing.Optional[int] = None,
+ size: typing.Optional[int] = None,
+ name: typing.Optional[str] = None,
+ user_filter: typing.Optional[str] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
+ order: typing.Optional[SortOrder] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> SyncPager[DatasetResponse]:
+ """
+ List all Datasets.
+
+ Parameters
+ ----------
+ page : typing.Optional[int]
+ Page offset for pagination.
+
+ size : typing.Optional[int]
+ Page size for pagination. Number of Datasets to fetch.
+
+ name : typing.Optional[str]
+ Case-insensitive filter for Dataset name.
+
+ user_filter : typing.Optional[str]
+ Case-insensitive filter for users in the Dataset. This filter matches against both email address and name of users.
+
+ sort_by : typing.Optional[FileSortBy]
+ Field to sort Datasets by
+
+ order : typing.Optional[SortOrder]
+ Direction to sort by.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ SyncPager[DatasetResponse]
+ Successful Response
+ """
+ page = page if page is not None else 1
+
+ _response = self._client_wrapper.httpx_client.request(
+ "datasets",
+ method="GET",
+ params={
+ "page": page,
+ "size": size,
+ "name": name,
+ "user_filter": user_filter,
+ "sort_by": sort_by,
+ "order": order,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _parsed_response = typing.cast(
+ PaginatedDatasetResponse,
+ construct_type(
+ type_=PaginatedDatasetResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ _items = _parsed_response.records
+ _has_next = True
+ _get_next = lambda: self.list(
+ page=page + 1,
+ size=size,
+ name=name,
+ user_filter=user_filter,
+ sort_by=sort_by,
+ order=order,
+ request_options=request_options,
+ )
+ return SyncPager(
+ has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response)
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ headers=dict(_response.headers),
+ body=typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ ),
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
def upsert(
self,
*,
@@ -148,18 +249,19 @@ def upsert(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def get(
self,
@@ -225,18 +327,19 @@ def get(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> HttpResponse[None]:
"""
@@ -264,18 +367,19 @@ def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] =
return HttpResponse(response=_response, data=None)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def move(
self,
@@ -332,18 +436,108 @@ def move(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+ def list_datapoints(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ page: typing.Optional[int] = None,
+ size: typing.Optional[int] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> SyncPager[DatapointResponse]:
+ """
+ List all Datapoints for the Dataset with the given ID.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Dataset.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Dataset to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ page : typing.Optional[int]
+ Page number for pagination.
+
+ size : typing.Optional[int]
+ Page size for pagination. Number of Datapoints to fetch.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ SyncPager[DatapointResponse]
+ Successful Response
+ """
+ page = page if page is not None else 1
+
+ _response = self._client_wrapper.httpx_client.request(
+ f"datasets/{jsonable_encoder(id)}/datapoints",
+ method="GET",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ "page": page,
+ "size": size,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _parsed_response = typing.cast(
+ PaginatedDatapointResponse,
+ construct_type(
+ type_=PaginatedDatapointResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ _items = _parsed_response.records
+ _has_next = True
+ _get_next = lambda: self.list_datapoints(
+ id,
+ version_id=version_id,
+ environment=environment,
+ page=page + 1,
+ size=size,
+ request_options=request_options,
+ )
+ return SyncPager(
+ has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response)
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ headers=dict(_response.headers),
+ body=typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def list_versions(
self,
@@ -391,18 +585,19 @@ def list_versions(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def delete_dataset_version(
self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -435,18 +630,19 @@ def delete_dataset_version(
return HttpResponse(response=_response, data=None)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def update_dataset_version(
self,
@@ -489,6 +685,9 @@ def update_dataset_version(
"name": name,
"description": description,
},
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
)
@@ -504,18 +703,19 @@ def update_dataset_version(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def upload_csv(
self,
@@ -583,6 +783,9 @@ def upload_csv(
files={
"file": file,
},
+ headers={
+ "content-type": "multipart/form-data",
+ },
request_options=request_options,
omit=OMIT,
)
@@ -598,18 +801,19 @@ def upload_csv(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def set_deployment(
self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
@@ -658,18 +862,19 @@ def set_deployment(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def remove_deployment(
self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -704,18 +909,19 @@ def remove_deployment(
return HttpResponse(response=_response, data=None)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def list_environments(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -753,24 +959,124 @@ def list_environments(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
class AsyncRawDatasetsClient:
def __init__(self, *, client_wrapper: AsyncClientWrapper):
self._client_wrapper = client_wrapper
+ async def list(
+ self,
+ *,
+ page: typing.Optional[int] = None,
+ size: typing.Optional[int] = None,
+ name: typing.Optional[str] = None,
+ user_filter: typing.Optional[str] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
+ order: typing.Optional[SortOrder] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncPager[DatasetResponse]:
+ """
+ List all Datasets.
+
+ Parameters
+ ----------
+ page : typing.Optional[int]
+ Page offset for pagination.
+
+ size : typing.Optional[int]
+ Page size for pagination. Number of Datasets to fetch.
+
+ name : typing.Optional[str]
+ Case-insensitive filter for Dataset name.
+
+ user_filter : typing.Optional[str]
+ Case-insensitive filter for users in the Dataset. This filter matches against both email address and name of users.
+
+ sort_by : typing.Optional[FileSortBy]
+            Field to sort Datasets by.
+
+ order : typing.Optional[SortOrder]
+ Direction to sort by.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncPager[DatasetResponse]
+ Successful Response
+ """
+ page = page if page is not None else 1
+
+ _response = await self._client_wrapper.httpx_client.request(
+ "datasets",
+ method="GET",
+ params={
+ "page": page,
+ "size": size,
+ "name": name,
+ "user_filter": user_filter,
+ "sort_by": sort_by,
+ "order": order,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _parsed_response = typing.cast(
+ PaginatedDatasetResponse,
+ construct_type(
+ type_=PaginatedDatasetResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ _items = _parsed_response.records
+ _has_next = True
+
+ async def _get_next():
+ return await self.list(
+ page=page + 1,
+ size=size,
+ name=name,
+ user_filter=user_filter,
+ sort_by=sort_by,
+ order=order,
+ request_options=request_options,
+ )
+
+ return AsyncPager(
+ has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response)
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ headers=dict(_response.headers),
+ body=typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ ),
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
async def upsert(
self,
*,
@@ -888,18 +1194,19 @@ async def upsert(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def get(
self,
@@ -965,18 +1272,19 @@ async def get(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def delete(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1006,18 +1314,19 @@ async def delete(
return AsyncHttpResponse(response=_response, data=None)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def move(
self,
@@ -1074,18 +1383,111 @@ async def move(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
+ ),
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+ async def list_datapoints(
+ self,
+ id: str,
+ *,
+ version_id: typing.Optional[str] = None,
+ environment: typing.Optional[str] = None,
+ page: typing.Optional[int] = None,
+ size: typing.Optional[int] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncPager[DatapointResponse]:
+ """
+ List all Datapoints for the Dataset with the given ID.
+
+ Parameters
+ ----------
+ id : str
+ Unique identifier for Dataset.
+
+ version_id : typing.Optional[str]
+ A specific Version ID of the Dataset to retrieve.
+
+ environment : typing.Optional[str]
+ Name of the Environment to retrieve a deployed Version from.
+
+ page : typing.Optional[int]
+ Page number for pagination.
+
+ size : typing.Optional[int]
+ Page size for pagination. Number of Datapoints to fetch.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncPager[DatapointResponse]
+ Successful Response
+ """
+ page = page if page is not None else 1
+
+ _response = await self._client_wrapper.httpx_client.request(
+ f"datasets/{jsonable_encoder(id)}/datapoints",
+ method="GET",
+ params={
+ "version_id": version_id,
+ "environment": environment,
+ "page": page,
+ "size": size,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _parsed_response = typing.cast(
+ PaginatedDatapointResponse,
+ construct_type(
+ type_=PaginatedDatapointResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ _items = _parsed_response.records
+ _has_next = True
+
+ async def _get_next():
+ return await self.list_datapoints(
+ id,
+ version_id=version_id,
+ environment=environment,
+ page=page + 1,
+ size=size,
+ request_options=request_options,
)
+
+ return AsyncPager(
+ has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response)
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ headers=dict(_response.headers),
+ body=typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def list_versions(
self,
@@ -1133,18 +1535,19 @@ async def list_versions(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def delete_dataset_version(
self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1177,18 +1580,19 @@ async def delete_dataset_version(
return AsyncHttpResponse(response=_response, data=None)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def update_dataset_version(
self,
@@ -1231,6 +1635,9 @@ async def update_dataset_version(
"name": name,
"description": description,
},
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
)
@@ -1246,18 +1653,19 @@ async def update_dataset_version(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def upload_csv(
self,
@@ -1325,6 +1733,9 @@ async def upload_csv(
files={
"file": file,
},
+ headers={
+ "content-type": "multipart/form-data",
+ },
request_options=request_options,
omit=OMIT,
)
@@ -1340,18 +1751,19 @@ async def upload_csv(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def set_deployment(
self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
@@ -1400,18 +1812,19 @@ async def set_deployment(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def remove_deployment(
self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1446,18 +1859,19 @@ async def remove_deployment(
return AsyncHttpResponse(response=_response, data=None)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def list_environments(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1495,15 +1909,16 @@ async def list_environments(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
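
The pager-based endpoints added above (`list`, `list_datapoints`) are consumed through `SyncPager`/`AsyncPager` rather than a single response object. Below is a minimal consumption sketch, not part of this patch, assuming the public `client.datasets` methods surface the same pagers built by the raw client; the API key and the `ds_example` ID are placeholders, and the async variants are consumed the same way with `async for`:

    from humanloop import Humanloop

    client = Humanloop(api_key="YOUR_API_KEY")

    # Iterating the SyncPager yields DatasetResponse items across pages,
    # fetching the next page lazily via the get_next callback shown above.
    for dataset in client.datasets.list(size=50):
        print(dataset.id)

    # Page-by-page iteration is also available, mirroring the generated
    # docstring examples elsewhere in this patch.
    for page in client.datasets.list(size=50).iter_pages():
        print(page)

    # Datapoints of a single Dataset paginate the same way.
    for datapoint in client.datasets.list_datapoints("ds_example", size=100):
        print(datapoint.id)

Since `has_next` is hard-coded to `True` and each `get_next` callback simply re-issues the request with `page + 1`, the pager presumably terminates once a page comes back with no records.
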
diff --git a/src/humanloop/datasets/types/__init__.py b/src/humanloop/datasets/types/__init__.py
index a84489fe..419263e1 100644
--- a/src/humanloop/datasets/types/__init__.py
+++ b/src/humanloop/datasets/types/__init__.py
@@ -1,5 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
+# isort: skip_file
+
from .list_versions_datasets_id_versions_get_request_include_datapoints import (
ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints,
)
diff --git a/src/humanloop/directories/__init__.py b/src/humanloop/directories/__init__.py
index f3ea2659..5cde0202 100644
--- a/src/humanloop/directories/__init__.py
+++ b/src/humanloop/directories/__init__.py
@@ -1,2 +1,4 @@
# This file was auto-generated by Fern from our API Definition.
+# isort: skip_file
+
diff --git a/src/humanloop/directories/client.py b/src/humanloop/directories/client.py
index 1d5383f3..62972278 100644
--- a/src/humanloop/directories/client.py
+++ b/src/humanloop/directories/client.py
@@ -1,13 +1,12 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from ..core.client_wrapper import SyncClientWrapper
-from .raw_client import RawDirectoriesClient
+
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
from ..core.request_options import RequestOptions
from ..types.directory_response import DirectoryResponse
from ..types.directory_with_parents_and_children_response import DirectoryWithParentsAndChildrenResponse
-from ..core.client_wrapper import AsyncClientWrapper
-from .raw_client import AsyncRawDirectoriesClient
+from .raw_client import AsyncRawDirectoriesClient, RawDirectoriesClient
# this is used as the default value for optional parameters
OMIT = typing.cast(typing.Any, ...)
@@ -45,14 +44,11 @@ def list(self, *, request_options: typing.Optional[RequestOptions] = None) -> ty
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
client.directories.list()
"""
- response = self._raw_client.list(request_options=request_options)
- return response.data
+ _response = self._raw_client.list(request_options=request_options)
+ return _response.data
def create(
self,
@@ -87,14 +83,11 @@ def create(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
client.directories.create()
"""
- response = self._raw_client.create(name=name, parent_id=parent_id, path=path, request_options=request_options)
- return response.data
+ _response = self._raw_client.create(name=name, parent_id=parent_id, path=path, request_options=request_options)
+ return _response.data
def get(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -118,16 +111,11 @@ def get(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.directories.get(
- id="id",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.directories.get(id='id', )
"""
- response = self._raw_client.get(id, request_options=request_options)
- return response.data
+ _response = self._raw_client.get(id, request_options=request_options)
+ return _response.data
def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
"""
@@ -150,16 +138,11 @@ def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] =
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.directories.delete(
- id="id",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.directories.delete(id='id', )
"""
- response = self._raw_client.delete(id, request_options=request_options)
- return response.data
+ _response = self._raw_client.delete(id, request_options=request_options)
+ return _response.data
def update(
self,
@@ -198,18 +181,13 @@ def update(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.directories.update(
- id="id",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.directories.update(id='id', )
"""
- response = self._raw_client.update(
+ _response = self._raw_client.update(
id, name=name, parent_id=parent_id, path=path, request_options=request_options
)
- return response.data
+ return _response.data
class AsyncDirectoriesClient:
@@ -243,23 +221,15 @@ async def list(self, *, request_options: typing.Optional[RequestOptions] = None)
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
await client.directories.list()
-
-
asyncio.run(main())
"""
- response = await self._raw_client.list(request_options=request_options)
- return response.data
+ _response = await self._raw_client.list(request_options=request_options)
+ return _response.data
async def create(
self,
@@ -293,25 +263,17 @@ async def create(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
await client.directories.create()
-
-
asyncio.run(main())
"""
- response = await self._raw_client.create(
+ _response = await self._raw_client.create(
name=name, parent_id=parent_id, path=path, request_options=request_options
)
- return response.data
+ return _response.data
async def get(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -334,25 +296,15 @@ async def get(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.directories.get(
- id="id",
- )
-
-
+ await client.directories.get(id='id', )
asyncio.run(main())
"""
- response = await self._raw_client.get(id, request_options=request_options)
- return response.data
+ _response = await self._raw_client.get(id, request_options=request_options)
+ return _response.data
async def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
"""
@@ -374,25 +326,15 @@ async def delete(self, id: str, *, request_options: typing.Optional[RequestOptio
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.directories.delete(
- id="id",
- )
-
-
+ await client.directories.delete(id='id', )
asyncio.run(main())
"""
- response = await self._raw_client.delete(id, request_options=request_options)
- return response.data
+ _response = await self._raw_client.delete(id, request_options=request_options)
+ return _response.data
async def update(
self,
@@ -430,24 +372,14 @@ async def update(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.directories.update(
- id="id",
- )
-
-
+ await client.directories.update(id='id', )
asyncio.run(main())
"""
- response = await self._raw_client.update(
+ _response = await self._raw_client.update(
id, name=name, parent_id=parent_id, path=path, request_options=request_options
)
- return response.data
+ return _response.data
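
For orientation, the directories operations updated above can be exercised end to end in the same style as the regenerated docstring examples. A compact sketch, not part of this patch; the API key and directory names are placeholders, and it assumes `create` returns a `DirectoryResponse` exposing an `id` field:

    import asyncio

    from humanloop import AsyncHumanloop, Humanloop

    client = Humanloop(api_key="YOUR_API_KEY")

    # Create a directory, read it back, rename it, then delete it.
    directory = client.directories.create(name="experiments")
    print(client.directories.get(id=directory.id))
    client.directories.update(id=directory.id, name="experiments-archive")
    client.directories.delete(id=directory.id)


    # The async client exposes the same surface behind `await`.
    async def main() -> None:
        aclient = AsyncHumanloop(api_key="YOUR_API_KEY")
        print(await aclient.directories.list())


    asyncio.run(main())
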
diff --git a/src/humanloop/directories/raw_client.py b/src/humanloop/directories/raw_client.py
index 36f4b188..e2f10091 100644
--- a/src/humanloop/directories/raw_client.py
+++ b/src/humanloop/directories/raw_client.py
@@ -1,19 +1,18 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from ..core.client_wrapper import SyncClientWrapper
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.http_response import AsyncHttpResponse, HttpResponse
+from ..core.jsonable_encoder import jsonable_encoder
from ..core.request_options import RequestOptions
-from ..core.http_response import HttpResponse
-from ..types.directory_response import DirectoryResponse
from ..core.unchecked_base_model import construct_type
from ..errors.unprocessable_entity_error import UnprocessableEntityError
-from ..types.http_validation_error import HttpValidationError
-from json.decoder import JSONDecodeError
-from ..core.api_error import ApiError
+from ..types.directory_response import DirectoryResponse
from ..types.directory_with_parents_and_children_response import DirectoryWithParentsAndChildrenResponse
-from ..core.jsonable_encoder import jsonable_encoder
-from ..core.client_wrapper import AsyncClientWrapper
-from ..core.http_response import AsyncHttpResponse
+from ..types.http_validation_error import HttpValidationError
# this is used as the default value for optional parameters
OMIT = typing.cast(typing.Any, ...)
@@ -56,18 +55,19 @@ def list(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def create(
self,
@@ -125,18 +125,19 @@ def create(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def get(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -174,18 +175,19 @@ def get(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> HttpResponse[None]:
"""
@@ -215,18 +217,19 @@ def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] =
return HttpResponse(response=_response, data=None)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def update(
self,
@@ -288,18 +291,19 @@ def update(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
class AsyncRawDirectoriesClient:
@@ -339,18 +343,19 @@ async def list(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def create(
self,
@@ -408,18 +413,19 @@ async def create(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def get(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -457,18 +463,19 @@ async def get(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def delete(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -500,18 +507,19 @@ async def delete(
return AsyncHttpResponse(response=_response, data=None)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def update(
self,
@@ -573,15 +581,16 @@ async def update(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
diff --git a/src/humanloop/errors/__init__.py b/src/humanloop/errors/__init__.py
index cb64e066..67183e01 100644
--- a/src/humanloop/errors/__init__.py
+++ b/src/humanloop/errors/__init__.py
@@ -1,5 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
+# isort: skip_file
+
from .unprocessable_entity_error import UnprocessableEntityError
__all__ = ["UnprocessableEntityError"]
diff --git a/src/humanloop/errors/unprocessable_entity_error.py b/src/humanloop/errors/unprocessable_entity_error.py
index 47470a70..d3f9c5d8 100644
--- a/src/humanloop/errors/unprocessable_entity_error.py
+++ b/src/humanloop/errors/unprocessable_entity_error.py
@@ -1,9 +1,11 @@
# This file was auto-generated by Fern from our API Definition.
+import typing
+
from ..core.api_error import ApiError
from ..types.http_validation_error import HttpValidationError
class UnprocessableEntityError(ApiError):
- def __init__(self, body: HttpValidationError):
- super().__init__(status_code=422, body=body)
+ def __init__(self, body: HttpValidationError, headers: typing.Optional[typing.Dict[str, str]] = None):
+ super().__init__(status_code=422, headers=headers, body=body)
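
With this change, 422 responses (and, via the matching `ApiError` updates throughout the regenerated raw clients, every other non-2xx response) carry the response headers alongside the parsed body. A hedged sketch of how calling code might inspect them, assuming `ApiError` exposes its constructor arguments as `status_code`, `headers`, and `body` attributes; the dataset ID is a placeholder:

    from humanloop import Humanloop
    from humanloop.core.api_error import ApiError
    from humanloop.errors import UnprocessableEntityError

    client = Humanloop(api_key="YOUR_API_KEY")

    try:
        client.datasets.get(id="not-a-real-id")  # placeholder ID
    except UnprocessableEntityError as exc:
        # 422s carry the parsed HttpValidationError plus the response headers.
        print(exc.status_code, exc.headers)
        print(exc.body)  # HttpValidationError with validation detail
    except ApiError as exc:
        # Any other non-2xx response; headers are now attached here as well.
        print(exc.status_code, exc.headers, exc.body)

Note that `UnprocessableEntityError` subclasses `ApiError`, so the more specific handler must come first.
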
diff --git a/src/humanloop/evaluations/__init__.py b/src/humanloop/evaluations/__init__.py
index 3f4e56cd..3498bb70 100644
--- a/src/humanloop/evaluations/__init__.py
+++ b/src/humanloop/evaluations/__init__.py
@@ -1,5 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
+# isort: skip_file
+
from .types import (
AddEvaluatorsRequestEvaluatorsItem,
CreateEvaluationRequestEvaluatorsItem,
diff --git a/src/humanloop/evaluations/client.py b/src/humanloop/evaluations/client.py
index 9a32433a..006fb99b 100644
--- a/src/humanloop/evaluations/client.py
+++ b/src/humanloop/evaluations/client.py
@@ -1,30 +1,22 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from ..core.client_wrapper import SyncClientWrapper
-from .raw_client import RawEvaluationsClient
+
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pagination import AsyncPager, SyncPager
from ..core.request_options import RequestOptions
-from ..core.pagination import SyncPager
-from ..types.evaluation_response import EvaluationResponse
-from ..types.paginated_evaluation_response import PaginatedEvaluationResponse
-from ..core.unchecked_base_model import construct_type
-from ..errors.unprocessable_entity_error import UnprocessableEntityError
-from ..types.http_validation_error import HttpValidationError
-from json.decoder import JSONDecodeError
-from ..core.api_error import ApiError
-from .requests.create_evaluation_request_evaluators_item import CreateEvaluationRequestEvaluatorsItemParams
from ..requests.file_request import FileRequestParams
-from .requests.add_evaluators_request_evaluators_item import AddEvaluatorsRequestEvaluatorsItemParams
-from ..types.evaluation_runs_response import EvaluationRunsResponse
-from .requests.create_run_request_dataset import CreateRunRequestDatasetParams
-from .requests.create_run_request_version import CreateRunRequestVersionParams
+from ..types.evaluation_response import EvaluationResponse
from ..types.evaluation_run_response import EvaluationRunResponse
-from ..types.evaluation_status import EvaluationStatus
+from ..types.evaluation_runs_response import EvaluationRunsResponse
from ..types.evaluation_stats import EvaluationStats
+from ..types.evaluation_status import EvaluationStatus
from ..types.paginated_data_evaluation_log_response import PaginatedDataEvaluationLogResponse
-from ..core.client_wrapper import AsyncClientWrapper
-from .raw_client import AsyncRawEvaluationsClient
-from ..core.pagination import AsyncPager
+from .raw_client import AsyncRawEvaluationsClient, RawEvaluationsClient
+from .requests.add_evaluators_request_evaluators_item import AddEvaluatorsRequestEvaluatorsItemParams
+from .requests.create_evaluation_request_evaluators_item import CreateEvaluationRequestEvaluatorsItemParams
+from .requests.create_run_request_dataset import CreateRunRequestDatasetParams
+from .requests.create_run_request_version import CreateRunRequestVersionParams
# this is used as the default value for optional parameters
OMIT = typing.cast(typing.Any, ...)
@@ -78,63 +70,15 @@ def list(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- response = client.evaluations.list(
- file_id="pr_30gco7dx6JDq4200GVOHa",
- size=1,
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ response = client.evaluations.list(file_id='pr_30gco7dx6JDq4200GVOHa', size=1, )
for item in response:
yield item
# alternatively, you can paginate page-by-page
for page in response.iter_pages():
yield page
"""
- page = page if page is not None else 1
- _response = self._raw_client._client_wrapper.httpx_client.request(
- "evaluations",
- method="GET",
- params={
- "file_id": file_id,
- "page": page,
- "size": size,
- },
- request_options=request_options,
- )
- try:
- if 200 <= _response.status_code < 300:
- _parsed_response = typing.cast(
- PaginatedEvaluationResponse,
- construct_type(
- type_=PaginatedEvaluationResponse, # type: ignore
- object_=_response.json(),
- ),
- )
- _has_next = True
- _get_next = lambda: self.list(
- file_id=file_id,
- page=page + 1,
- size=size,
- request_options=request_options,
- )
- _items = _parsed_response.records
- return SyncPager(has_next=_has_next, items=_items, get_next=_get_next)
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(
- HttpValidationError,
- construct_type(
- type_=HttpValidationError, # type: ignore
- object_=_response.json(),
- ),
- )
- )
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ return self._raw_client.list(file_id=file_id, page=page, size=size, request_options=request_options)
def create(
self,
@@ -173,16 +117,13 @@ def create(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.evaluations.create(
- evaluators=[{"version_id": "version_id"}],
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.evaluations.create(evaluators=[{'version_id': 'version_id'}], )
"""
- response = self._raw_client.create(evaluators=evaluators, file=file, name=name, request_options=request_options)
- return response.data
+ _response = self._raw_client.create(
+ evaluators=evaluators, file=file, name=name, request_options=request_options
+ )
+ return _response.data
def add_evaluators(
self,
@@ -215,17 +156,11 @@ def add_evaluators(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.evaluations.add_evaluators(
- id="id",
- evaluators=[{"version_id": "version_id"}],
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.evaluations.add_evaluators(id='id', evaluators=[{'version_id': 'version_id'}], )
"""
- response = self._raw_client.add_evaluators(id, evaluators=evaluators, request_options=request_options)
- return response.data
+ _response = self._raw_client.add_evaluators(id, evaluators=evaluators, request_options=request_options)
+ return _response.data
def remove_evaluator(
self, id: str, evaluator_version_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -254,17 +189,11 @@ def remove_evaluator(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.evaluations.remove_evaluator(
- id="id",
- evaluator_version_id="evaluator_version_id",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.evaluations.remove_evaluator(id='id', evaluator_version_id='evaluator_version_id', )
"""
- response = self._raw_client.remove_evaluator(id, evaluator_version_id, request_options=request_options)
- return response.data
+ _response = self._raw_client.remove_evaluator(id, evaluator_version_id, request_options=request_options)
+ return _response.data
def get(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> EvaluationResponse:
"""
@@ -292,16 +221,11 @@ def get(self, id: str, *, request_options: typing.Optional[RequestOptions] = Non
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.evaluations.get(
- id="ev_567yza",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.evaluations.get(id='ev_567yza', )
"""
- response = self._raw_client.get(id, request_options=request_options)
- return response.data
+ _response = self._raw_client.get(id, request_options=request_options)
+ return _response.data
def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
"""
@@ -324,16 +248,11 @@ def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] =
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.evaluations.delete(
- id="ev_567yza",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.evaluations.delete(id='ev_567yza', )
"""
- response = self._raw_client.delete(id, request_options=request_options)
- return response.data
+ _response = self._raw_client.delete(id, request_options=request_options)
+ return _response.data
def list_runs_for_evaluation(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -357,16 +276,11 @@ def list_runs_for_evaluation(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.evaluations.list_runs_for_evaluation(
- id="id",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.evaluations.list_runs_for_evaluation(id='id', )
"""
- response = self._raw_client.list_runs_for_evaluation(id, request_options=request_options)
- return response.data
+ _response = self._raw_client.list_runs_for_evaluation(id, request_options=request_options)
+ return _response.data
def create_run(
self,
@@ -422,15 +336,10 @@ def create_run(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.evaluations.create_run(
- id="id",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.evaluations.create_run(id='id', )
"""
- response = self._raw_client.create_run(
+ _response = self._raw_client.create_run(
id,
dataset=dataset,
version=version,
@@ -438,7 +347,7 @@ def create_run(
use_existing_logs=use_existing_logs,
request_options=request_options,
)
- return response.data
+ return _response.data
def add_existing_run(
self, id: str, run_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -468,17 +377,11 @@ def add_existing_run(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.evaluations.add_existing_run(
- id="id",
- run_id="run_id",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.evaluations.add_existing_run(id='id', run_id='run_id', )
"""
- response = self._raw_client.add_existing_run(id, run_id, request_options=request_options)
- return response.data
+ _response = self._raw_client.add_existing_run(id, run_id, request_options=request_options)
+ return _response.data
def remove_run(self, id: str, run_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
"""
@@ -505,17 +408,11 @@ def remove_run(self, id: str, run_id: str, *, request_options: typing.Optional[R
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.evaluations.remove_run(
- id="id",
- run_id="run_id",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.evaluations.remove_run(id='id', run_id='run_id', )
"""
- response = self._raw_client.remove_run(id, run_id, request_options=request_options)
- return response.data
+ _response = self._raw_client.remove_run(id, run_id, request_options=request_options)
+ return _response.data
def update_evaluation_run(
self,
@@ -557,19 +454,13 @@ def update_evaluation_run(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.evaluations.update_evaluation_run(
- id="id",
- run_id="run_id",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.evaluations.update_evaluation_run(id='id', run_id='run_id', )
"""
- response = self._raw_client.update_evaluation_run(
+ _response = self._raw_client.update_evaluation_run(
id, run_id, control=control, status=status, request_options=request_options
)
- return response.data
+ return _response.data
def add_logs_to_run(
self,
@@ -604,18 +495,11 @@ def add_logs_to_run(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.evaluations.add_logs_to_run(
- id="id",
- run_id="run_id",
- log_ids=["log_ids"],
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.evaluations.add_logs_to_run(id='id', run_id='run_id', log_ids=['log_ids'], )
"""
- response = self._raw_client.add_logs_to_run(id, run_id, log_ids=log_ids, request_options=request_options)
- return response.data
+ _response = self._raw_client.add_logs_to_run(id, run_id, log_ids=log_ids, request_options=request_options)
+ return _response.data
def get_stats(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> EvaluationStats:
"""
@@ -640,16 +524,11 @@ def get_stats(self, id: str, *, request_options: typing.Optional[RequestOptions]
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.evaluations.get_stats(
- id="id",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.evaluations.get_stats(id='id', )
"""
- response = self._raw_client.get_stats(id, request_options=request_options)
- return response.data
+ _response = self._raw_client.get_stats(id, request_options=request_options)
+ return _response.data
def get_logs(
self,
@@ -690,16 +569,11 @@ def get_logs(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.evaluations.get_logs(
- id="id",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.evaluations.get_logs(id='id', )
"""
- response = self._raw_client.get_logs(id, page=page, size=size, run_id=run_id, request_options=request_options)
- return response.data
+ _response = self._raw_client.get_logs(id, page=page, size=size, run_id=run_id, request_options=request_options)
+ return _response.data
class AsyncEvaluationsClient:
@@ -749,72 +623,20 @@ async def list(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- response = await client.evaluations.list(
- file_id="pr_30gco7dx6JDq4200GVOHa",
- size=1,
- )
+ response = await client.evaluations.list(file_id='pr_30gco7dx6JDq4200GVOHa', size=1, )
async for item in response:
yield item
+
# alternatively, you can paginate page-by-page
async for page in response.iter_pages():
yield page
-
-
asyncio.run(main())
"""
- page = page if page is not None else 1
- _response = await self._raw_client._client_wrapper.httpx_client.request(
- "evaluations",
- method="GET",
- params={
- "file_id": file_id,
- "page": page,
- "size": size,
- },
- request_options=request_options,
- )
- try:
- if 200 <= _response.status_code < 300:
- _parsed_response = typing.cast(
- PaginatedEvaluationResponse,
- construct_type(
- type_=PaginatedEvaluationResponse, # type: ignore
- object_=_response.json(),
- ),
- )
- _has_next = True
- _get_next = lambda: self.list(
- file_id=file_id,
- page=page + 1,
- size=size,
- request_options=request_options,
- )
- _items = _parsed_response.records
- return AsyncPager(has_next=_has_next, items=_items, get_next=_get_next)
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(
- HttpValidationError,
- construct_type(
- type_=HttpValidationError, # type: ignore
- object_=_response.json(),
- ),
- )
- )
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ return await self._raw_client.list(file_id=file_id, page=page, size=size, request_options=request_options)
async def create(
self,
@@ -852,27 +674,17 @@ async def create(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.evaluations.create(
- evaluators=[{"version_id": "version_id"}],
- )
-
-
+ await client.evaluations.create(evaluators=[{'version_id': 'version_id'}], )
asyncio.run(main())
"""
- response = await self._raw_client.create(
+ _response = await self._raw_client.create(
evaluators=evaluators, file=file, name=name, request_options=request_options
)
- return response.data
+ return _response.data
async def add_evaluators(
self,
@@ -904,26 +716,15 @@ async def add_evaluators(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.evaluations.add_evaluators(
- id="id",
- evaluators=[{"version_id": "version_id"}],
- )
-
-
+ await client.evaluations.add_evaluators(id='id', evaluators=[{'version_id': 'version_id'}], )
asyncio.run(main())
"""
- response = await self._raw_client.add_evaluators(id, evaluators=evaluators, request_options=request_options)
- return response.data
+ _response = await self._raw_client.add_evaluators(id, evaluators=evaluators, request_options=request_options)
+ return _response.data
async def remove_evaluator(
self, id: str, evaluator_version_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -951,26 +752,15 @@ async def remove_evaluator(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.evaluations.remove_evaluator(
- id="id",
- evaluator_version_id="evaluator_version_id",
- )
-
-
+ await client.evaluations.remove_evaluator(id='id', evaluator_version_id='evaluator_version_id', )
asyncio.run(main())
"""
- response = await self._raw_client.remove_evaluator(id, evaluator_version_id, request_options=request_options)
- return response.data
+ _response = await self._raw_client.remove_evaluator(id, evaluator_version_id, request_options=request_options)
+ return _response.data
async def get(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> EvaluationResponse:
"""
@@ -997,25 +787,15 @@ async def get(self, id: str, *, request_options: typing.Optional[RequestOptions]
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.evaluations.get(
- id="ev_567yza",
- )
-
-
+ await client.evaluations.get(id='ev_567yza', )
asyncio.run(main())
"""
- response = await self._raw_client.get(id, request_options=request_options)
- return response.data
+ _response = await self._raw_client.get(id, request_options=request_options)
+ return _response.data
async def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
"""
@@ -1037,25 +817,15 @@ async def delete(self, id: str, *, request_options: typing.Optional[RequestOptio
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.evaluations.delete(
- id="ev_567yza",
- )
-
-
+ await client.evaluations.delete(id='ev_567yza', )
asyncio.run(main())
"""
- response = await self._raw_client.delete(id, request_options=request_options)
- return response.data
+ _response = await self._raw_client.delete(id, request_options=request_options)
+ return _response.data
async def list_runs_for_evaluation(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1078,25 +848,15 @@ async def list_runs_for_evaluation(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.evaluations.list_runs_for_evaluation(
- id="id",
- )
-
-
+ await client.evaluations.list_runs_for_evaluation(id='id', )
asyncio.run(main())
"""
- response = await self._raw_client.list_runs_for_evaluation(id, request_options=request_options)
- return response.data
+ _response = await self._raw_client.list_runs_for_evaluation(id, request_options=request_options)
+ return _response.data
async def create_run(
self,
@@ -1151,24 +911,14 @@ async def create_run(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.evaluations.create_run(
- id="id",
- )
-
-
+ await client.evaluations.create_run(id='id', )
asyncio.run(main())
"""
- response = await self._raw_client.create_run(
+ _response = await self._raw_client.create_run(
id,
dataset=dataset,
version=version,
@@ -1176,7 +926,7 @@ async def main() -> None:
use_existing_logs=use_existing_logs,
request_options=request_options,
)
- return response.data
+ return _response.data
async def add_existing_run(
self, id: str, run_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1205,26 +955,15 @@ async def add_existing_run(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.evaluations.add_existing_run(
- id="id",
- run_id="run_id",
- )
-
-
+ await client.evaluations.add_existing_run(id='id', run_id='run_id', )
asyncio.run(main())
"""
- response = await self._raw_client.add_existing_run(id, run_id, request_options=request_options)
- return response.data
+ _response = await self._raw_client.add_existing_run(id, run_id, request_options=request_options)
+ return _response.data
async def remove_run(
self, id: str, run_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1252,26 +991,15 @@ async def remove_run(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.evaluations.remove_run(
- id="id",
- run_id="run_id",
- )
-
-
+ await client.evaluations.remove_run(id='id', run_id='run_id', )
asyncio.run(main())
"""
- response = await self._raw_client.remove_run(id, run_id, request_options=request_options)
- return response.data
+ _response = await self._raw_client.remove_run(id, run_id, request_options=request_options)
+ return _response.data
async def update_evaluation_run(
self,
@@ -1312,28 +1040,17 @@ async def update_evaluation_run(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.evaluations.update_evaluation_run(
- id="id",
- run_id="run_id",
- )
-
-
+ await client.evaluations.update_evaluation_run(id='id', run_id='run_id', )
asyncio.run(main())
"""
- response = await self._raw_client.update_evaluation_run(
+ _response = await self._raw_client.update_evaluation_run(
id, run_id, control=control, status=status, request_options=request_options
)
- return response.data
+ return _response.data
async def add_logs_to_run(
self,
@@ -1367,27 +1084,15 @@ async def add_logs_to_run(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.evaluations.add_logs_to_run(
- id="id",
- run_id="run_id",
- log_ids=["log_ids"],
- )
-
-
+ await client.evaluations.add_logs_to_run(id='id', run_id='run_id', log_ids=['log_ids'], )
asyncio.run(main())
"""
- response = await self._raw_client.add_logs_to_run(id, run_id, log_ids=log_ids, request_options=request_options)
- return response.data
+ _response = await self._raw_client.add_logs_to_run(id, run_id, log_ids=log_ids, request_options=request_options)
+ return _response.data
async def get_stats(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> EvaluationStats:
"""
@@ -1411,25 +1116,15 @@ async def get_stats(self, id: str, *, request_options: typing.Optional[RequestOp
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.evaluations.get_stats(
- id="id",
- )
-
-
+ await client.evaluations.get_stats(id='id', )
asyncio.run(main())
"""
- response = await self._raw_client.get_stats(id, request_options=request_options)
- return response.data
+ _response = await self._raw_client.get_stats(id, request_options=request_options)
+ return _response.data
async def get_logs(
self,
@@ -1469,24 +1164,14 @@ async def get_logs(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.evaluations.get_logs(
- id="id",
- )
-
-
+ await client.evaluations.get_logs(id='id', )
asyncio.run(main())
"""
- response = await self._raw_client.get_logs(
+ _response = await self._raw_client.get_logs(
id, page=page, size=size, run_id=run_id, request_options=request_options
)
- return response.data
+ return _response.data
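Editor's note: taken together, the evaluations/client.py hunks above turn the high-level client into a thin wrapper. Every method now calls the matching raw-client method and returns `_response.data`, and the paginated `list` endpoint hands back the pager built by the raw client. A minimal usage sketch under those assumptions (the API key and file ID are placeholder values; the sync client is expected to mirror the async pager pattern shown in the docstrings):

    import asyncio

    from humanloop import AsyncHumanloop, Humanloop

    # Sync usage: list() returns a pager that yields EvaluationResponse items
    # and fetches further pages transparently.
    client = Humanloop(api_key="YOUR_API_KEY")  # placeholder key
    for evaluation in client.evaluations.list(file_id="pr_30gco7dx6JDq4200GVOHa", size=50):
        print(evaluation.id)

    # Async usage mirrors the sync client; the returned AsyncPager supports `async for`.
    async def main() -> None:
        aclient = AsyncHumanloop(api_key="YOUR_API_KEY")  # placeholder key
        pager = await aclient.evaluations.list(file_id="pr_30gco7dx6JDq4200GVOHa", size=50)
        async for evaluation in pager:
            print(evaluation.id)

    asyncio.run(main())
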
diff --git a/src/humanloop/evaluations/raw_client.py b/src/humanloop/evaluations/raw_client.py
index 082a30d2..85c3dbf3 100644
--- a/src/humanloop/evaluations/raw_client.py
+++ b/src/humanloop/evaluations/raw_client.py
@@ -1,29 +1,30 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from ..core.client_wrapper import SyncClientWrapper
-from .requests.create_evaluation_request_evaluators_item import CreateEvaluationRequestEvaluatorsItemParams
-from ..requests.file_request import FileRequestParams
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.http_response import AsyncHttpResponse, HttpResponse
+from ..core.jsonable_encoder import jsonable_encoder
+from ..core.pagination import AsyncPager, BaseHttpResponse, SyncPager
from ..core.request_options import RequestOptions
-from ..core.http_response import HttpResponse
-from ..types.evaluation_response import EvaluationResponse
from ..core.serialization import convert_and_respect_annotation_metadata
from ..core.unchecked_base_model import construct_type
from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..requests.file_request import FileRequestParams
+from ..types.evaluation_response import EvaluationResponse
+from ..types.evaluation_run_response import EvaluationRunResponse
+from ..types.evaluation_runs_response import EvaluationRunsResponse
+from ..types.evaluation_stats import EvaluationStats
+from ..types.evaluation_status import EvaluationStatus
from ..types.http_validation_error import HttpValidationError
-from json.decoder import JSONDecodeError
-from ..core.api_error import ApiError
+from ..types.paginated_data_evaluation_log_response import PaginatedDataEvaluationLogResponse
+from ..types.paginated_evaluation_response import PaginatedEvaluationResponse
from .requests.add_evaluators_request_evaluators_item import AddEvaluatorsRequestEvaluatorsItemParams
-from ..core.jsonable_encoder import jsonable_encoder
-from ..types.evaluation_runs_response import EvaluationRunsResponse
+from .requests.create_evaluation_request_evaluators_item import CreateEvaluationRequestEvaluatorsItemParams
from .requests.create_run_request_dataset import CreateRunRequestDatasetParams
from .requests.create_run_request_version import CreateRunRequestVersionParams
-from ..types.evaluation_run_response import EvaluationRunResponse
-from ..types.evaluation_status import EvaluationStatus
-from ..types.evaluation_stats import EvaluationStats
-from ..types.paginated_data_evaluation_log_response import PaginatedDataEvaluationLogResponse
-from ..core.client_wrapper import AsyncClientWrapper
-from ..core.http_response import AsyncHttpResponse
# this is used as the default value for optional parameters
OMIT = typing.cast(typing.Any, ...)
@@ -33,6 +34,84 @@ class RawEvaluationsClient:
def __init__(self, *, client_wrapper: SyncClientWrapper):
self._client_wrapper = client_wrapper
+ def list(
+ self,
+ *,
+ file_id: str,
+ page: typing.Optional[int] = None,
+ size: typing.Optional[int] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> SyncPager[EvaluationResponse]:
+ """
+ Retrieve a list of Evaluations for the specified File.
+
+ Parameters
+ ----------
+ file_id : str
+ Filter by File ID. Only Evaluations for the specified File will be returned.
+
+ page : typing.Optional[int]
+ Page number for pagination.
+
+ size : typing.Optional[int]
+ Page size for pagination. Number of Evaluations to fetch.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ SyncPager[EvaluationResponse]
+ Successful Response
+ """
+ page = page if page is not None else 1
+
+ _response = self._client_wrapper.httpx_client.request(
+ "evaluations",
+ method="GET",
+ params={
+ "file_id": file_id,
+ "page": page,
+ "size": size,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _parsed_response = typing.cast(
+ PaginatedEvaluationResponse,
+ construct_type(
+ type_=PaginatedEvaluationResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ _items = _parsed_response.records
+ _has_next = True
+ _get_next = lambda: self.list(
+ file_id=file_id,
+ page=page + 1,
+ size=size,
+ request_options=request_options,
+ )
+ return SyncPager(
+ has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response)
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ headers=dict(_response.headers),
+ body=typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ ),
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
def create(
self,
*,
@@ -99,18 +178,19 @@ def create(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def add_evaluators(
self,
@@ -168,18 +248,19 @@ def add_evaluators(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def remove_evaluator(
self, id: str, evaluator_version_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -222,18 +303,19 @@ def remove_evaluator(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def get(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -277,18 +359,19 @@ def get(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> HttpResponse[None]:
"""
@@ -318,18 +401,19 @@ def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] =
return HttpResponse(response=_response, data=None)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def list_runs_for_evaluation(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -367,18 +451,19 @@ def list_runs_for_evaluation(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def create_run(
self,
@@ -462,18 +547,19 @@ def create_run(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def add_existing_run(
self, id: str, run_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -517,18 +603,19 @@ def add_existing_run(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def remove_run(
self, id: str, run_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -564,18 +651,19 @@ def remove_run(
return HttpResponse(response=_response, data=None)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def update_evaluation_run(
self,
@@ -639,18 +727,19 @@ def update_evaluation_run(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def add_logs_to_run(
self,
@@ -706,18 +795,19 @@ def add_logs_to_run(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def get_stats(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -758,18 +848,19 @@ def get_stats(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def get_logs(
self,
@@ -829,24 +920,106 @@ def get_logs(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
class AsyncRawEvaluationsClient:
def __init__(self, *, client_wrapper: AsyncClientWrapper):
self._client_wrapper = client_wrapper
+ async def list(
+ self,
+ *,
+ file_id: str,
+ page: typing.Optional[int] = None,
+ size: typing.Optional[int] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncPager[EvaluationResponse]:
+ """
+ Retrieve a list of Evaluations for the specified File.
+
+ Parameters
+ ----------
+ file_id : str
+ Filter by File ID. Only Evaluations for the specified File will be returned.
+
+ page : typing.Optional[int]
+ Page number for pagination.
+
+ size : typing.Optional[int]
+ Page size for pagination. Number of Evaluations to fetch.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncPager[EvaluationResponse]
+ Successful Response
+ """
+ page = page if page is not None else 1
+
+ _response = await self._client_wrapper.httpx_client.request(
+ "evaluations",
+ method="GET",
+ params={
+ "file_id": file_id,
+ "page": page,
+ "size": size,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _parsed_response = typing.cast(
+ PaginatedEvaluationResponse,
+ construct_type(
+ type_=PaginatedEvaluationResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ _items = _parsed_response.records
+ _has_next = True
+
+ async def _get_next():
+ return await self.list(
+ file_id=file_id,
+ page=page + 1,
+ size=size,
+ request_options=request_options,
+ )
+
+ return AsyncPager(
+ has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response)
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ headers=dict(_response.headers),
+ body=typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ ),
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
async def create(
self,
*,
@@ -913,18 +1086,19 @@ async def create(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def add_evaluators(
self,
@@ -982,18 +1156,19 @@ async def add_evaluators(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def remove_evaluator(
self, id: str, evaluator_version_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1036,18 +1211,19 @@ async def remove_evaluator(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def get(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1091,18 +1267,19 @@ async def get(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def delete(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1134,18 +1311,19 @@ async def delete(
return AsyncHttpResponse(response=_response, data=None)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def list_runs_for_evaluation(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1183,18 +1361,19 @@ async def list_runs_for_evaluation(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def create_run(
self,
@@ -1278,18 +1457,19 @@ async def create_run(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def add_existing_run(
self, id: str, run_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1333,18 +1513,19 @@ async def add_existing_run(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def remove_run(
self, id: str, run_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1380,18 +1561,19 @@ async def remove_run(
return AsyncHttpResponse(response=_response, data=None)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def update_evaluation_run(
self,
@@ -1455,18 +1637,19 @@ async def update_evaluation_run(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def add_logs_to_run(
self,
@@ -1522,18 +1705,19 @@ async def add_logs_to_run(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def get_stats(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1574,18 +1758,19 @@ async def get_stats(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def get_logs(
self,
@@ -1645,15 +1830,16 @@ async def get_logs(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
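Editor's note: the raw_client.py diff above moves pagination down into the raw layer (the new `list` methods return `SyncPager`/`AsyncPager` wrapping a `BaseHttpResponse`) and threads response headers through every error path: `UnprocessableEntityError` is now constructed with `headers=` and `body=` keyword arguments, and `ApiError` gains `headers=` in both the JSON and plain-text branches. A sketch of how a caller might use that metadata, assuming the exceptions expose their constructor arguments as attributes (typical for this generated code) and that `x-request-id` is merely an illustrative header name:

    from humanloop import Humanloop
    from humanloop.core.api_error import ApiError
    from humanloop.errors.unprocessable_entity_error import UnprocessableEntityError

    client = Humanloop(api_key="YOUR_API_KEY")  # placeholder key

    try:
        # Any evaluations call can raise these; create() is used purely as an example.
        client.evaluations.create(evaluators=[{"version_id": "version_id"}])
    except UnprocessableEntityError as exc:
        # body carries the parsed HttpValidationError; headers come from the 422 response.
        print("validation failed:", exc.body)
        print("request id header (if present):", exc.headers.get("x-request-id"))
    except ApiError as exc:
        # Non-422 failures and JSON decode errors surface here with the same metadata.
        print(exc.status_code, exc.headers, exc.body)
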
diff --git a/src/humanloop/evaluations/requests/__init__.py b/src/humanloop/evaluations/requests/__init__.py
index a9508cba..1997f1a0 100644
--- a/src/humanloop/evaluations/requests/__init__.py
+++ b/src/humanloop/evaluations/requests/__init__.py
@@ -1,5 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
+# isort: skip_file
+
from .add_evaluators_request_evaluators_item import AddEvaluatorsRequestEvaluatorsItemParams
from .create_evaluation_request_evaluators_item import CreateEvaluationRequestEvaluatorsItemParams
from .create_run_request_dataset import CreateRunRequestDatasetParams
diff --git a/src/humanloop/evaluations/requests/add_evaluators_request_evaluators_item.py b/src/humanloop/evaluations/requests/add_evaluators_request_evaluators_item.py
index 52b95526..24da1248 100644
--- a/src/humanloop/evaluations/requests/add_evaluators_request_evaluators_item.py
+++ b/src/humanloop/evaluations/requests/add_evaluators_request_evaluators_item.py
@@ -1,9 +1,10 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from ...requests.evaluator_version_id import EvaluatorVersionIdParams
+
from ...requests.evaluator_file_id import EvaluatorFileIdParams
from ...requests.evaluator_file_path import EvaluatorFilePathParams
+from ...requests.evaluator_version_id import EvaluatorVersionIdParams
AddEvaluatorsRequestEvaluatorsItemParams = typing.Union[
EvaluatorVersionIdParams, EvaluatorFileIdParams, EvaluatorFilePathParams
diff --git a/src/humanloop/evaluations/requests/create_evaluation_request_evaluators_item.py b/src/humanloop/evaluations/requests/create_evaluation_request_evaluators_item.py
index e26e438c..a53624c0 100644
--- a/src/humanloop/evaluations/requests/create_evaluation_request_evaluators_item.py
+++ b/src/humanloop/evaluations/requests/create_evaluation_request_evaluators_item.py
@@ -1,9 +1,10 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from ...requests.evaluator_version_id import EvaluatorVersionIdParams
+
from ...requests.evaluator_file_id import EvaluatorFileIdParams
from ...requests.evaluator_file_path import EvaluatorFilePathParams
+from ...requests.evaluator_version_id import EvaluatorVersionIdParams
CreateEvaluationRequestEvaluatorsItemParams = typing.Union[
EvaluatorVersionIdParams, EvaluatorFileIdParams, EvaluatorFilePathParams
diff --git a/src/humanloop/evaluations/requests/create_run_request_dataset.py b/src/humanloop/evaluations/requests/create_run_request_dataset.py
index 555b83bc..cabeb7f2 100644
--- a/src/humanloop/evaluations/requests/create_run_request_dataset.py
+++ b/src/humanloop/evaluations/requests/create_run_request_dataset.py
@@ -1,8 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from ...requests.version_id import VersionIdParams
+
from ...requests.file_id import FileIdParams
from ...requests.file_path import FilePathParams
+from ...requests.version_id import VersionIdParams
CreateRunRequestDatasetParams = typing.Union[VersionIdParams, FileIdParams, FilePathParams]
diff --git a/src/humanloop/evaluations/requests/create_run_request_version.py b/src/humanloop/evaluations/requests/create_run_request_version.py
index 76487576..830ee49e 100644
--- a/src/humanloop/evaluations/requests/create_run_request_version.py
+++ b/src/humanloop/evaluations/requests/create_run_request_version.py
@@ -1,8 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from ...requests.version_id import VersionIdParams
+
from ...requests.file_id import FileIdParams
from ...requests.file_path import FilePathParams
+from ...requests.version_id import VersionIdParams
CreateRunRequestVersionParams = typing.Union[VersionIdParams, FileIdParams, FilePathParams]
diff --git a/src/humanloop/evaluations/types/__init__.py b/src/humanloop/evaluations/types/__init__.py
index 74045089..508249fb 100644
--- a/src/humanloop/evaluations/types/__init__.py
+++ b/src/humanloop/evaluations/types/__init__.py
@@ -1,5 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
+# isort: skip_file
+
from .add_evaluators_request_evaluators_item import AddEvaluatorsRequestEvaluatorsItem
from .create_evaluation_request_evaluators_item import CreateEvaluationRequestEvaluatorsItem
from .create_run_request_dataset import CreateRunRequestDataset
diff --git a/src/humanloop/evaluations/types/add_evaluators_request_evaluators_item.py b/src/humanloop/evaluations/types/add_evaluators_request_evaluators_item.py
index b7510e4b..3e4bbe23 100644
--- a/src/humanloop/evaluations/types/add_evaluators_request_evaluators_item.py
+++ b/src/humanloop/evaluations/types/add_evaluators_request_evaluators_item.py
@@ -1,8 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from ...types.evaluator_version_id import EvaluatorVersionId
+
from ...types.evaluator_file_id import EvaluatorFileId
from ...types.evaluator_file_path import EvaluatorFilePath
+from ...types.evaluator_version_id import EvaluatorVersionId
AddEvaluatorsRequestEvaluatorsItem = typing.Union[EvaluatorVersionId, EvaluatorFileId, EvaluatorFilePath]
diff --git a/src/humanloop/evaluations/types/create_evaluation_request_evaluators_item.py b/src/humanloop/evaluations/types/create_evaluation_request_evaluators_item.py
index f9fd39a8..448585eb 100644
--- a/src/humanloop/evaluations/types/create_evaluation_request_evaluators_item.py
+++ b/src/humanloop/evaluations/types/create_evaluation_request_evaluators_item.py
@@ -1,8 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from ...types.evaluator_version_id import EvaluatorVersionId
+
from ...types.evaluator_file_id import EvaluatorFileId
from ...types.evaluator_file_path import EvaluatorFilePath
+from ...types.evaluator_version_id import EvaluatorVersionId
CreateEvaluationRequestEvaluatorsItem = typing.Union[EvaluatorVersionId, EvaluatorFileId, EvaluatorFilePath]
diff --git a/src/humanloop/evaluations/types/create_run_request_dataset.py b/src/humanloop/evaluations/types/create_run_request_dataset.py
index 757826f7..b915987e 100644
--- a/src/humanloop/evaluations/types/create_run_request_dataset.py
+++ b/src/humanloop/evaluations/types/create_run_request_dataset.py
@@ -1,8 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from ...types.version_id import VersionId
+
from ...types.file_id import FileId
from ...types.file_path import FilePath
+from ...types.version_id import VersionId
CreateRunRequestDataset = typing.Union[VersionId, FileId, FilePath]
diff --git a/src/humanloop/evaluations/types/create_run_request_version.py b/src/humanloop/evaluations/types/create_run_request_version.py
index ef0bca95..6d383dd8 100644
--- a/src/humanloop/evaluations/types/create_run_request_version.py
+++ b/src/humanloop/evaluations/types/create_run_request_version.py
@@ -1,8 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from ...types.version_id import VersionId
+
from ...types.file_id import FileId
from ...types.file_path import FilePath
+from ...types.version_id import VersionId
CreateRunRequestVersion = typing.Union[VersionId, FileId, FilePath]
diff --git a/src/humanloop/evaluators/__init__.py b/src/humanloop/evaluators/__init__.py
index 182c7d1d..480476b3 100644
--- a/src/humanloop/evaluators/__init__.py
+++ b/src/humanloop/evaluators/__init__.py
@@ -1,5 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
+# isort: skip_file
+
from .types import CreateEvaluatorLogRequestJudgment, CreateEvaluatorLogRequestSpec, EvaluatorRequestSpec
from .requests import (
CreateEvaluatorLogRequestJudgmentParams,
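Editor's note: the evaluators/client.py diff that follows makes the same wrapper-and-pager changes for Evaluators and retypes the `sort_by` filter from `ProjectSortBy` to `FileSortBy`. A minimal sketch of listing Evaluators with the retyped parameter; the diff only shows the type rename, so the concrete values `"created_at"` and `"asc"` below are assumptions about the allowed sort field and order, not confirmed members:

    from humanloop import Humanloop

    client = Humanloop(api_key="YOUR_API_KEY")  # placeholder key

    # sort_by is now typed as FileSortBy rather than ProjectSortBy; the values here
    # only illustrate the call shape.
    for evaluator in client.evaluators.list(size=25, sort_by="created_at", order="asc"):
        print(evaluator.id)
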
diff --git a/src/humanloop/evaluators/client.py b/src/humanloop/evaluators/client.py
index 88ec32c8..69fff10c 100644
--- a/src/humanloop/evaluators/client.py
+++ b/src/humanloop/evaluators/client.py
@@ -1,37 +1,29 @@
# This file was auto-generated by Fern from our API Definition.
-import typing
-from ..core.client_wrapper import SyncClientWrapper
-from .raw_client import RawEvaluatorsClient
import datetime as dt
-from ..types.log_status import LogStatus
-from ..requests.chat_message import ChatMessageParams
-from .requests.create_evaluator_log_request_judgment import CreateEvaluatorLogRequestJudgmentParams
-from .requests.create_evaluator_log_request_spec import CreateEvaluatorLogRequestSpecParams
+import typing
+
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pagination import AsyncPager, SyncPager
from ..core.request_options import RequestOptions
-from ..types.create_evaluator_log_response import CreateEvaluatorLogResponse
-from ..types.project_sort_by import ProjectSortBy
-from ..types.sort_order import SortOrder
-from ..core.pagination import SyncPager
-from ..types.evaluator_response import EvaluatorResponse
-from ..types.paginated_data_evaluator_response import PaginatedDataEvaluatorResponse
-from ..core.unchecked_base_model import construct_type
-from ..errors.unprocessable_entity_error import UnprocessableEntityError
-from ..types.http_validation_error import HttpValidationError
-from json.decoder import JSONDecodeError
-from ..core.api_error import ApiError
-from .requests.evaluator_request_spec import EvaluatorRequestSpecParams
-from ..types.list_evaluators import ListEvaluators
-from ..types.file_environment_response import FileEnvironmentResponse
+from ..requests.chat_message import ChatMessageParams
from ..requests.evaluator_activation_deactivation_request_activate_item import (
EvaluatorActivationDeactivationRequestActivateItemParams,
)
from ..requests.evaluator_activation_deactivation_request_deactivate_item import (
EvaluatorActivationDeactivationRequestDeactivateItemParams,
)
-from ..core.client_wrapper import AsyncClientWrapper
-from .raw_client import AsyncRawEvaluatorsClient
-from ..core.pagination import AsyncPager
+from ..types.create_evaluator_log_response import CreateEvaluatorLogResponse
+from ..types.evaluator_response import EvaluatorResponse
+from ..types.file_environment_response import FileEnvironmentResponse
+from ..types.file_sort_by import FileSortBy
+from ..types.list_evaluators import ListEvaluators
+from ..types.log_status import LogStatus
+from ..types.sort_order import SortOrder
+from .raw_client import AsyncRawEvaluatorsClient, RawEvaluatorsClient
+from .requests.create_evaluator_log_request_judgment import CreateEvaluatorLogRequestJudgmentParams
+from .requests.create_evaluator_log_request_spec import CreateEvaluatorLogRequestSpecParams
+from .requests.evaluator_request_spec import EvaluatorRequestSpecParams
# this is used as the default value for optional parameters
OMIT = typing.cast(typing.Any, ...)
@@ -186,15 +178,10 @@ def log(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.evaluators.log(
- parent_id="parent_id",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.evaluators.log(parent_id='parent_id', )
"""
- response = self._raw_client.log(
+ _response = self._raw_client.log(
parent_id=parent_id,
version_id=version_id,
environment=environment,
@@ -225,7 +212,7 @@ def log(
spec=spec,
request_options=request_options,
)
- return response.data
+ return _response.data
def list(
self,
@@ -234,7 +221,7 @@ def list(
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
user_filter: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> SyncPager[EvaluatorResponse]:
@@ -255,7 +242,7 @@ def list(
user_filter : typing.Optional[str]
Case-insensitive filter for users in the Evaluator. This filter matches against both email address and name of users.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort Evaluators by
order : typing.Optional[SortOrder]
@@ -272,68 +259,23 @@ def list(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- response = client.evaluators.list(
- size=1,
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ response = client.evaluators.list(size=1, )
for item in response:
yield item
# alternatively, you can paginate page-by-page
for page in response.iter_pages():
yield page
"""
- page = page if page is not None else 1
- _response = self._raw_client._client_wrapper.httpx_client.request(
- "evaluators",
- method="GET",
- params={
- "page": page,
- "size": size,
- "name": name,
- "user_filter": user_filter,
- "sort_by": sort_by,
- "order": order,
- },
+ return self._raw_client.list(
+ page=page,
+ size=size,
+ name=name,
+ user_filter=user_filter,
+ sort_by=sort_by,
+ order=order,
request_options=request_options,
)
- try:
- if 200 <= _response.status_code < 300:
- _parsed_response = typing.cast(
- PaginatedDataEvaluatorResponse,
- construct_type(
- type_=PaginatedDataEvaluatorResponse, # type: ignore
- object_=_response.json(),
- ),
- )
- _has_next = True
- _get_next = lambda: self.list(
- page=page + 1,
- size=size,
- name=name,
- user_filter=user_filter,
- sort_by=sort_by,
- order=order,
- request_options=request_options,
- )
- _items = _parsed_response.records
- return SyncPager(has_next=_has_next, items=_items, get_next=_get_next)
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(
- HttpValidationError,
- construct_type(
- type_=HttpValidationError, # type: ignore
- object_=_response.json(),
- ),
- )
- )
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
def upsert(
self,
@@ -381,23 +323,10 @@ def upsert(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.evaluators.upsert(
- path="Shared Evaluators/Accuracy Evaluator",
- spec={
- "arguments_type": "target_required",
- "return_type": "number",
- "evaluator_type": "python",
- "code": "def evaluate(answer, target):\n return 0.5",
- },
- version_name="simple-evaluator",
- version_description="Simple evaluator that returns 0.5",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.evaluators.upsert(path='Shared Evaluators/Accuracy Evaluator', spec={'arguments_type': "target_required", 'return_type': "number", 'evaluator_type': 'python', 'code': 'def evaluate(answer, target):\n return 0.5'}, version_name='simple-evaluator', version_description='Simple evaluator that returns 0.5', )
"""
- response = self._raw_client.upsert(
+ _response = self._raw_client.upsert(
spec=spec,
path=path,
id=id,
@@ -405,7 +334,7 @@ def upsert(
version_description=version_description,
request_options=request_options,
)
- return response.data
+ return _response.data
def get(
self,
@@ -443,18 +372,13 @@ def get(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.evaluators.get(
- id="ev_890bcd",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.evaluators.get(id='ev_890bcd', )
"""
- response = self._raw_client.get(
+ _response = self._raw_client.get(
id, version_id=version_id, environment=environment, request_options=request_options
)
- return response.data
+ return _response.data
def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
"""
@@ -475,16 +399,11 @@ def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] =
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.evaluators.delete(
- id="ev_890bcd",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.evaluators.delete(id='ev_890bcd', )
"""
- response = self._raw_client.delete(id, request_options=request_options)
- return response.data
+ _response = self._raw_client.delete(id, request_options=request_options)
+ return _response.data
def move(
self,
@@ -519,17 +438,11 @@ def move(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.evaluators.move(
- id="ev_890bcd",
- path="new directory/new name",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.evaluators.move(id='ev_890bcd', path='new directory/new name', )
"""
- response = self._raw_client.move(id, path=path, name=name, request_options=request_options)
- return response.data
+ _response = self._raw_client.move(id, path=path, name=name, request_options=request_options)
+ return _response.data
def list_versions(
self,
@@ -560,18 +473,13 @@ def list_versions(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.evaluators.list_versions(
- id="ev_890bcd",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.evaluators.list_versions(id='ev_890bcd', )
"""
- response = self._raw_client.list_versions(
+ _response = self._raw_client.list_versions(
id, evaluator_aggregates=evaluator_aggregates, request_options=request_options
)
- return response.data
+ return _response.data
def delete_evaluator_version(
self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -597,17 +505,11 @@ def delete_evaluator_version(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.evaluators.delete_evaluator_version(
- id="id",
- version_id="version_id",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.evaluators.delete_evaluator_version(id='id', version_id='version_id', )
"""
- response = self._raw_client.delete_evaluator_version(id, version_id, request_options=request_options)
- return response.data
+ _response = self._raw_client.delete_evaluator_version(id, version_id, request_options=request_options)
+ return _response.data
def update_evaluator_version(
self,
@@ -646,19 +548,13 @@ def update_evaluator_version(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.evaluators.update_evaluator_version(
- id="id",
- version_id="version_id",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.evaluators.update_evaluator_version(id='id', version_id='version_id', )
"""
- response = self._raw_client.update_evaluator_version(
+ _response = self._raw_client.update_evaluator_version(
id, version_id, name=name, description=description, request_options=request_options
)
- return response.data
+ return _response.data
def set_deployment(
self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
@@ -691,20 +587,13 @@ def set_deployment(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.evaluators.set_deployment(
- id="ev_890bcd",
- environment_id="staging",
- version_id="evv_012def",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.evaluators.set_deployment(id='ev_890bcd', environment_id='staging', version_id='evv_012def', )
"""
- response = self._raw_client.set_deployment(
+ _response = self._raw_client.set_deployment(
id, environment_id, version_id=version_id, request_options=request_options
)
- return response.data
+ return _response.data
def remove_deployment(
self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -733,17 +622,11 @@ def remove_deployment(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.evaluators.remove_deployment(
- id="ev_890bcd",
- environment_id="staging",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.evaluators.remove_deployment(id='ev_890bcd', environment_id='staging', )
"""
- response = self._raw_client.remove_deployment(id, environment_id, request_options=request_options)
- return response.data
+ _response = self._raw_client.remove_deployment(id, environment_id, request_options=request_options)
+ return _response.data
def list_environments(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -767,16 +650,11 @@ def list_environments(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.evaluators.list_environments(
- id="ev_890bcd",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.evaluators.list_environments(id='ev_890bcd', )
"""
- response = self._raw_client.list_environments(id, request_options=request_options)
- return response.data
+ _response = self._raw_client.list_environments(id, request_options=request_options)
+ return _response.data
def update_monitoring(
self,
@@ -813,18 +691,13 @@ def update_monitoring(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.evaluators.update_monitoring(
- id="id",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.evaluators.update_monitoring(id='id', )
"""
- response = self._raw_client.update_monitoring(
+ _response = self._raw_client.update_monitoring(
id, activate=activate, deactivate=deactivate, request_options=request_options
)
- return response.data
+ return _response.data
class AsyncEvaluatorsClient:
@@ -975,24 +848,14 @@ async def log(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.evaluators.log(
- parent_id="parent_id",
- )
-
-
+ await client.evaluators.log(parent_id='parent_id', )
asyncio.run(main())
"""
- response = await self._raw_client.log(
+ _response = await self._raw_client.log(
parent_id=parent_id,
version_id=version_id,
environment=environment,
@@ -1023,7 +886,7 @@ async def main() -> None:
spec=spec,
request_options=request_options,
)
- return response.data
+ return _response.data
async def list(
self,
@@ -1032,7 +895,7 @@ async def list(
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
user_filter: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncPager[EvaluatorResponse]:
@@ -1053,7 +916,7 @@ async def list(
user_filter : typing.Optional[str]
Case-insensitive filter for users in the Evaluator. This filter matches against both email address and name of users.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort Evaluators by
order : typing.Optional[SortOrder]
@@ -1069,77 +932,28 @@ async def list(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- response = await client.evaluators.list(
- size=1,
- )
+ response = await client.evaluators.list(size=1, )
async for item in response:
yield item
+
# alternatively, you can paginate page-by-page
async for page in response.iter_pages():
yield page
-
-
asyncio.run(main())
"""
- page = page if page is not None else 1
- _response = await self._raw_client._client_wrapper.httpx_client.request(
- "evaluators",
- method="GET",
- params={
- "page": page,
- "size": size,
- "name": name,
- "user_filter": user_filter,
- "sort_by": sort_by,
- "order": order,
- },
+ return await self._raw_client.list(
+ page=page,
+ size=size,
+ name=name,
+ user_filter=user_filter,
+ sort_by=sort_by,
+ order=order,
request_options=request_options,
)
- try:
- if 200 <= _response.status_code < 300:
- _parsed_response = typing.cast(
- PaginatedDataEvaluatorResponse,
- construct_type(
- type_=PaginatedDataEvaluatorResponse, # type: ignore
- object_=_response.json(),
- ),
- )
- _has_next = True
- _get_next = lambda: self.list(
- page=page + 1,
- size=size,
- name=name,
- user_filter=user_filter,
- sort_by=sort_by,
- order=order,
- request_options=request_options,
- )
- _items = _parsed_response.records
- return AsyncPager(has_next=_has_next, items=_items, get_next=_get_next)
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(
- HttpValidationError,
- construct_type(
- type_=HttpValidationError, # type: ignore
- object_=_response.json(),
- ),
- )
- )
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
async def upsert(
self,
@@ -1186,32 +1000,14 @@ async def upsert(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.evaluators.upsert(
- path="Shared Evaluators/Accuracy Evaluator",
- spec={
- "arguments_type": "target_required",
- "return_type": "number",
- "evaluator_type": "python",
- "code": "def evaluate(answer, target):\n return 0.5",
- },
- version_name="simple-evaluator",
- version_description="Simple evaluator that returns 0.5",
- )
-
-
+ await client.evaluators.upsert(
+     path="Shared Evaluators/Accuracy Evaluator",
+     spec={
+         "arguments_type": "target_required",
+         "return_type": "number",
+         "evaluator_type": "python",
+         "code": "def evaluate(answer, target):\n return 0.5",
+     },
+     version_name="simple-evaluator",
+     version_description="Simple evaluator that returns 0.5",
+ )
asyncio.run(main())
"""
- response = await self._raw_client.upsert(
+ _response = await self._raw_client.upsert(
spec=spec,
path=path,
id=id,
@@ -1219,7 +1015,7 @@ async def main() -> None:
version_description=version_description,
request_options=request_options,
)
- return response.data
+ return _response.data
async def get(
self,
@@ -1256,27 +1052,17 @@ async def get(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.evaluators.get(
- id="ev_890bcd",
- )
-
-
+ await client.evaluators.get(id='ev_890bcd', )
asyncio.run(main())
"""
- response = await self._raw_client.get(
+ _response = await self._raw_client.get(
id, version_id=version_id, environment=environment, request_options=request_options
)
- return response.data
+ return _response.data
async def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
"""
@@ -1296,25 +1082,15 @@ async def delete(self, id: str, *, request_options: typing.Optional[RequestOptio
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.evaluators.delete(
- id="ev_890bcd",
- )
-
-
+ await client.evaluators.delete(id='ev_890bcd', )
asyncio.run(main())
"""
- response = await self._raw_client.delete(id, request_options=request_options)
- return response.data
+ _response = await self._raw_client.delete(id, request_options=request_options)
+ return _response.data
async def move(
self,
@@ -1348,26 +1124,15 @@ async def move(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.evaluators.move(
- id="ev_890bcd",
- path="new directory/new name",
- )
-
-
+ await client.evaluators.move(id='ev_890bcd', path='new directory/new name', )
asyncio.run(main())
"""
- response = await self._raw_client.move(id, path=path, name=name, request_options=request_options)
- return response.data
+ _response = await self._raw_client.move(id, path=path, name=name, request_options=request_options)
+ return _response.data
async def list_versions(
self,
@@ -1397,27 +1162,17 @@ async def list_versions(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.evaluators.list_versions(
- id="ev_890bcd",
- )
-
-
+ await client.evaluators.list_versions(id='ev_890bcd', )
asyncio.run(main())
"""
- response = await self._raw_client.list_versions(
+ _response = await self._raw_client.list_versions(
id, evaluator_aggregates=evaluator_aggregates, request_options=request_options
)
- return response.data
+ return _response.data
async def delete_evaluator_version(
self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1442,26 +1197,15 @@ async def delete_evaluator_version(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.evaluators.delete_evaluator_version(
- id="id",
- version_id="version_id",
- )
-
-
+ await client.evaluators.delete_evaluator_version(id='id', version_id='version_id', )
asyncio.run(main())
"""
- response = await self._raw_client.delete_evaluator_version(id, version_id, request_options=request_options)
- return response.data
+ _response = await self._raw_client.delete_evaluator_version(id, version_id, request_options=request_options)
+ return _response.data
async def update_evaluator_version(
self,
@@ -1499,28 +1243,17 @@ async def update_evaluator_version(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.evaluators.update_evaluator_version(
- id="id",
- version_id="version_id",
- )
-
-
+ await client.evaluators.update_evaluator_version(id='id', version_id='version_id', )
asyncio.run(main())
"""
- response = await self._raw_client.update_evaluator_version(
+ _response = await self._raw_client.update_evaluator_version(
id, version_id, name=name, description=description, request_options=request_options
)
- return response.data
+ return _response.data
async def set_deployment(
self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
@@ -1552,29 +1285,17 @@ async def set_deployment(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.evaluators.set_deployment(
- id="ev_890bcd",
- environment_id="staging",
- version_id="evv_012def",
- )
-
-
+ await client.evaluators.set_deployment(id='ev_890bcd', environment_id='staging', version_id='evv_012def', )
asyncio.run(main())
"""
- response = await self._raw_client.set_deployment(
+ _response = await self._raw_client.set_deployment(
id, environment_id, version_id=version_id, request_options=request_options
)
- return response.data
+ return _response.data
async def remove_deployment(
self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1602,26 +1323,15 @@ async def remove_deployment(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.evaluators.remove_deployment(
- id="ev_890bcd",
- environment_id="staging",
- )
-
-
+ await client.evaluators.remove_deployment(id='ev_890bcd', environment_id='staging', )
asyncio.run(main())
"""
- response = await self._raw_client.remove_deployment(id, environment_id, request_options=request_options)
- return response.data
+ _response = await self._raw_client.remove_deployment(id, environment_id, request_options=request_options)
+ return _response.data
async def list_environments(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1644,25 +1354,15 @@ async def list_environments(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.evaluators.list_environments(
- id="ev_890bcd",
- )
-
-
+ await client.evaluators.list_environments(id='ev_890bcd', )
asyncio.run(main())
"""
- response = await self._raw_client.list_environments(id, request_options=request_options)
- return response.data
+ _response = await self._raw_client.list_environments(id, request_options=request_options)
+ return _response.data
async def update_monitoring(
self,
@@ -1698,24 +1398,14 @@ async def update_monitoring(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.evaluators.update_monitoring(
- id="id",
- )
-
-
+ await client.evaluators.update_monitoring(id='id', )
asyncio.run(main())
"""
- response = await self._raw_client.update_monitoring(
+ _response = await self._raw_client.update_monitoring(
id, activate=activate, deactivate=deactivate, request_options=request_options
)
- return response.data
+ return _response.data
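The regenerated docstrings above demonstrate the pager with bare yield statements, which only run inside a generator. Below is a minimal, standalone sketch of the same traversal over the SyncPager returned by client.evaluators.list; the HUMANLOOP_API_KEY environment variable and the page.items attribute are assumptions for illustration, not taken from this patch.

import os

from humanloop import Humanloop

client = Humanloop(api_key=os.environ["HUMANLOOP_API_KEY"])  # assumed env var
response = client.evaluators.list(size=10)

# item-by-item: the pager lazily requests the following pages as needed
for item in response:
    print(item)

# page-by-page
for page in response.iter_pages():
    print(page.items)  # .items mirrors the pager constructor argument; not confirmed by this patch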
diff --git a/src/humanloop/evaluators/raw_client.py b/src/humanloop/evaluators/raw_client.py
index 5344f43f..8aeb32bc 100644
--- a/src/humanloop/evaluators/raw_client.py
+++ b/src/humanloop/evaluators/raw_client.py
@@ -1,34 +1,37 @@
# This file was auto-generated by Fern from our API Definition.
-import typing
-from ..core.client_wrapper import SyncClientWrapper
import datetime as dt
-from ..types.log_status import LogStatus
-from ..requests.chat_message import ChatMessageParams
-from .requests.create_evaluator_log_request_judgment import CreateEvaluatorLogRequestJudgmentParams
-from .requests.create_evaluator_log_request_spec import CreateEvaluatorLogRequestSpecParams
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.http_response import AsyncHttpResponse, HttpResponse
+from ..core.jsonable_encoder import jsonable_encoder
+from ..core.pagination import AsyncPager, BaseHttpResponse, SyncPager
from ..core.request_options import RequestOptions
-from ..core.http_response import HttpResponse
-from ..types.create_evaluator_log_response import CreateEvaluatorLogResponse
from ..core.serialization import convert_and_respect_annotation_metadata
from ..core.unchecked_base_model import construct_type
from ..errors.unprocessable_entity_error import UnprocessableEntityError
-from ..types.http_validation_error import HttpValidationError
-from json.decoder import JSONDecodeError
-from ..core.api_error import ApiError
-from .requests.evaluator_request_spec import EvaluatorRequestSpecParams
-from ..types.evaluator_response import EvaluatorResponse
-from ..core.jsonable_encoder import jsonable_encoder
-from ..types.list_evaluators import ListEvaluators
-from ..types.file_environment_response import FileEnvironmentResponse
+from ..requests.chat_message import ChatMessageParams
from ..requests.evaluator_activation_deactivation_request_activate_item import (
EvaluatorActivationDeactivationRequestActivateItemParams,
)
from ..requests.evaluator_activation_deactivation_request_deactivate_item import (
EvaluatorActivationDeactivationRequestDeactivateItemParams,
)
-from ..core.client_wrapper import AsyncClientWrapper
-from ..core.http_response import AsyncHttpResponse
+from ..types.create_evaluator_log_response import CreateEvaluatorLogResponse
+from ..types.evaluator_response import EvaluatorResponse
+from ..types.file_environment_response import FileEnvironmentResponse
+from ..types.file_sort_by import FileSortBy
+from ..types.http_validation_error import HttpValidationError
+from ..types.list_evaluators import ListEvaluators
+from ..types.log_status import LogStatus
+from ..types.paginated_data_evaluator_response import PaginatedDataEvaluatorResponse
+from ..types.sort_order import SortOrder
+from .requests.create_evaluator_log_request_judgment import CreateEvaluatorLogRequestJudgmentParams
+from .requests.create_evaluator_log_request_spec import CreateEvaluatorLogRequestSpecParams
+from .requests.evaluator_request_spec import EvaluatorRequestSpecParams
# this is used as the default value for optional parameters
OMIT = typing.cast(typing.Any, ...)
@@ -228,18 +231,115 @@ def log(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+ def list(
+ self,
+ *,
+ page: typing.Optional[int] = None,
+ size: typing.Optional[int] = None,
+ name: typing.Optional[str] = None,
+ user_filter: typing.Optional[str] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
+ order: typing.Optional[SortOrder] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> SyncPager[EvaluatorResponse]:
+ """
+ Get a list of all Evaluators.
+
+ Parameters
+ ----------
+ page : typing.Optional[int]
+ Page offset for pagination.
+
+ size : typing.Optional[int]
+ Page size for pagination. Number of Evaluators to fetch.
+
+ name : typing.Optional[str]
+ Case-insensitive filter for Evaluator name.
+
+ user_filter : typing.Optional[str]
+ Case-insensitive filter for users in the Evaluator. This filter matches against both email address and name of users.
+
+ sort_by : typing.Optional[FileSortBy]
+ Field to sort Evaluators by
+
+ order : typing.Optional[SortOrder]
+ Direction to sort by.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ SyncPager[EvaluatorResponse]
+ Successful Response
+ """
+ page = page if page is not None else 1
+
+ _response = self._client_wrapper.httpx_client.request(
+ "evaluators",
+ method="GET",
+ params={
+ "page": page,
+ "size": size,
+ "name": name,
+ "user_filter": user_filter,
+ "sort_by": sort_by,
+ "order": order,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _parsed_response = typing.cast(
+ PaginatedDataEvaluatorResponse,
+ construct_type(
+ type_=PaginatedDataEvaluatorResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ _items = _parsed_response.records
+ _has_next = True
+ _get_next = lambda: self.list(
+ page=page + 1,
+ size=size,
+ name=name,
+ user_filter=user_filter,
+ sort_by=sort_by,
+ order=order,
+ request_options=request_options,
+ )
+ return SyncPager(
+ has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response)
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ headers=dict(_response.headers),
+ body=typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def upsert(
self,
@@ -314,18 +414,19 @@ def upsert(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def get(
self,
@@ -381,18 +482,19 @@ def get(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> HttpResponse[None]:
"""
@@ -420,18 +522,19 @@ def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] =
return HttpResponse(response=_response, data=None)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def move(
self,
@@ -488,18 +591,19 @@ def move(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def list_versions(
self,
@@ -547,18 +651,19 @@ def list_versions(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def delete_evaluator_version(
self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -591,18 +696,19 @@ def delete_evaluator_version(
return HttpResponse(response=_response, data=None)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def update_evaluator_version(
self,
@@ -645,6 +751,9 @@ def update_evaluator_version(
"name": name,
"description": description,
},
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
)
@@ -660,18 +769,19 @@ def update_evaluator_version(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def set_deployment(
self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
@@ -721,18 +831,19 @@ def set_deployment(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def remove_deployment(
self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -768,18 +879,19 @@ def remove_deployment(
return HttpResponse(response=_response, data=None)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def list_environments(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -817,18 +929,19 @@ def list_environments(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def update_monitoring(
self,
@@ -877,6 +990,9 @@ def update_monitoring(
direction="write",
),
},
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
)
@@ -892,18 +1008,19 @@ def update_monitoring(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
class AsyncRawEvaluatorsClient:
@@ -1100,18 +1217,118 @@ async def log(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
+ ),
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+ async def list(
+ self,
+ *,
+ page: typing.Optional[int] = None,
+ size: typing.Optional[int] = None,
+ name: typing.Optional[str] = None,
+ user_filter: typing.Optional[str] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
+ order: typing.Optional[SortOrder] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncPager[EvaluatorResponse]:
+ """
+ Get a list of all Evaluators.
+
+ Parameters
+ ----------
+ page : typing.Optional[int]
+ Page offset for pagination.
+
+ size : typing.Optional[int]
+ Page size for pagination. Number of Evaluators to fetch.
+
+ name : typing.Optional[str]
+ Case-insensitive filter for Evaluator name.
+
+ user_filter : typing.Optional[str]
+ Case-insensitive filter for users in the Evaluator. This filter matches against both email address and name of users.
+
+ sort_by : typing.Optional[FileSortBy]
+ Field to sort Evaluators by
+
+ order : typing.Optional[SortOrder]
+ Direction to sort by.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncPager[EvaluatorResponse]
+ Successful Response
+ """
+ page = page if page is not None else 1
+
+ _response = await self._client_wrapper.httpx_client.request(
+ "evaluators",
+ method="GET",
+ params={
+ "page": page,
+ "size": size,
+ "name": name,
+ "user_filter": user_filter,
+ "sort_by": sort_by,
+ "order": order,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _parsed_response = typing.cast(
+ PaginatedDataEvaluatorResponse,
+ construct_type(
+ type_=PaginatedDataEvaluatorResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ _items = _parsed_response.records
+ _has_next = True
+
+ async def _get_next():
+ return await self.list(
+ page=page + 1,
+ size=size,
+ name=name,
+ user_filter=user_filter,
+ sort_by=sort_by,
+ order=order,
+ request_options=request_options,
)
+
+ return AsyncPager(
+ has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response)
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ headers=dict(_response.headers),
+ body=typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def upsert(
self,
@@ -1186,18 +1403,19 @@ async def upsert(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def get(
self,
@@ -1253,18 +1471,19 @@ async def get(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def delete(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1294,18 +1513,19 @@ async def delete(
return AsyncHttpResponse(response=_response, data=None)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def move(
self,
@@ -1362,18 +1582,19 @@ async def move(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def list_versions(
self,
@@ -1421,18 +1642,19 @@ async def list_versions(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def delete_evaluator_version(
self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1465,18 +1687,19 @@ async def delete_evaluator_version(
return AsyncHttpResponse(response=_response, data=None)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def update_evaluator_version(
self,
@@ -1519,6 +1742,9 @@ async def update_evaluator_version(
"name": name,
"description": description,
},
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
)
@@ -1534,18 +1760,19 @@ async def update_evaluator_version(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def set_deployment(
self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
@@ -1595,18 +1822,19 @@ async def set_deployment(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def remove_deployment(
self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1642,18 +1870,19 @@ async def remove_deployment(
return AsyncHttpResponse(response=_response, data=None)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def list_environments(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1691,18 +1920,19 @@ async def list_environments(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def update_monitoring(
self,
@@ -1751,6 +1981,9 @@ async def update_monitoring(
direction="write",
),
},
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
)
@@ -1766,15 +1999,16 @@ async def update_monitoring(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
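The raw clients above now build their pagers from three pieces: the parsed page's records, a has_next flag that the generated code always sets to True, and a get_next callback that re-issues the request with page + 1. The following is a simplified, hypothetical illustration of that contract, not the real humanloop.core.pagination.SyncPager; termination here is explicit, whereas the generated code presumably relies on the pager stopping once a page comes back empty.

from dataclasses import dataclass
from typing import Callable, Iterator, List, Optional


@dataclass
class MiniPager:
    # same shape as the SyncPager constructor calls in the diff above
    has_next: bool
    items: List[dict]
    get_next: Optional[Callable[[], "MiniPager"]]

    def __iter__(self) -> Iterator[dict]:
        pager: Optional["MiniPager"] = self
        while pager is not None:
            yield from pager.items
            pager = pager.get_next() if (pager.has_next and pager.get_next) else None


def fetch_page(page: int) -> MiniPager:
    # stand-in for GET /evaluators with params {"page": page, "size": ...}
    records = [{"page": page, "index": i} for i in range(2)]
    return MiniPager(
        has_next=page < 3,  # the generated code passes has_next=True unconditionally
        items=records,
        get_next=lambda: fetch_page(page + 1),
    )


for record in fetch_page(1):
    print(record)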
diff --git a/src/humanloop/evaluators/requests/__init__.py b/src/humanloop/evaluators/requests/__init__.py
index 7ab68cd4..6a00390a 100644
--- a/src/humanloop/evaluators/requests/__init__.py
+++ b/src/humanloop/evaluators/requests/__init__.py
@@ -1,5 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
+# isort: skip_file
+
from .create_evaluator_log_request_judgment import CreateEvaluatorLogRequestJudgmentParams
from .create_evaluator_log_request_spec import CreateEvaluatorLogRequestSpecParams
from .evaluator_request_spec import EvaluatorRequestSpecParams
diff --git a/src/humanloop/evaluators/requests/create_evaluator_log_request_spec.py b/src/humanloop/evaluators/requests/create_evaluator_log_request_spec.py
index 2ec95e70..0e6539ed 100644
--- a/src/humanloop/evaluators/requests/create_evaluator_log_request_spec.py
+++ b/src/humanloop/evaluators/requests/create_evaluator_log_request_spec.py
@@ -1,10 +1,11 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from ...requests.llm_evaluator_request import LlmEvaluatorRequestParams
+
from ...requests.code_evaluator_request import CodeEvaluatorRequestParams
-from ...requests.human_evaluator_request import HumanEvaluatorRequestParams
from ...requests.external_evaluator_request import ExternalEvaluatorRequestParams
+from ...requests.human_evaluator_request import HumanEvaluatorRequestParams
+from ...requests.llm_evaluator_request import LlmEvaluatorRequestParams
CreateEvaluatorLogRequestSpecParams = typing.Union[
LlmEvaluatorRequestParams, CodeEvaluatorRequestParams, HumanEvaluatorRequestParams, ExternalEvaluatorRequestParams
diff --git a/src/humanloop/evaluators/requests/evaluator_request_spec.py b/src/humanloop/evaluators/requests/evaluator_request_spec.py
index 1a3c35df..7bd0d395 100644
--- a/src/humanloop/evaluators/requests/evaluator_request_spec.py
+++ b/src/humanloop/evaluators/requests/evaluator_request_spec.py
@@ -1,10 +1,11 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from ...requests.llm_evaluator_request import LlmEvaluatorRequestParams
+
from ...requests.code_evaluator_request import CodeEvaluatorRequestParams
-from ...requests.human_evaluator_request import HumanEvaluatorRequestParams
from ...requests.external_evaluator_request import ExternalEvaluatorRequestParams
+from ...requests.human_evaluator_request import HumanEvaluatorRequestParams
+from ...requests.llm_evaluator_request import LlmEvaluatorRequestParams
EvaluatorRequestSpecParams = typing.Union[
LlmEvaluatorRequestParams, CodeEvaluatorRequestParams, HumanEvaluatorRequestParams, ExternalEvaluatorRequestParams
diff --git a/src/humanloop/evaluators/types/__init__.py b/src/humanloop/evaluators/types/__init__.py
index cecd4117..09e95d81 100644
--- a/src/humanloop/evaluators/types/__init__.py
+++ b/src/humanloop/evaluators/types/__init__.py
@@ -1,5 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
+# isort: skip_file
+
from .create_evaluator_log_request_judgment import CreateEvaluatorLogRequestJudgment
from .create_evaluator_log_request_spec import CreateEvaluatorLogRequestSpec
from .evaluator_request_spec import EvaluatorRequestSpec
diff --git a/src/humanloop/evaluators/types/create_evaluator_log_request_spec.py b/src/humanloop/evaluators/types/create_evaluator_log_request_spec.py
index 5fbb7cb0..0f22560c 100644
--- a/src/humanloop/evaluators/types/create_evaluator_log_request_spec.py
+++ b/src/humanloop/evaluators/types/create_evaluator_log_request_spec.py
@@ -1,10 +1,11 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from ...types.llm_evaluator_request import LlmEvaluatorRequest
+
from ...types.code_evaluator_request import CodeEvaluatorRequest
-from ...types.human_evaluator_request import HumanEvaluatorRequest
from ...types.external_evaluator_request import ExternalEvaluatorRequest
+from ...types.human_evaluator_request import HumanEvaluatorRequest
+from ...types.llm_evaluator_request import LlmEvaluatorRequest
CreateEvaluatorLogRequestSpec = typing.Union[
LlmEvaluatorRequest, CodeEvaluatorRequest, HumanEvaluatorRequest, ExternalEvaluatorRequest
diff --git a/src/humanloop/evaluators/types/evaluator_request_spec.py b/src/humanloop/evaluators/types/evaluator_request_spec.py
index aec9ca8e..3f31af3f 100644
--- a/src/humanloop/evaluators/types/evaluator_request_spec.py
+++ b/src/humanloop/evaluators/types/evaluator_request_spec.py
@@ -1,10 +1,11 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from ...types.llm_evaluator_request import LlmEvaluatorRequest
+
from ...types.code_evaluator_request import CodeEvaluatorRequest
-from ...types.human_evaluator_request import HumanEvaluatorRequest
from ...types.external_evaluator_request import ExternalEvaluatorRequest
+from ...types.human_evaluator_request import HumanEvaluatorRequest
+from ...types.llm_evaluator_request import LlmEvaluatorRequest
EvaluatorRequestSpec = typing.Union[
LlmEvaluatorRequest, CodeEvaluatorRequest, HumanEvaluatorRequest, ExternalEvaluatorRequest
diff --git a/src/humanloop/files/__init__.py b/src/humanloop/files/__init__.py
index df4bcb87..7b3a69b5 100644
--- a/src/humanloop/files/__init__.py
+++ b/src/humanloop/files/__init__.py
@@ -1,5 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
+# isort: skip_file
+
from .types import RetrieveByPathFilesRetrieveByPathPostResponse
from .requests import RetrieveByPathFilesRetrieveByPathPostResponseParams
diff --git a/src/humanloop/files/client.py b/src/humanloop/files/client.py
index 693b46cb..407ba0e9 100644
--- a/src/humanloop/files/client.py
+++ b/src/humanloop/files/client.py
@@ -1,18 +1,17 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from ..core.client_wrapper import SyncClientWrapper
-from .raw_client import RawFilesClient
-from ..types.file_type import FileType
-from ..types.project_sort_by import ProjectSortBy
-from ..types.sort_order import SortOrder
+
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
from ..core.request_options import RequestOptions
+from ..types.file_sort_by import FileSortBy
+from ..types.file_type import FileType
from ..types.paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response import (
PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse,
)
+from ..types.sort_order import SortOrder
+from .raw_client import AsyncRawFilesClient, RawFilesClient
from .types.retrieve_by_path_files_retrieve_by_path_post_response import RetrieveByPathFilesRetrieveByPathPostResponse
-from ..core.client_wrapper import AsyncClientWrapper
-from .raw_client import AsyncRawFilesClient
# this is used as the default value for optional parameters
OMIT = typing.cast(typing.Any, ...)
@@ -39,11 +38,13 @@ def list_files(
page: typing.Optional[int] = None,
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
+ path: typing.Optional[str] = None,
template: typing.Optional[bool] = None,
type: typing.Optional[typing.Union[FileType, typing.Sequence[FileType]]] = None,
environment: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
+ include_raw_file_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse:
"""
@@ -60,6 +61,9 @@ def list_files(
name : typing.Optional[str]
Case-insensitive filter for file name.
+ path : typing.Optional[str]
+ Path of the directory to filter for. Returns files in this directory and all its subdirectories.
+
template : typing.Optional[bool]
Filter to include only template files.
@@ -69,12 +73,15 @@ def list_files(
environment : typing.Optional[str]
Case-sensitive filter for files with a deployment in the specified environment. Requires the environment name.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort files by
order : typing.Optional[SortOrder]
Direction to sort by.
+ include_raw_file_content : typing.Optional[bool]
+ Whether to include the raw file content in the response. Currently only supported for Agents and Prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -86,30 +93,30 @@ def list_files(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
client.files.list_files()
"""
- response = self._raw_client.list_files(
+ _response = self._raw_client.list_files(
page=page,
size=size,
name=name,
+ path=path,
template=template,
type=type,
environment=environment,
sort_by=sort_by,
order=order,
+ include_raw_file_content=include_raw_file_content,
request_options=request_options,
)
- return response.data
+ return _response.data
def retrieve_by_path(
self,
*,
path: str,
environment: typing.Optional[str] = None,
+ include_raw_file_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> RetrieveByPathFilesRetrieveByPathPostResponse:
"""
@@ -123,6 +130,9 @@ def retrieve_by_path(
environment : typing.Optional[str]
Name of the Environment to retrieve a deployed Version from.
+ include_raw_file_content : typing.Optional[bool]
+ Whether to include the raw file content in the response. Currently only supported for Agents and Prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -134,18 +144,16 @@ def retrieve_by_path(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.files.retrieve_by_path(
- path="path",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.files.retrieve_by_path(path='path', )
"""
- response = self._raw_client.retrieve_by_path(
- path=path, environment=environment, request_options=request_options
+ _response = self._raw_client.retrieve_by_path(
+ path=path,
+ environment=environment,
+ include_raw_file_content=include_raw_file_content,
+ request_options=request_options,
)
- return response.data
+ return _response.data
class AsyncFilesClient:
@@ -169,11 +177,13 @@ async def list_files(
page: typing.Optional[int] = None,
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
+ path: typing.Optional[str] = None,
template: typing.Optional[bool] = None,
type: typing.Optional[typing.Union[FileType, typing.Sequence[FileType]]] = None,
environment: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
+ include_raw_file_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse:
"""
@@ -190,6 +200,9 @@ async def list_files(
name : typing.Optional[str]
Case-insensitive filter for file name.
+ path : typing.Optional[str]
+ Path of the directory to filter for. Returns files in this directory and all its subdirectories.
+
template : typing.Optional[bool]
Filter to include only template files.
@@ -199,12 +212,15 @@ async def list_files(
environment : typing.Optional[str]
Case-sensitive filter for files with a deployment in the specified environment. Requires the environment name.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort files by
order : typing.Optional[SortOrder]
Direction to sort by.
+ include_raw_file_content : typing.Optional[bool]
+ Whether to include the raw file content in the response. Currently only supported for Agents and Prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -215,39 +231,34 @@ async def list_files(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
await client.files.list_files()
-
-
asyncio.run(main())
"""
- response = await self._raw_client.list_files(
+ _response = await self._raw_client.list_files(
page=page,
size=size,
name=name,
+ path=path,
template=template,
type=type,
environment=environment,
sort_by=sort_by,
order=order,
+ include_raw_file_content=include_raw_file_content,
request_options=request_options,
)
- return response.data
+ return _response.data
async def retrieve_by_path(
self,
*,
path: str,
environment: typing.Optional[str] = None,
+ include_raw_file_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> RetrieveByPathFilesRetrieveByPathPostResponse:
"""
@@ -261,6 +272,9 @@ async def retrieve_by_path(
environment : typing.Optional[str]
Name of the Environment to retrieve a deployed Version from.
+ include_raw_file_content : typing.Optional[bool]
+ Whether to include the raw file content in the response. Currently only supported for Agents and Prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -271,24 +285,17 @@ async def retrieve_by_path(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.files.retrieve_by_path(
- path="path",
- )
-
-
+ await client.files.retrieve_by_path(path='path', )
asyncio.run(main())
"""
- response = await self._raw_client.retrieve_by_path(
- path=path, environment=environment, request_options=request_options
+ _response = await self._raw_client.retrieve_by_path(
+ path=path,
+ environment=environment,
+ include_raw_file_content=include_raw_file_content,
+ request_options=request_options,
)
- return response.data
+ return _response.data
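
Taken together, the Files client changes above add a directory `path` filter and an `include_raw_file_content` flag, and swap the `sort_by` enum from `ProjectSortBy` to `FileSortBy`. A minimal usage sketch of the regenerated surface, assuming a configured API key and placeholder paths; the `records` attribute, the `"created_at"` sort value, and the `"desc"` order literal are assumptions about the paginated response model and the enum literals, not confirmed by this patch:

    from humanloop import Humanloop

    client = Humanloop(api_key="YOUR_API_KEY")

    # Filter to a directory subtree and include raw file content
    # (raw content is currently only returned for Agents and Prompts).
    files_page = client.files.list_files(
        path="Personal Projects",        # assumed placeholder directory
        sort_by="created_at",            # assumed FileSortBy literal
        order="desc",                    # assumed SortOrder literal
        include_raw_file_content=True,
    )
    for f in files_page.records:         # assumed field on the paginated response
        print(f.id, f.path)

    # Retrieve a single deployed file by path, with raw content where supported.
    file = client.files.retrieve_by_path(
        path="Personal Projects/MedQA Prompt",   # assumed placeholder path
        environment="production",
        include_raw_file_content=True,
    )
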
diff --git a/src/humanloop/files/raw_client.py b/src/humanloop/files/raw_client.py
index 01b48e03..2f5f2d05 100644
--- a/src/humanloop/files/raw_client.py
+++ b/src/humanloop/files/raw_client.py
@@ -1,23 +1,22 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from ..core.client_wrapper import SyncClientWrapper
-from ..types.file_type import FileType
-from ..types.project_sort_by import ProjectSortBy
-from ..types.sort_order import SortOrder
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.http_response import AsyncHttpResponse, HttpResponse
from ..core.request_options import RequestOptions
-from ..core.http_response import HttpResponse
-from ..types.paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response import (
- PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse,
-)
from ..core.unchecked_base_model import construct_type
from ..errors.unprocessable_entity_error import UnprocessableEntityError
+from ..types.file_sort_by import FileSortBy
+from ..types.file_type import FileType
from ..types.http_validation_error import HttpValidationError
-from json.decoder import JSONDecodeError
-from ..core.api_error import ApiError
+from ..types.paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response import (
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse,
+)
+from ..types.sort_order import SortOrder
from .types.retrieve_by_path_files_retrieve_by_path_post_response import RetrieveByPathFilesRetrieveByPathPostResponse
-from ..core.client_wrapper import AsyncClientWrapper
-from ..core.http_response import AsyncHttpResponse
# this is used as the default value for optional parameters
OMIT = typing.cast(typing.Any, ...)
@@ -33,11 +32,13 @@ def list_files(
page: typing.Optional[int] = None,
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
+ path: typing.Optional[str] = None,
template: typing.Optional[bool] = None,
type: typing.Optional[typing.Union[FileType, typing.Sequence[FileType]]] = None,
environment: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
+ include_raw_file_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[
PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse
@@ -56,6 +57,9 @@ def list_files(
name : typing.Optional[str]
Case-insensitive filter for file name.
+ path : typing.Optional[str]
+ Path of the directory to filter for. Returns files in this directory and all its subdirectories.
+
template : typing.Optional[bool]
Filter to include only template files.
@@ -65,12 +69,15 @@ def list_files(
environment : typing.Optional[str]
Case-sensitive filter for files with a deployment in the specified environment. Requires the environment name.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort files by
order : typing.Optional[SortOrder]
Direction to sort by.
+ include_raw_file_content : typing.Optional[bool]
+ Whether to include the raw file content in the response. Currently only supported for Agents and Prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -86,11 +93,13 @@ def list_files(
"page": page,
"size": size,
"name": name,
+ "path": path,
"template": template,
"type": type,
"environment": environment,
"sort_by": sort_by,
"order": order,
+ "include_raw_file_content": include_raw_file_content,
},
request_options=request_options,
)
@@ -106,24 +115,26 @@ def list_files(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def retrieve_by_path(
self,
*,
path: str,
environment: typing.Optional[str] = None,
+ include_raw_file_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[RetrieveByPathFilesRetrieveByPathPostResponse]:
"""
@@ -137,6 +148,9 @@ def retrieve_by_path(
environment : typing.Optional[str]
Name of the Environment to retrieve a deployed Version from.
+ include_raw_file_content : typing.Optional[bool]
+ Whether to include the raw file content in the response. Currently only supported for Agents and Prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -150,6 +164,7 @@ def retrieve_by_path(
method="POST",
params={
"environment": environment,
+ "include_raw_file_content": include_raw_file_content,
},
json={
"path": path,
@@ -172,18 +187,19 @@ def retrieve_by_path(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
class AsyncRawFilesClient:
@@ -196,11 +212,13 @@ async def list_files(
page: typing.Optional[int] = None,
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
+ path: typing.Optional[str] = None,
template: typing.Optional[bool] = None,
type: typing.Optional[typing.Union[FileType, typing.Sequence[FileType]]] = None,
environment: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
+ include_raw_file_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[
PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse
@@ -219,6 +237,9 @@ async def list_files(
name : typing.Optional[str]
Case-insensitive filter for file name.
+ path : typing.Optional[str]
+ Path of the directory to filter for. Returns files in this directory and all its subdirectories.
+
template : typing.Optional[bool]
Filter to include only template files.
@@ -228,12 +249,15 @@ async def list_files(
environment : typing.Optional[str]
Case-sensitive filter for files with a deployment in the specified environment. Requires the environment name.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort files by
order : typing.Optional[SortOrder]
Direction to sort by.
+ include_raw_file_content : typing.Optional[bool]
+ Whether to include the raw file content in the response. Currently only supported for Agents and Prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -249,11 +273,13 @@ async def list_files(
"page": page,
"size": size,
"name": name,
+ "path": path,
"template": template,
"type": type,
"environment": environment,
"sort_by": sort_by,
"order": order,
+ "include_raw_file_content": include_raw_file_content,
},
request_options=request_options,
)
@@ -269,24 +295,26 @@ async def list_files(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def retrieve_by_path(
self,
*,
path: str,
environment: typing.Optional[str] = None,
+ include_raw_file_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[RetrieveByPathFilesRetrieveByPathPostResponse]:
"""
@@ -300,6 +328,9 @@ async def retrieve_by_path(
environment : typing.Optional[str]
Name of the Environment to retrieve a deployed Version from.
+ include_raw_file_content : typing.Optional[bool]
+ Whether to include the raw file content in the response. Currently only supported for Agents and Prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -313,6 +344,7 @@ async def retrieve_by_path(
method="POST",
params={
"environment": environment,
+ "include_raw_file_content": include_raw_file_content,
},
json={
"path": path,
@@ -335,15 +367,16 @@ async def retrieve_by_path(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
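
The error-handling changes in this raw client now pass the response headers into `UnprocessableEntityError` and `ApiError` alongside the status code and body. A hedged sketch of what that enables for callers, assuming the exceptions expose `status_code`, `headers`, and `body` attributes mirroring the constructor keywords used above, and that `UnprocessableEntityError` subclasses `ApiError`:

    from humanloop import Humanloop
    from humanloop.core.api_error import ApiError

    client = Humanloop(api_key="YOUR_API_KEY")

    try:
        client.files.list_files(page=-1)  # an invalid value the API would reject with a 422
    except ApiError as exc:
        # Headers are now available for logging request IDs, rate limits, etc.
        print(exc.status_code, exc.body)
        print(exc.headers)  # assumed attribute populated from dict(_response.headers)
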
diff --git a/src/humanloop/files/requests/__init__.py b/src/humanloop/files/requests/__init__.py
index 2f3142fd..c4ae6bb0 100644
--- a/src/humanloop/files/requests/__init__.py
+++ b/src/humanloop/files/requests/__init__.py
@@ -1,5 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
+# isort: skip_file
+
from .retrieve_by_path_files_retrieve_by_path_post_response import RetrieveByPathFilesRetrieveByPathPostResponseParams
__all__ = ["RetrieveByPathFilesRetrieveByPathPostResponseParams"]
diff --git a/src/humanloop/files/requests/retrieve_by_path_files_retrieve_by_path_post_response.py b/src/humanloop/files/requests/retrieve_by_path_files_retrieve_by_path_post_response.py
index 8c070ab3..20c1bef0 100644
--- a/src/humanloop/files/requests/retrieve_by_path_files_retrieve_by_path_post_response.py
+++ b/src/humanloop/files/requests/retrieve_by_path_files_retrieve_by_path_post_response.py
@@ -1,12 +1,13 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from ...requests.prompt_response import PromptResponseParams
-from ...requests.tool_response import ToolResponseParams
+
+from ...requests.agent_response import AgentResponseParams
from ...requests.dataset_response import DatasetResponseParams
from ...requests.evaluator_response import EvaluatorResponseParams
from ...requests.flow_response import FlowResponseParams
-from ...requests.agent_response import AgentResponseParams
+from ...requests.prompt_response import PromptResponseParams
+from ...requests.tool_response import ToolResponseParams
RetrieveByPathFilesRetrieveByPathPostResponseParams = typing.Union[
PromptResponseParams,
diff --git a/src/humanloop/files/types/__init__.py b/src/humanloop/files/types/__init__.py
index f2e3b2da..c34673a3 100644
--- a/src/humanloop/files/types/__init__.py
+++ b/src/humanloop/files/types/__init__.py
@@ -1,5 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
+# isort: skip_file
+
from .retrieve_by_path_files_retrieve_by_path_post_response import RetrieveByPathFilesRetrieveByPathPostResponse
__all__ = ["RetrieveByPathFilesRetrieveByPathPostResponse"]
diff --git a/src/humanloop/files/types/retrieve_by_path_files_retrieve_by_path_post_response.py b/src/humanloop/files/types/retrieve_by_path_files_retrieve_by_path_post_response.py
index 46ea271a..c3dd6cb7 100644
--- a/src/humanloop/files/types/retrieve_by_path_files_retrieve_by_path_post_response.py
+++ b/src/humanloop/files/types/retrieve_by_path_files_retrieve_by_path_post_response.py
@@ -1,12 +1,13 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from ...types.prompt_response import PromptResponse
-from ...types.tool_response import ToolResponse
+
+from ...types.agent_response import AgentResponse
from ...types.dataset_response import DatasetResponse
from ...types.evaluator_response import EvaluatorResponse
from ...types.flow_response import FlowResponse
-from ...types.agent_response import AgentResponse
+from ...types.prompt_response import PromptResponse
+from ...types.tool_response import ToolResponse
RetrieveByPathFilesRetrieveByPathPostResponse = typing.Union[
PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse, AgentResponse
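
Since `retrieve_by_path` returns this untagged union of the six file response models, callers usually narrow the result before touching type-specific fields. A small sketch, importing two of the models from the module paths shown in the diff; the `path` argument is a placeholder:

    from humanloop import Humanloop
    from humanloop.types.agent_response import AgentResponse
    from humanloop.types.prompt_response import PromptResponse

    client = Humanloop(api_key="YOUR_API_KEY")

    file = client.files.retrieve_by_path(path="Personal Projects/MedQA Prompt")
    if isinstance(file, PromptResponse):
        print("prompt:", file.id)
    elif isinstance(file, AgentResponse):
        print("agent:", file.id)
    else:
        print("other file type:", type(file).__name__)
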
diff --git a/src/humanloop/flows/__init__.py b/src/humanloop/flows/__init__.py
index f3ea2659..5cde0202 100644
--- a/src/humanloop/flows/__init__.py
+++ b/src/humanloop/flows/__init__.py
@@ -1,2 +1,4 @@
# This file was auto-generated by Fern from our API Definition.
+# isort: skip_file
+
diff --git a/src/humanloop/flows/client.py b/src/humanloop/flows/client.py
index bcb9491c..802c52d8 100644
--- a/src/humanloop/flows/client.py
+++ b/src/humanloop/flows/client.py
@@ -1,36 +1,28 @@
# This file was auto-generated by Fern from our API Definition.
-import typing
-from ..core.client_wrapper import SyncClientWrapper
-from .raw_client import RawFlowsClient
-from ..requests.chat_message import ChatMessageParams
import datetime as dt
-from ..types.log_status import LogStatus
-from ..requests.flow_kernel_request import FlowKernelRequestParams
+import typing
+
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pagination import AsyncPager, SyncPager
from ..core.request_options import RequestOptions
-from ..types.create_flow_log_response import CreateFlowLogResponse
-from ..types.flow_log_response import FlowLogResponse
-from ..types.flow_response import FlowResponse
-from ..types.project_sort_by import ProjectSortBy
-from ..types.sort_order import SortOrder
-from ..core.pagination import SyncPager
-from ..types.paginated_data_flow_response import PaginatedDataFlowResponse
-from ..core.unchecked_base_model import construct_type
-from ..errors.unprocessable_entity_error import UnprocessableEntityError
-from ..types.http_validation_error import HttpValidationError
-from json.decoder import JSONDecodeError
-from ..core.api_error import ApiError
-from ..types.list_flows import ListFlows
-from ..types.file_environment_response import FileEnvironmentResponse
+from ..requests.chat_message import ChatMessageParams
from ..requests.evaluator_activation_deactivation_request_activate_item import (
EvaluatorActivationDeactivationRequestActivateItemParams,
)
from ..requests.evaluator_activation_deactivation_request_deactivate_item import (
EvaluatorActivationDeactivationRequestDeactivateItemParams,
)
-from ..core.client_wrapper import AsyncClientWrapper
-from .raw_client import AsyncRawFlowsClient
-from ..core.pagination import AsyncPager
+from ..requests.flow_kernel_request import FlowKernelRequestParams
+from ..types.create_flow_log_response import CreateFlowLogResponse
+from ..types.file_environment_response import FileEnvironmentResponse
+from ..types.file_sort_by import FileSortBy
+from ..types.flow_log_response import FlowLogResponse
+from ..types.flow_response import FlowResponse
+from ..types.list_flows import ListFlows
+from ..types.log_status import LogStatus
+from ..types.sort_order import SortOrder
+from .raw_client import AsyncRawFlowsClient, RawFlowsClient
# this is used as the default value for optional parameters
OMIT = typing.cast(typing.Any, ...)
@@ -185,43 +177,15 @@ def log(
Examples
--------
- import datetime
-
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.flows.log(
- id="fl_6o701g4jmcanPVHxdqD0O",
- flow={
- "attributes": {
- "prompt": {
- "template": "You are a helpful assistant helping with medical anamnesis",
- "model": "gpt-4o",
- "temperature": 0.8,
- },
- "tool": {
- "name": "retrieval_tool_v3",
- "description": "Retrieval tool for MedQA.",
- "source_code": "def retrieval_tool(question: str) -> str:\n pass\n",
- },
- }
- },
- inputs={
- "question": "Patient with a history of diabetes and hypertension presents with chest pain and shortness of breath."
- },
- output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.",
- log_status="incomplete",
- start_time=datetime.datetime.fromisoformat(
- "2024-07-08 21:40:35+00:00",
- ),
- end_time=datetime.datetime.fromisoformat(
- "2024-07-08 21:40:39+00:00",
- ),
- )
+ import datetime
+ client = Humanloop(api_key="YOUR_API_KEY", )
+        client.flows.log(
+            id='fl_6o701g4jmcanPVHxdqD0O',
+            flow={'attributes': {'prompt': {'template': 'You are a helpful assistant helping with medical anamnesis', 'model': 'gpt-4o', 'temperature': 0.8},
+                                 'tool': {'name': 'retrieval_tool_v3', 'description': 'Retrieval tool for MedQA.', 'source_code': 'def retrieval_tool(question: str) -> str:\n    pass\n'}}},
+            inputs={'question': 'Patient with a history of diabetes and hypertension presents with chest pain and shortness of breath.'},
+            output='The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.',
+            log_status="incomplete", start_time=datetime.datetime.fromisoformat("2024-07-08 22:40:35+00:00"), end_time=datetime.datetime.fromisoformat("2024-07-08 22:40:39+00:00"),
+        )
"""
- response = self._raw_client.log(
+ _response = self._raw_client.log(
version_id=version_id,
environment=environment,
messages=messages,
@@ -251,7 +215,7 @@ def log(
flow=flow,
request_options=request_options,
)
- return response.data
+ return _response.data
def update_log(
self,
@@ -307,20 +271,11 @@ def update_log(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.flows.update_log(
- log_id="medqa_experiment_0001",
- inputs={
- "question": "Patient with a history of diabetes and normal tension presents with chest pain and shortness of breath."
- },
- output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.",
- log_status="complete",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+        client.flows.update_log(
+            log_id='medqa_experiment_0001',
+            inputs={'question': 'Patient with a history of diabetes and normal tension presents with chest pain and shortness of breath.'},
+            output='The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.', log_status="complete")
"""
- response = self._raw_client.update_log(
+ _response = self._raw_client.update_log(
log_id,
messages=messages,
output_message=output_message,
@@ -330,7 +285,7 @@ def update_log(
log_status=log_status,
request_options=request_options,
)
- return response.data
+ return _response.data
def get(
self,
@@ -368,18 +323,13 @@ def get(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.flows.get(
- id="fl_6o701g4jmcanPVHxdqD0O",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.flows.get(id='fl_6o701g4jmcanPVHxdqD0O', )
"""
- response = self._raw_client.get(
+ _response = self._raw_client.get(
id, version_id=version_id, environment=environment, request_options=request_options
)
- return response.data
+ return _response.data
def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
"""
@@ -400,16 +350,11 @@ def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] =
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.flows.delete(
- id="fl_6o701g4jmcanPVHxdqD0O",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.flows.delete(id='fl_6o701g4jmcanPVHxdqD0O', )
"""
- response = self._raw_client.delete(id, request_options=request_options)
- return response.data
+ _response = self._raw_client.delete(id, request_options=request_options)
+ return _response.data
def move(
self,
@@ -448,19 +393,13 @@ def move(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.flows.move(
- id="fl_6o701g4jmcanPVHxdqD0O",
- path="new directory/new name",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.flows.move(id='fl_6o701g4jmcanPVHxdqD0O', path='new directory/new name', )
"""
- response = self._raw_client.move(
+ _response = self._raw_client.move(
id, path=path, name=name, directory_id=directory_id, request_options=request_options
)
- return response.data
+ return _response.data
def list(
self,
@@ -469,7 +408,7 @@ def list(
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
user_filter: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> SyncPager[FlowResponse]:
@@ -490,7 +429,7 @@ def list(
user_filter : typing.Optional[str]
Case-insensitive filter for users in the Flow. This filter matches against both email address and name of users.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort Flows by
order : typing.Optional[SortOrder]
@@ -507,68 +446,23 @@ def list(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- response = client.flows.list(
- size=1,
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ response = client.flows.list(size=1, )
for item in response:
yield item
# alternatively, you can paginate page-by-page
for page in response.iter_pages():
yield page
"""
- page = page if page is not None else 1
- _response = self._raw_client._client_wrapper.httpx_client.request(
- "flows",
- method="GET",
- params={
- "page": page,
- "size": size,
- "name": name,
- "user_filter": user_filter,
- "sort_by": sort_by,
- "order": order,
- },
+ return self._raw_client.list(
+ page=page,
+ size=size,
+ name=name,
+ user_filter=user_filter,
+ sort_by=sort_by,
+ order=order,
request_options=request_options,
)
- try:
- if 200 <= _response.status_code < 300:
- _parsed_response = typing.cast(
- PaginatedDataFlowResponse,
- construct_type(
- type_=PaginatedDataFlowResponse, # type: ignore
- object_=_response.json(),
- ),
- )
- _has_next = True
- _get_next = lambda: self.list(
- page=page + 1,
- size=size,
- name=name,
- user_filter=user_filter,
- sort_by=sort_by,
- order=order,
- request_options=request_options,
- )
- _items = _parsed_response.records
- return SyncPager(has_next=_has_next, items=_items, get_next=_get_next)
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(
- HttpValidationError,
- construct_type(
- type_=HttpValidationError, # type: ignore
- object_=_response.json(),
- ),
- )
- )
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
def upsert(
self,
@@ -617,29 +511,14 @@ def upsert(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.flows.upsert(
- path="Personal Projects/MedQA Flow",
- attributes={
- "prompt": {
- "template": "You are a helpful medical assistant helping with medical anamnesis. Answer {{question}}",
- "model": "gpt-4o",
- "temperature": 0.8,
- },
- "tool": {
- "name": "retrieval_tool_v3",
- "description": "Retrieval tool for MedQA.",
- "source_code": "def retrieval_tool(question: str) -> str:\n pass\n",
- },
- "version_name": "medqa-flow-v1",
- "version_description": "Initial version",
- },
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+        client.flows.upsert(
+            path='Personal Projects/MedQA Flow',
+            attributes={
+                'prompt': {'template': 'You are a helpful medical assistant helping with medical anamnesis. Answer {{question}}', 'model': 'gpt-4o', 'temperature': 0.8},
+                'tool': {'name': 'retrieval_tool_v3', 'description': 'Retrieval tool for MedQA.', 'source_code': 'def retrieval_tool(question: str) -> str:\n    pass\n'},
+                'version_name': 'medqa-flow-v1',
+                'version_description': 'Initial version',
+            },
+        )
"""
- response = self._raw_client.upsert(
+ _response = self._raw_client.upsert(
attributes=attributes,
path=path,
id=id,
@@ -647,7 +526,7 @@ def upsert(
version_description=version_description,
request_options=request_options,
)
- return response.data
+ return _response.data
def list_versions(
self,
@@ -678,18 +557,13 @@ def list_versions(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.flows.list_versions(
- id="fl_6o701g4jmcanPVHxdqD0O",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.flows.list_versions(id='fl_6o701g4jmcanPVHxdqD0O', )
"""
- response = self._raw_client.list_versions(
+ _response = self._raw_client.list_versions(
id, evaluator_aggregates=evaluator_aggregates, request_options=request_options
)
- return response.data
+ return _response.data
def delete_flow_version(
self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -715,17 +589,11 @@ def delete_flow_version(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.flows.delete_flow_version(
- id="id",
- version_id="version_id",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.flows.delete_flow_version(id='id', version_id='version_id', )
"""
- response = self._raw_client.delete_flow_version(id, version_id, request_options=request_options)
- return response.data
+ _response = self._raw_client.delete_flow_version(id, version_id, request_options=request_options)
+ return _response.data
def update_flow_version(
self,
@@ -764,19 +632,13 @@ def update_flow_version(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.flows.update_flow_version(
- id="id",
- version_id="version_id",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.flows.update_flow_version(id='id', version_id='version_id', )
"""
- response = self._raw_client.update_flow_version(
+ _response = self._raw_client.update_flow_version(
id, version_id, name=name, description=description, request_options=request_options
)
- return response.data
+ return _response.data
def set_deployment(
self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
@@ -809,20 +671,13 @@ def set_deployment(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.flows.set_deployment(
- id="fl_6o701g4jmcanPVHxdqD0O",
- environment_id="staging",
- version_id="flv_6o701g4jmcanPVHxdqD0O",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.flows.set_deployment(id='fl_6o701g4jmcanPVHxdqD0O', environment_id='staging', version_id='flv_6o701g4jmcanPVHxdqD0O', )
"""
- response = self._raw_client.set_deployment(
+ _response = self._raw_client.set_deployment(
id, environment_id, version_id=version_id, request_options=request_options
)
- return response.data
+ return _response.data
def remove_deployment(
self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -851,17 +706,11 @@ def remove_deployment(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.flows.remove_deployment(
- id="fl_6o701g4jmcanPVHxdqD0O",
- environment_id="staging",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.flows.remove_deployment(id='fl_6o701g4jmcanPVHxdqD0O', environment_id='staging', )
"""
- response = self._raw_client.remove_deployment(id, environment_id, request_options=request_options)
- return response.data
+ _response = self._raw_client.remove_deployment(id, environment_id, request_options=request_options)
+ return _response.data
def list_environments(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -885,16 +734,11 @@ def list_environments(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.flows.list_environments(
- id="fl_6o701g4jmcanPVHxdqD0O",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.flows.list_environments(id='fl_6o701g4jmcanPVHxdqD0O', )
"""
- response = self._raw_client.list_environments(id, request_options=request_options)
- return response.data
+ _response = self._raw_client.list_environments(id, request_options=request_options)
+ return _response.data
def update_monitoring(
self,
@@ -931,19 +775,13 @@ def update_monitoring(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.flows.update_monitoring(
- id="fl_6o701g4jmcanPVHxdqD0O",
- activate=[{"evaluator_version_id": "evv_1abc4308abd"}],
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.flows.update_monitoring(id='fl_6o701g4jmcanPVHxdqD0O', activate=[{'evaluator_version_id': 'evv_1abc4308abd'}], )
"""
- response = self._raw_client.update_monitoring(
+ _response = self._raw_client.update_monitoring(
id, activate=activate, deactivate=deactivate, request_options=request_options
)
- return response.data
+ return _response.data
class AsyncFlowsClient:
@@ -1095,50 +933,18 @@ async def log(
Examples
--------
- import asyncio
- import datetime
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import datetime
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.flows.log(
- id="fl_6o701g4jmcanPVHxdqD0O",
- flow={
- "attributes": {
- "prompt": {
- "template": "You are a helpful assistant helping with medical anamnesis",
- "model": "gpt-4o",
- "temperature": 0.8,
- },
- "tool": {
- "name": "retrieval_tool_v3",
- "description": "Retrieval tool for MedQA.",
- "source_code": "def retrieval_tool(question: str) -> str:\n pass\n",
- },
- }
- },
- inputs={
- "question": "Patient with a history of diabetes and hypertension presents with chest pain and shortness of breath."
- },
- output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.",
- log_status="incomplete",
- start_time=datetime.datetime.fromisoformat(
- "2024-07-08 21:40:35+00:00",
- ),
- end_time=datetime.datetime.fromisoformat(
- "2024-07-08 21:40:39+00:00",
- ),
- )
-
-
+            await client.flows.log(
+                id='fl_6o701g4jmcanPVHxdqD0O',
+                flow={'attributes': {'prompt': {'template': 'You are a helpful assistant helping with medical anamnesis', 'model': 'gpt-4o', 'temperature': 0.8},
+                                     'tool': {'name': 'retrieval_tool_v3', 'description': 'Retrieval tool for MedQA.', 'source_code': 'def retrieval_tool(question: str) -> str:\n    pass\n'}}},
+                inputs={'question': 'Patient with a history of diabetes and hypertension presents with chest pain and shortness of breath.'},
+                output='The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.',
+                log_status="incomplete", start_time=datetime.datetime.fromisoformat("2024-07-08 22:40:35+00:00"), end_time=datetime.datetime.fromisoformat("2024-07-08 22:40:39+00:00"),
+            )
asyncio.run(main())
"""
- response = await self._raw_client.log(
+ _response = await self._raw_client.log(
version_id=version_id,
environment=environment,
messages=messages,
@@ -1168,7 +974,7 @@ async def main() -> None:
flow=flow,
request_options=request_options,
)
- return response.data
+ return _response.data
async def update_log(
self,
@@ -1223,29 +1029,15 @@ async def update_log(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.flows.update_log(
- log_id="medqa_experiment_0001",
- inputs={
- "question": "Patient with a history of diabetes and normal tension presents with chest pain and shortness of breath."
- },
- output="The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.",
- log_status="complete",
- )
-
-
+            await client.flows.update_log(
+                log_id='medqa_experiment_0001',
+                inputs={'question': 'Patient with a history of diabetes and normal tension presents with chest pain and shortness of breath.'},
+                output='The patient is likely experiencing a myocardial infarction. Immediate medical attention is required.', log_status="complete")
asyncio.run(main())
"""
- response = await self._raw_client.update_log(
+ _response = await self._raw_client.update_log(
log_id,
messages=messages,
output_message=output_message,
@@ -1255,7 +1047,7 @@ async def main() -> None:
log_status=log_status,
request_options=request_options,
)
- return response.data
+ return _response.data
async def get(
self,
@@ -1292,27 +1084,17 @@ async def get(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.flows.get(
- id="fl_6o701g4jmcanPVHxdqD0O",
- )
-
-
+ await client.flows.get(id='fl_6o701g4jmcanPVHxdqD0O', )
asyncio.run(main())
"""
- response = await self._raw_client.get(
+ _response = await self._raw_client.get(
id, version_id=version_id, environment=environment, request_options=request_options
)
- return response.data
+ return _response.data
async def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
"""
@@ -1332,25 +1114,15 @@ async def delete(self, id: str, *, request_options: typing.Optional[RequestOptio
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.flows.delete(
- id="fl_6o701g4jmcanPVHxdqD0O",
- )
-
-
+ await client.flows.delete(id='fl_6o701g4jmcanPVHxdqD0O', )
asyncio.run(main())
"""
- response = await self._raw_client.delete(id, request_options=request_options)
- return response.data
+ _response = await self._raw_client.delete(id, request_options=request_options)
+ return _response.data
async def move(
self,
@@ -1388,28 +1160,17 @@ async def move(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.flows.move(
- id="fl_6o701g4jmcanPVHxdqD0O",
- path="new directory/new name",
- )
-
-
+ await client.flows.move(id='fl_6o701g4jmcanPVHxdqD0O', path='new directory/new name', )
asyncio.run(main())
"""
- response = await self._raw_client.move(
+ _response = await self._raw_client.move(
id, path=path, name=name, directory_id=directory_id, request_options=request_options
)
- return response.data
+ return _response.data
async def list(
self,
@@ -1418,7 +1179,7 @@ async def list(
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
user_filter: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncPager[FlowResponse]:
@@ -1439,7 +1200,7 @@ async def list(
user_filter : typing.Optional[str]
Case-insensitive filter for users in the Flow. This filter matches against both email address and name of users.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort Flows by
order : typing.Optional[SortOrder]
@@ -1455,77 +1216,28 @@ async def list(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- response = await client.flows.list(
- size=1,
- )
+ response = await client.flows.list(size=1, )
async for item in response:
yield item
+
# alternatively, you can paginate page-by-page
async for page in response.iter_pages():
yield page
-
-
asyncio.run(main())
"""
- page = page if page is not None else 1
- _response = await self._raw_client._client_wrapper.httpx_client.request(
- "flows",
- method="GET",
- params={
- "page": page,
- "size": size,
- "name": name,
- "user_filter": user_filter,
- "sort_by": sort_by,
- "order": order,
- },
+ return await self._raw_client.list(
+ page=page,
+ size=size,
+ name=name,
+ user_filter=user_filter,
+ sort_by=sort_by,
+ order=order,
request_options=request_options,
)
- try:
- if 200 <= _response.status_code < 300:
- _parsed_response = typing.cast(
- PaginatedDataFlowResponse,
- construct_type(
- type_=PaginatedDataFlowResponse, # type: ignore
- object_=_response.json(),
- ),
- )
- _has_next = True
- _get_next = lambda: self.list(
- page=page + 1,
- size=size,
- name=name,
- user_filter=user_filter,
- sort_by=sort_by,
- order=order,
- request_options=request_options,
- )
- _items = _parsed_response.records
- return AsyncPager(has_next=_has_next, items=_items, get_next=_get_next)
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(
- HttpValidationError,
- construct_type(
- type_=HttpValidationError, # type: ignore
- object_=_response.json(),
- ),
- )
- )
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
async def upsert(
self,
@@ -1573,38 +1285,18 @@ async def upsert(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.flows.upsert(
- path="Personal Projects/MedQA Flow",
- attributes={
- "prompt": {
- "template": "You are a helpful medical assistant helping with medical anamnesis. Answer {{question}}",
- "model": "gpt-4o",
- "temperature": 0.8,
- },
- "tool": {
- "name": "retrieval_tool_v3",
- "description": "Retrieval tool for MedQA.",
- "source_code": "def retrieval_tool(question: str) -> str:\n pass\n",
- },
- "version_name": "medqa-flow-v1",
- "version_description": "Initial version",
- },
- )
-
-
+            await client.flows.upsert(
+                path='Personal Projects/MedQA Flow',
+                attributes={
+                    'prompt': {'template': 'You are a helpful medical assistant helping with medical anamnesis. Answer {{question}}', 'model': 'gpt-4o', 'temperature': 0.8},
+                    'tool': {'name': 'retrieval_tool_v3', 'description': 'Retrieval tool for MedQA.', 'source_code': 'def retrieval_tool(question: str) -> str:\n    pass\n'},
+                    'version_name': 'medqa-flow-v1',
+                    'version_description': 'Initial version',
+                },
+            )
asyncio.run(main())
"""
- response = await self._raw_client.upsert(
+ _response = await self._raw_client.upsert(
attributes=attributes,
path=path,
id=id,
@@ -1612,7 +1304,7 @@ async def main() -> None:
version_description=version_description,
request_options=request_options,
)
- return response.data
+ return _response.data
async def list_versions(
self,
@@ -1642,27 +1334,17 @@ async def list_versions(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.flows.list_versions(
- id="fl_6o701g4jmcanPVHxdqD0O",
- )
-
-
+ await client.flows.list_versions(id='fl_6o701g4jmcanPVHxdqD0O', )
asyncio.run(main())
"""
- response = await self._raw_client.list_versions(
+ _response = await self._raw_client.list_versions(
id, evaluator_aggregates=evaluator_aggregates, request_options=request_options
)
- return response.data
+ return _response.data
async def delete_flow_version(
self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1687,26 +1369,15 @@ async def delete_flow_version(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.flows.delete_flow_version(
- id="id",
- version_id="version_id",
- )
-
-
+ await client.flows.delete_flow_version(id='id', version_id='version_id', )
asyncio.run(main())
"""
- response = await self._raw_client.delete_flow_version(id, version_id, request_options=request_options)
- return response.data
+ _response = await self._raw_client.delete_flow_version(id, version_id, request_options=request_options)
+ return _response.data
async def update_flow_version(
self,
@@ -1744,28 +1415,17 @@ async def update_flow_version(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.flows.update_flow_version(
- id="id",
- version_id="version_id",
- )
-
-
+ await client.flows.update_flow_version(id='id', version_id='version_id', )
asyncio.run(main())
"""
- response = await self._raw_client.update_flow_version(
+ _response = await self._raw_client.update_flow_version(
id, version_id, name=name, description=description, request_options=request_options
)
- return response.data
+ return _response.data
async def set_deployment(
self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
@@ -1797,29 +1457,17 @@ async def set_deployment(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.flows.set_deployment(
- id="fl_6o701g4jmcanPVHxdqD0O",
- environment_id="staging",
- version_id="flv_6o701g4jmcanPVHxdqD0O",
- )
-
-
+ await client.flows.set_deployment(id='fl_6o701g4jmcanPVHxdqD0O', environment_id='staging', version_id='flv_6o701g4jmcanPVHxdqD0O', )
asyncio.run(main())
"""
- response = await self._raw_client.set_deployment(
+ _response = await self._raw_client.set_deployment(
id, environment_id, version_id=version_id, request_options=request_options
)
- return response.data
+ return _response.data
async def remove_deployment(
self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1847,26 +1495,15 @@ async def remove_deployment(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.flows.remove_deployment(
- id="fl_6o701g4jmcanPVHxdqD0O",
- environment_id="staging",
- )
-
-
+ await client.flows.remove_deployment(id='fl_6o701g4jmcanPVHxdqD0O', environment_id='staging', )
asyncio.run(main())
"""
- response = await self._raw_client.remove_deployment(id, environment_id, request_options=request_options)
- return response.data
+ _response = await self._raw_client.remove_deployment(id, environment_id, request_options=request_options)
+ return _response.data
async def list_environments(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1889,25 +1526,15 @@ async def list_environments(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.flows.list_environments(
- id="fl_6o701g4jmcanPVHxdqD0O",
- )
-
-
+ await client.flows.list_environments(id='fl_6o701g4jmcanPVHxdqD0O', )
asyncio.run(main())
"""
- response = await self._raw_client.list_environments(id, request_options=request_options)
- return response.data
+ _response = await self._raw_client.list_environments(id, request_options=request_options)
+ return _response.data
async def update_monitoring(
self,
@@ -1943,25 +1570,14 @@ async def update_monitoring(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.flows.update_monitoring(
- id="fl_6o701g4jmcanPVHxdqD0O",
- activate=[{"evaluator_version_id": "evv_1abc4308abd"}],
- )
-
-
+ await client.flows.update_monitoring(id='fl_6o701g4jmcanPVHxdqD0O', activate=[{'evaluator_version_id': 'evv_1abc4308abd'}], )
asyncio.run(main())
"""
- response = await self._raw_client.update_monitoring(
+ _response = await self._raw_client.update_monitoring(
id, activate=activate, deactivate=deactivate, request_options=request_options
)
- return response.data
+ return _response.data
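
With the inline pagination removed from `FlowsClient.list`, the wrapper now returns the `SyncPager` (or `AsyncPager`) built by the raw client, so consuming the listing is unchanged. A brief sketch, assuming `FlowResponse` exposes `id` and `path` as elsewhere in this patch:

    from humanloop import Humanloop

    client = Humanloop(api_key="YOUR_API_KEY")

    # Iterating the pager follows get_next across pages transparently;
    # page-by-page iteration via .iter_pages() also still works, as in the docstrings.
    for flow in client.flows.list(size=10):
        print(flow.id, flow.path)
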
diff --git a/src/humanloop/flows/raw_client.py b/src/humanloop/flows/raw_client.py
index 962f7eba..e3954572 100644
--- a/src/humanloop/flows/raw_client.py
+++ b/src/humanloop/flows/raw_client.py
@@ -1,33 +1,36 @@
# This file was auto-generated by Fern from our API Definition.
-import typing
-from ..core.client_wrapper import SyncClientWrapper
-from ..requests.chat_message import ChatMessageParams
import datetime as dt
-from ..types.log_status import LogStatus
-from ..requests.flow_kernel_request import FlowKernelRequestParams
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.http_response import AsyncHttpResponse, HttpResponse
+from ..core.jsonable_encoder import jsonable_encoder
+from ..core.pagination import AsyncPager, BaseHttpResponse, SyncPager
from ..core.request_options import RequestOptions
-from ..core.http_response import HttpResponse
-from ..types.create_flow_log_response import CreateFlowLogResponse
from ..core.serialization import convert_and_respect_annotation_metadata
from ..core.unchecked_base_model import construct_type
from ..errors.unprocessable_entity_error import UnprocessableEntityError
-from ..types.http_validation_error import HttpValidationError
-from json.decoder import JSONDecodeError
-from ..core.api_error import ApiError
-from ..types.flow_log_response import FlowLogResponse
-from ..core.jsonable_encoder import jsonable_encoder
-from ..types.flow_response import FlowResponse
-from ..types.list_flows import ListFlows
-from ..types.file_environment_response import FileEnvironmentResponse
+from ..requests.chat_message import ChatMessageParams
from ..requests.evaluator_activation_deactivation_request_activate_item import (
EvaluatorActivationDeactivationRequestActivateItemParams,
)
from ..requests.evaluator_activation_deactivation_request_deactivate_item import (
EvaluatorActivationDeactivationRequestDeactivateItemParams,
)
-from ..core.client_wrapper import AsyncClientWrapper
-from ..core.http_response import AsyncHttpResponse
+from ..requests.flow_kernel_request import FlowKernelRequestParams
+from ..types.create_flow_log_response import CreateFlowLogResponse
+from ..types.file_environment_response import FileEnvironmentResponse
+from ..types.file_sort_by import FileSortBy
+from ..types.flow_log_response import FlowLogResponse
+from ..types.flow_response import FlowResponse
+from ..types.http_validation_error import HttpValidationError
+from ..types.list_flows import ListFlows
+from ..types.log_status import LogStatus
+from ..types.paginated_data_flow_response import PaginatedDataFlowResponse
+from ..types.sort_order import SortOrder
# this is used as the default value for optional parameters
OMIT = typing.cast(typing.Any, ...)
@@ -227,18 +230,19 @@ def log(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def update_log(
self,
@@ -324,18 +328,19 @@ def update_log(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def get(
self,
@@ -391,18 +396,19 @@ def get(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> HttpResponse[None]:
"""
@@ -430,18 +436,19 @@ def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] =
return HttpResponse(response=_response, data=None)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def move(
self,
@@ -503,18 +510,115 @@ def move(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+ def list(
+ self,
+ *,
+ page: typing.Optional[int] = None,
+ size: typing.Optional[int] = None,
+ name: typing.Optional[str] = None,
+ user_filter: typing.Optional[str] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
+ order: typing.Optional[SortOrder] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> SyncPager[FlowResponse]:
+ """
+ Get a list of Flows.
+
+ Parameters
+ ----------
+ page : typing.Optional[int]
+ Page number for pagination.
+
+ size : typing.Optional[int]
+ Page size for pagination. Number of Flows to fetch.
+
+ name : typing.Optional[str]
+ Case-insensitive filter for Flow name.
+
+ user_filter : typing.Optional[str]
+        Case-insensitive filter for users in the Flow. This filter matches against both the email addresses and names of users.
+
+ sort_by : typing.Optional[FileSortBy]
+        Field to sort Flows by.
+
+ order : typing.Optional[SortOrder]
+ Direction to sort by.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ SyncPager[FlowResponse]
+ Successful Response
+ """
+ page = page if page is not None else 1
+
+ _response = self._client_wrapper.httpx_client.request(
+ "flows",
+ method="GET",
+ params={
+ "page": page,
+ "size": size,
+ "name": name,
+ "user_filter": user_filter,
+ "sort_by": sort_by,
+ "order": order,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _parsed_response = typing.cast(
+ PaginatedDataFlowResponse,
+ construct_type(
+ type_=PaginatedDataFlowResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ _items = _parsed_response.records
+ _has_next = True
+ _get_next = lambda: self.list(
+ page=page + 1,
+ size=size,
+ name=name,
+ user_filter=user_filter,
+ sort_by=sort_by,
+ order=order,
+ request_options=request_options,
+ )
+ return SyncPager(
+ has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response)
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ headers=dict(_response.headers),
+ body=typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ ),
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
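
Note: the new `list` method above wraps the paginated `GET /flows` response in a `SyncPager[FlowResponse]` built from `PaginatedDataFlowResponse.records`, with the next page fetched lazily by the captured lambda. A minimal usage sketch, assuming the high-level `client.flows.list(...)` delegates to this raw method with the same arguments (mirroring how `LogsClient.list` delegates further down in this patch):

    from humanloop import Humanloop

    client = Humanloop(api_key="YOUR_API_KEY")

    # Iterating the pager yields FlowResponse items; the next page is only
    # requested once the current one is exhausted.
    flows = client.flows.list(size=10, name="support")  # "support" is an illustrative filter value
    for flow in flows:
        print(flow.id)

    # alternatively, paginate page-by-page (as in the Logs examples below)
    for page in flows.iter_pages():
        print(page)
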
def upsert(
self,
@@ -588,18 +692,19 @@ def upsert(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def list_versions(
self,
@@ -647,18 +752,19 @@ def list_versions(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def delete_flow_version(
self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -691,18 +797,19 @@ def delete_flow_version(
return HttpResponse(response=_response, data=None)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def update_flow_version(
self,
@@ -745,6 +852,9 @@ def update_flow_version(
"name": name,
"description": description,
},
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
)
@@ -760,18 +870,19 @@ def update_flow_version(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def set_deployment(
self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
@@ -821,18 +932,19 @@ def set_deployment(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def remove_deployment(
self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -868,18 +980,19 @@ def remove_deployment(
return HttpResponse(response=_response, data=None)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def list_environments(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -917,18 +1030,19 @@ def list_environments(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def update_monitoring(
self,
@@ -977,6 +1091,9 @@ def update_monitoring(
direction="write",
),
},
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
)
@@ -992,18 +1109,19 @@ def update_monitoring(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
class AsyncRawFlowsClient:
@@ -1200,18 +1318,19 @@ async def log(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def update_log(
self,
@@ -1297,18 +1416,19 @@ async def update_log(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def get(
self,
@@ -1364,18 +1484,19 @@ async def get(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def delete(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1405,18 +1526,19 @@ async def delete(
return AsyncHttpResponse(response=_response, data=None)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def move(
self,
@@ -1478,18 +1600,118 @@ async def move(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
+ ),
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+ async def list(
+ self,
+ *,
+ page: typing.Optional[int] = None,
+ size: typing.Optional[int] = None,
+ name: typing.Optional[str] = None,
+ user_filter: typing.Optional[str] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
+ order: typing.Optional[SortOrder] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncPager[FlowResponse]:
+ """
+ Get a list of Flows.
+
+ Parameters
+ ----------
+ page : typing.Optional[int]
+ Page number for pagination.
+
+ size : typing.Optional[int]
+ Page size for pagination. Number of Flows to fetch.
+
+ name : typing.Optional[str]
+ Case-insensitive filter for Flow name.
+
+ user_filter : typing.Optional[str]
+        Case-insensitive filter for users in the Flow. This filter matches against both the email addresses and names of users.
+
+ sort_by : typing.Optional[FileSortBy]
+        Field to sort Flows by.
+
+ order : typing.Optional[SortOrder]
+ Direction to sort by.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncPager[FlowResponse]
+ Successful Response
+ """
+ page = page if page is not None else 1
+
+ _response = await self._client_wrapper.httpx_client.request(
+ "flows",
+ method="GET",
+ params={
+ "page": page,
+ "size": size,
+ "name": name,
+ "user_filter": user_filter,
+ "sort_by": sort_by,
+ "order": order,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _parsed_response = typing.cast(
+ PaginatedDataFlowResponse,
+ construct_type(
+ type_=PaginatedDataFlowResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ _items = _parsed_response.records
+ _has_next = True
+
+ async def _get_next():
+ return await self.list(
+ page=page + 1,
+ size=size,
+ name=name,
+ user_filter=user_filter,
+ sort_by=sort_by,
+ order=order,
+ request_options=request_options,
)
+
+ return AsyncPager(
+ has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response)
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ headers=dict(_response.headers),
+ body=typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
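
Note: the async variant returns an `AsyncPager` and uses a nested coroutine to fetch the next page. A hedged sketch of consuming it through the async client, assuming `AsyncHumanloop` exposes `flows.list` with the same signature as `AsyncRawFlowsClient.list` above:

    import asyncio

    from humanloop import AsyncHumanloop

    client = AsyncHumanloop(api_key="YOUR_API_KEY")

    async def main() -> None:
        # AsyncPager supports `async for`; subsequent pages are awaited
        # transparently as items are consumed.
        flows = await client.flows.list(size=10)
        async for flow in flows:
            print(flow.id)

    asyncio.run(main())
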
async def upsert(
self,
@@ -1563,18 +1785,19 @@ async def upsert(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def list_versions(
self,
@@ -1622,18 +1845,19 @@ async def list_versions(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def delete_flow_version(
self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1666,18 +1890,19 @@ async def delete_flow_version(
return AsyncHttpResponse(response=_response, data=None)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def update_flow_version(
self,
@@ -1720,6 +1945,9 @@ async def update_flow_version(
"name": name,
"description": description,
},
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
)
@@ -1735,18 +1963,19 @@ async def update_flow_version(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def set_deployment(
self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
@@ -1796,18 +2025,19 @@ async def set_deployment(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def remove_deployment(
self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1843,18 +2073,19 @@ async def remove_deployment(
return AsyncHttpResponse(response=_response, data=None)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def list_environments(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1892,18 +2123,19 @@ async def list_environments(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def update_monitoring(
self,
@@ -1952,6 +2184,9 @@ async def update_monitoring(
direction="write",
),
},
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
)
@@ -1967,15 +2202,16 @@ async def update_monitoring(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
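
Note: a change repeated across every method in this file (and in the other raw clients below) is that `ApiError` and `UnprocessableEntityError` are now constructed with `headers=dict(_response.headers)` alongside the status code and body. A rough sketch of what this enables for callers, assuming the exception keeps these constructor keywords as attributes and that the high-level flows client mirrors the raw `delete` shown above:

    from humanloop import Humanloop
    from humanloop.core.api_error import ApiError

    client = Humanloop(api_key="YOUR_API_KEY")

    try:
        client.flows.delete(id="fl_123abc")  # hypothetical Flow id
    except ApiError as err:
        # Response headers are now carried on the error, useful for surfacing
        # request ids or rate-limit headers when debugging failed calls.
        print(err.status_code)
        print(err.headers)
        print(err.body)
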
diff --git a/src/humanloop/logs/__init__.py b/src/humanloop/logs/__init__.py
index f3ea2659..5cde0202 100644
--- a/src/humanloop/logs/__init__.py
+++ b/src/humanloop/logs/__init__.py
@@ -1,2 +1,4 @@
# This file was auto-generated by Fern from our API Definition.
+# isort: skip_file
+
diff --git a/src/humanloop/logs/client.py b/src/humanloop/logs/client.py
index 8733ed37..278c97cf 100644
--- a/src/humanloop/logs/client.py
+++ b/src/humanloop/logs/client.py
@@ -1,23 +1,13 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.client_wrapper import SyncClientWrapper
-from .raw_client import RawLogsClient
-import typing
-from ..types.version_status import VersionStatus
import datetime as dt
+import typing
+
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pagination import AsyncPager, SyncPager
from ..core.request_options import RequestOptions
-from ..core.pagination import SyncPager
from ..types.log_response import LogResponse
-from ..core.datetime_utils import serialize_datetime
-from ..types.paginated_data_log_response import PaginatedDataLogResponse
-from ..core.unchecked_base_model import construct_type
-from ..errors.unprocessable_entity_error import UnprocessableEntityError
-from ..types.http_validation_error import HttpValidationError
-from json.decoder import JSONDecodeError
-from ..core.api_error import ApiError
-from ..core.client_wrapper import AsyncClientWrapper
-from .raw_client import AsyncRawLogsClient
-from ..core.pagination import AsyncPager
+from .raw_client import AsyncRawLogsClient, RawLogsClient
class LogsClient:
@@ -108,83 +98,30 @@ def list(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- response = client.logs.list(
- file_id="file_123abc",
- size=1,
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ response = client.logs.list(file_id='file_123abc', size=1, )
for item in response:
yield item
# alternatively, you can paginate page-by-page
for page in response.iter_pages():
yield page
"""
- page = page if page is not None else 1
- _response = self._raw_client._client_wrapper.httpx_client.request(
- "logs",
- method="GET",
- params={
- "file_id": file_id,
- "page": page,
- "size": size,
- "version_id": version_id,
- "id": id,
- "search": search,
- "metadata_search": metadata_search,
- "start_date": serialize_datetime(start_date) if start_date is not None else None,
- "end_date": serialize_datetime(end_date) if end_date is not None else None,
- "include_parent": include_parent,
- "in_trace_filter": in_trace_filter,
- "sample": sample,
- "include_trace_children": include_trace_children,
- },
+ return self._raw_client.list(
+ file_id=file_id,
+ page=page,
+ size=size,
+ version_id=version_id,
+ id=id,
+ search=search,
+ metadata_search=metadata_search,
+ start_date=start_date,
+ end_date=end_date,
+ include_parent=include_parent,
+ in_trace_filter=in_trace_filter,
+ sample=sample,
+ include_trace_children=include_trace_children,
request_options=request_options,
)
- try:
- if 200 <= _response.status_code < 300:
- _parsed_response = typing.cast(
- PaginatedDataLogResponse,
- construct_type(
- type_=PaginatedDataLogResponse, # type: ignore
- object_=_response.json(),
- ),
- )
- _has_next = True
- _get_next = lambda: self.list(
- file_id=file_id,
- page=page + 1,
- size=size,
- version_id=version_id,
- id=id,
- search=search,
- metadata_search=metadata_search,
- start_date=start_date,
- end_date=end_date,
- include_parent=include_parent,
- in_trace_filter=in_trace_filter,
- sample=sample,
- include_trace_children=include_trace_children,
- request_options=request_options,
- )
- _items = _parsed_response.records
- return SyncPager(has_next=_has_next, items=_items, get_next=_get_next)
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(
- HttpValidationError,
- construct_type(
- type_=HttpValidationError, # type: ignore
- object_=_response.json(),
- ),
- )
- )
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
def delete(
self,
@@ -210,16 +147,11 @@ def delete(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.logs.delete(
- id="prv_Wu6zx1lAWJRqOyL8nWuZk",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.logs.delete(id='prv_Wu6zx1lAWJRqOyL8nWuZk', )
"""
- response = self._raw_client.delete(id=id, request_options=request_options)
- return response.data
+ _response = self._raw_client.delete(id=id, request_options=request_options)
+ return _response.data
def get(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> LogResponse:
"""
@@ -241,16 +173,11 @@ def get(self, id: str, *, request_options: typing.Optional[RequestOptions] = Non
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.logs.get(
- id="prv_Wu6zx1lAWJRqOyL8nWuZk",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.logs.get(id='prv_Wu6zx1lAWJRqOyL8nWuZk', )
"""
- response = self._raw_client.get(id, request_options=request_options)
- return response.data
+ _response = self._raw_client.get(id, request_options=request_options)
+ return _response.data
class AsyncLogsClient:
@@ -340,92 +267,35 @@ async def list(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- response = await client.logs.list(
- file_id="file_123abc",
- size=1,
- )
+ response = await client.logs.list(file_id='file_123abc', size=1, )
async for item in response:
yield item
+
# alternatively, you can paginate page-by-page
async for page in response.iter_pages():
yield page
-
-
asyncio.run(main())
"""
- page = page if page is not None else 1
- _response = await self._raw_client._client_wrapper.httpx_client.request(
- "logs",
- method="GET",
- params={
- "file_id": file_id,
- "page": page,
- "size": size,
- "version_id": version_id,
- "id": id,
- "search": search,
- "metadata_search": metadata_search,
- "start_date": serialize_datetime(start_date) if start_date is not None else None,
- "end_date": serialize_datetime(end_date) if end_date is not None else None,
- "include_parent": include_parent,
- "in_trace_filter": in_trace_filter,
- "sample": sample,
- "include_trace_children": include_trace_children,
- },
+ return await self._raw_client.list(
+ file_id=file_id,
+ page=page,
+ size=size,
+ version_id=version_id,
+ id=id,
+ search=search,
+ metadata_search=metadata_search,
+ start_date=start_date,
+ end_date=end_date,
+ include_parent=include_parent,
+ in_trace_filter=in_trace_filter,
+ sample=sample,
+ include_trace_children=include_trace_children,
request_options=request_options,
)
- try:
- if 200 <= _response.status_code < 300:
- _parsed_response = typing.cast(
- PaginatedDataLogResponse,
- construct_type(
- type_=PaginatedDataLogResponse, # type: ignore
- object_=_response.json(),
- ),
- )
- _has_next = True
- _get_next = lambda: self.list(
- file_id=file_id,
- page=page + 1,
- size=size,
- version_id=version_id,
- id=id,
- search=search,
- metadata_search=metadata_search,
- start_date=start_date,
- end_date=end_date,
- include_parent=include_parent,
- in_trace_filter=in_trace_filter,
- sample=sample,
- include_trace_children=include_trace_children,
- request_options=request_options,
- )
- _items = _parsed_response.records
- return AsyncPager(has_next=_has_next, items=_items, get_next=_get_next)
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(
- HttpValidationError,
- construct_type(
- type_=HttpValidationError, # type: ignore
- object_=_response.json(),
- ),
- )
- )
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
async def delete(
self,
@@ -450,25 +320,15 @@ async def delete(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.logs.delete(
- id="prv_Wu6zx1lAWJRqOyL8nWuZk",
- )
-
-
+ await client.logs.delete(id='prv_Wu6zx1lAWJRqOyL8nWuZk', )
asyncio.run(main())
"""
- response = await self._raw_client.delete(id=id, request_options=request_options)
- return response.data
+ _response = await self._raw_client.delete(id=id, request_options=request_options)
+ return _response.data
async def get(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> LogResponse:
"""
@@ -489,22 +349,12 @@ async def get(self, id: str, *, request_options: typing.Optional[RequestOptions]
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.logs.get(
- id="prv_Wu6zx1lAWJRqOyL8nWuZk",
- )
-
-
+ await client.logs.get(id='prv_Wu6zx1lAWJRqOyL8nWuZk', )
asyncio.run(main())
"""
- response = await self._raw_client.get(id, request_options=request_options)
- return response.data
+ _response = await self._raw_client.get(id, request_options=request_options)
+ return _response.data
diff --git a/src/humanloop/logs/raw_client.py b/src/humanloop/logs/raw_client.py
index 3859278b..e155be92 100644
--- a/src/humanloop/logs/raw_client.py
+++ b/src/humanloop/logs/raw_client.py
@@ -1,24 +1,165 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.client_wrapper import SyncClientWrapper
+import datetime as dt
import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.datetime_utils import serialize_datetime
+from ..core.http_response import AsyncHttpResponse, HttpResponse
+from ..core.jsonable_encoder import jsonable_encoder
+from ..core.pagination import AsyncPager, BaseHttpResponse, SyncPager
from ..core.request_options import RequestOptions
-from ..core.http_response import HttpResponse
+from ..core.unchecked_base_model import construct_type
from ..errors.unprocessable_entity_error import UnprocessableEntityError
from ..types.http_validation_error import HttpValidationError
-from ..core.unchecked_base_model import construct_type
-from json.decoder import JSONDecodeError
-from ..core.api_error import ApiError
from ..types.log_response import LogResponse
-from ..core.jsonable_encoder import jsonable_encoder
-from ..core.client_wrapper import AsyncClientWrapper
-from ..core.http_response import AsyncHttpResponse
+from ..types.paginated_data_log_response import PaginatedDataLogResponse
class RawLogsClient:
def __init__(self, *, client_wrapper: SyncClientWrapper):
self._client_wrapper = client_wrapper
+ def list(
+ self,
+ *,
+ file_id: str,
+ page: typing.Optional[int] = None,
+ size: typing.Optional[int] = None,
+ version_id: typing.Optional[str] = None,
+ id: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None,
+ search: typing.Optional[str] = None,
+ metadata_search: typing.Optional[str] = None,
+ start_date: typing.Optional[dt.datetime] = None,
+ end_date: typing.Optional[dt.datetime] = None,
+ include_parent: typing.Optional[bool] = None,
+ in_trace_filter: typing.Optional[typing.Union[bool, typing.Sequence[bool]]] = None,
+ sample: typing.Optional[int] = None,
+ include_trace_children: typing.Optional[bool] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> SyncPager[LogResponse]:
+ """
+ List all Logs for the given filter criteria.
+
+ Parameters
+ ----------
+ file_id : str
+ Unique identifier for the File to list Logs for.
+
+ page : typing.Optional[int]
+ Page number for pagination.
+
+ size : typing.Optional[int]
+ Page size for pagination. Number of Logs to fetch.
+
+ version_id : typing.Optional[str]
+ If provided, only Logs belonging to the specified Version will be returned.
+
+ id : typing.Optional[typing.Union[str, typing.Sequence[str]]]
+ If provided, returns Logs whose IDs contain any of the specified values as substrings.
+
+ search : typing.Optional[str]
+            If provided, only Logs that contain the provided string in their inputs and output will be returned.
+
+            If provided, only Logs that contain the provided string in their metadata will be returned.
+ If provided, only Logs that contain the provided string in its metadata will be returned.
+
+ start_date : typing.Optional[dt.datetime]
+ If provided, only Logs created after the specified date will be returned.
+
+ end_date : typing.Optional[dt.datetime]
+ If provided, only Logs created before the specified date will be returned.
+
+ include_parent : typing.Optional[bool]
+ If true, include the full parent Log in the response. Only applicable when retrieving Evaluator Logs.
+
+ in_trace_filter : typing.Optional[typing.Union[bool, typing.Sequence[bool]]]
+            If true, return Logs that are associated with a Trace. If false, return Logs that are not associated with a Trace.
+
+ sample : typing.Optional[int]
+ If provided, limit the response to a random subset of logs from the filtered results. (This will be an approximate sample, not a strict limit.)
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow or Agent Logs.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ SyncPager[LogResponse]
+ Successful Response
+ """
+ page = page if page is not None else 1
+
+ _response = self._client_wrapper.httpx_client.request(
+ "logs",
+ method="GET",
+ params={
+ "file_id": file_id,
+ "page": page,
+ "size": size,
+ "version_id": version_id,
+ "id": id,
+ "search": search,
+ "metadata_search": metadata_search,
+ "start_date": serialize_datetime(start_date) if start_date is not None else None,
+ "end_date": serialize_datetime(end_date) if end_date is not None else None,
+ "include_parent": include_parent,
+ "in_trace_filter": in_trace_filter,
+ "sample": sample,
+ "include_trace_children": include_trace_children,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _parsed_response = typing.cast(
+ PaginatedDataLogResponse,
+ construct_type(
+ type_=PaginatedDataLogResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ _items = _parsed_response.records
+ _has_next = True
+ _get_next = lambda: self.list(
+ file_id=file_id,
+ page=page + 1,
+ size=size,
+ version_id=version_id,
+ id=id,
+ search=search,
+ metadata_search=metadata_search,
+ start_date=start_date,
+ end_date=end_date,
+ include_parent=include_parent,
+ in_trace_filter=in_trace_filter,
+ sample=sample,
+ include_trace_children=include_trace_children,
+ request_options=request_options,
+ )
+ return SyncPager(
+ has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response)
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ headers=dict(_response.headers),
+ body=typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ ),
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
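
Note: `start_date` and `end_date` are serialized with `serialize_datetime` before being added to the query string. A short sketch of filtering Logs by a date window through the high-level client, which (per the delegation in `logs/client.py` above) forwards these arguments to this raw method:

    import datetime as dt

    from humanloop import Humanloop

    client = Humanloop(api_key="YOUR_API_KEY")

    # Only Logs created inside the window are returned; the datetimes are
    # serialized by the raw client before being sent as query parameters.
    logs = client.logs.list(
        file_id="file_123abc",
        start_date=dt.datetime(2024, 7, 1, tzinfo=dt.timezone.utc),
        end_date=dt.datetime(2024, 8, 1, tzinfo=dt.timezone.utc),
        size=50,
    )
    for log in logs:
        print(log.id)
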
def delete(
self,
*,
@@ -53,18 +194,19 @@ def delete(
return HttpResponse(response=_response, data=None)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def get(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> HttpResponse[LogResponse]:
"""
@@ -100,24 +242,166 @@ def get(self, id: str, *, request_options: typing.Optional[RequestOptions] = Non
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
class AsyncRawLogsClient:
def __init__(self, *, client_wrapper: AsyncClientWrapper):
self._client_wrapper = client_wrapper
+ async def list(
+ self,
+ *,
+ file_id: str,
+ page: typing.Optional[int] = None,
+ size: typing.Optional[int] = None,
+ version_id: typing.Optional[str] = None,
+ id: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None,
+ search: typing.Optional[str] = None,
+ metadata_search: typing.Optional[str] = None,
+ start_date: typing.Optional[dt.datetime] = None,
+ end_date: typing.Optional[dt.datetime] = None,
+ include_parent: typing.Optional[bool] = None,
+ in_trace_filter: typing.Optional[typing.Union[bool, typing.Sequence[bool]]] = None,
+ sample: typing.Optional[int] = None,
+ include_trace_children: typing.Optional[bool] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncPager[LogResponse]:
+ """
+ List all Logs for the given filter criteria.
+
+ Parameters
+ ----------
+ file_id : str
+ Unique identifier for the File to list Logs for.
+
+ page : typing.Optional[int]
+ Page number for pagination.
+
+ size : typing.Optional[int]
+ Page size for pagination. Number of Logs to fetch.
+
+ version_id : typing.Optional[str]
+ If provided, only Logs belonging to the specified Version will be returned.
+
+ id : typing.Optional[typing.Union[str, typing.Sequence[str]]]
+ If provided, returns Logs whose IDs contain any of the specified values as substrings.
+
+ search : typing.Optional[str]
+            If provided, only Logs that contain the provided string in their inputs and output will be returned.
+
+            If provided, only Logs that contain the provided string in their metadata will be returned.
+ If provided, only Logs that contain the provided string in its metadata will be returned.
+
+ start_date : typing.Optional[dt.datetime]
+ If provided, only Logs created after the specified date will be returned.
+
+ end_date : typing.Optional[dt.datetime]
+ If provided, only Logs created before the specified date will be returned.
+
+ include_parent : typing.Optional[bool]
+ If true, include the full parent Log in the response. Only applicable when retrieving Evaluator Logs.
+
+ in_trace_filter : typing.Optional[typing.Union[bool, typing.Sequence[bool]]]
+            If true, return Logs that are associated with a Trace. If false, return Logs that are not associated with a Trace.
+
+ sample : typing.Optional[int]
+ If provided, limit the response to a random subset of logs from the filtered results. (This will be an approximate sample, not a strict limit.)
+
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the retrieved Logs. Only applicable when retrieving Flow or Agent Logs.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncPager[LogResponse]
+ Successful Response
+ """
+ page = page if page is not None else 1
+
+ _response = await self._client_wrapper.httpx_client.request(
+ "logs",
+ method="GET",
+ params={
+ "file_id": file_id,
+ "page": page,
+ "size": size,
+ "version_id": version_id,
+ "id": id,
+ "search": search,
+ "metadata_search": metadata_search,
+ "start_date": serialize_datetime(start_date) if start_date is not None else None,
+ "end_date": serialize_datetime(end_date) if end_date is not None else None,
+ "include_parent": include_parent,
+ "in_trace_filter": in_trace_filter,
+ "sample": sample,
+ "include_trace_children": include_trace_children,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _parsed_response = typing.cast(
+ PaginatedDataLogResponse,
+ construct_type(
+ type_=PaginatedDataLogResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ _items = _parsed_response.records
+ _has_next = True
+
+ async def _get_next():
+ return await self.list(
+ file_id=file_id,
+ page=page + 1,
+ size=size,
+ version_id=version_id,
+ id=id,
+ search=search,
+ metadata_search=metadata_search,
+ start_date=start_date,
+ end_date=end_date,
+ include_parent=include_parent,
+ in_trace_filter=in_trace_filter,
+ sample=sample,
+ include_trace_children=include_trace_children,
+ request_options=request_options,
+ )
+
+ return AsyncPager(
+ has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response)
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ headers=dict(_response.headers),
+ body=typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ ),
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
async def delete(
self,
*,
@@ -152,18 +436,19 @@ async def delete(
return AsyncHttpResponse(response=_response, data=None)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def get(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -201,15 +486,16 @@ async def get(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
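
Note: `UnprocessableEntityError` is now raised with keyword arguments `headers` and `body`, the body being the parsed `HttpValidationError` rather than a positional payload. A hedged sketch of handling a 422, assuming those keywords are kept as attributes on the error:

    from humanloop import Humanloop
    from humanloop.errors.unprocessable_entity_error import UnprocessableEntityError

    client = Humanloop(api_key="YOUR_API_KEY")

    try:
        client.logs.get(id="prv_Wu6zx1lAWJRqOyL8nWuZk")
    except UnprocessableEntityError as err:
        # err.body is the parsed HttpValidationError describing which fields
        # failed validation; err.headers holds the HTTP response headers.
        print(err.headers)
        print(err.body)
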
diff --git a/src/humanloop/prompts/__init__.py b/src/humanloop/prompts/__init__.py
index ae141d57..dcff7e62 100644
--- a/src/humanloop/prompts/__init__.py
+++ b/src/humanloop/prompts/__init__.py
@@ -1,25 +1,35 @@
# This file was auto-generated by Fern from our API Definition.
+# isort: skip_file
+
from .types import (
+ PromptLogRequestPrompt,
PromptLogRequestToolChoice,
PromptLogUpdateRequestToolChoice,
PromptRequestReasoningEffort,
PromptRequestStop,
PromptRequestTemplate,
+ PromptsCallRequestPrompt,
PromptsCallRequestToolChoice,
+ PromptsCallStreamRequestPrompt,
PromptsCallStreamRequestToolChoice,
)
from .requests import (
+ PromptLogRequestPromptParams,
PromptLogRequestToolChoiceParams,
PromptLogUpdateRequestToolChoiceParams,
PromptRequestReasoningEffortParams,
PromptRequestStopParams,
PromptRequestTemplateParams,
+ PromptsCallRequestPromptParams,
PromptsCallRequestToolChoiceParams,
+ PromptsCallStreamRequestPromptParams,
PromptsCallStreamRequestToolChoiceParams,
)
__all__ = [
+ "PromptLogRequestPrompt",
+ "PromptLogRequestPromptParams",
"PromptLogRequestToolChoice",
"PromptLogRequestToolChoiceParams",
"PromptLogUpdateRequestToolChoice",
@@ -30,8 +40,12 @@
"PromptRequestStopParams",
"PromptRequestTemplate",
"PromptRequestTemplateParams",
+ "PromptsCallRequestPrompt",
+ "PromptsCallRequestPromptParams",
"PromptsCallRequestToolChoice",
"PromptsCallRequestToolChoiceParams",
+ "PromptsCallStreamRequestPrompt",
+ "PromptsCallStreamRequestPromptParams",
"PromptsCallStreamRequestToolChoice",
"PromptsCallStreamRequestToolChoiceParams",
]
diff --git a/src/humanloop/prompts/client.py b/src/humanloop/prompts/client.py
index d5de327b..6117e397 100644
--- a/src/humanloop/prompts/client.py
+++ b/src/humanloop/prompts/client.py
@@ -1,53 +1,47 @@
# This file was auto-generated by Fern from our API Definition.
-import typing
-from ..core.client_wrapper import SyncClientWrapper
-from .raw_client import RawPromptsClient
-from ..requests.chat_message import ChatMessageParams
-from .requests.prompt_log_request_tool_choice import PromptLogRequestToolChoiceParams
-from ..requests.prompt_kernel_request import PromptKernelRequestParams
import datetime as dt
-from ..types.log_status import LogStatus
+import typing
+
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pagination import AsyncPager, SyncPager
from ..core.request_options import RequestOptions
-from ..types.create_prompt_log_response import CreatePromptLogResponse
-from .requests.prompt_log_update_request_tool_choice import PromptLogUpdateRequestToolChoiceParams
-from ..types.log_response import LogResponse
-from .requests.prompts_call_stream_request_tool_choice import PromptsCallStreamRequestToolChoiceParams
-from ..requests.provider_api_keys import ProviderApiKeysParams
-from ..types.prompt_call_stream_response import PromptCallStreamResponse
-from .requests.prompts_call_request_tool_choice import PromptsCallRequestToolChoiceParams
-from ..types.prompt_call_response import PromptCallResponse
-from ..types.project_sort_by import ProjectSortBy
-from ..types.sort_order import SortOrder
-from ..core.pagination import SyncPager
-from ..types.prompt_response import PromptResponse
-from ..types.paginated_data_prompt_response import PaginatedDataPromptResponse
-from ..core.unchecked_base_model import construct_type
-from ..errors.unprocessable_entity_error import UnprocessableEntityError
-from ..types.http_validation_error import HttpValidationError
-from json.decoder import JSONDecodeError
-from ..core.api_error import ApiError
-from ..types.model_endpoints import ModelEndpoints
-from .requests.prompt_request_template import PromptRequestTemplateParams
-from ..types.template_language import TemplateLanguage
-from ..types.model_providers import ModelProviders
-from .requests.prompt_request_stop import PromptRequestStopParams
-from ..requests.response_format import ResponseFormatParams
-from .requests.prompt_request_reasoning_effort import PromptRequestReasoningEffortParams
-from ..requests.tool_function import ToolFunctionParams
-from ..types.populate_template_response import PopulateTemplateResponse
-from ..types.list_prompts import ListPrompts
-from ..types.file_environment_response import FileEnvironmentResponse
+from ..requests.chat_message import ChatMessageParams
from ..requests.evaluator_activation_deactivation_request_activate_item import (
EvaluatorActivationDeactivationRequestActivateItemParams,
)
from ..requests.evaluator_activation_deactivation_request_deactivate_item import (
EvaluatorActivationDeactivationRequestDeactivateItemParams,
)
+from ..requests.provider_api_keys import ProviderApiKeysParams
+from ..requests.response_format import ResponseFormatParams
+from ..requests.tool_function import ToolFunctionParams
+from ..types.create_prompt_log_response import CreatePromptLogResponse
+from ..types.file_environment_response import FileEnvironmentResponse
+from ..types.file_sort_by import FileSortBy
+from ..types.list_prompts import ListPrompts
+from ..types.log_response import LogResponse
+from ..types.log_status import LogStatus
+from ..types.model_endpoints import ModelEndpoints
+from ..types.model_providers import ModelProviders
+from ..types.populate_template_response import PopulateTemplateResponse
+from ..types.prompt_call_response import PromptCallResponse
+from ..types.prompt_call_stream_response import PromptCallStreamResponse
from ..types.prompt_kernel_request import PromptKernelRequest
-from ..core.client_wrapper import AsyncClientWrapper
-from .raw_client import AsyncRawPromptsClient
-from ..core.pagination import AsyncPager
+from ..types.prompt_response import PromptResponse
+from ..types.sort_order import SortOrder
+from ..types.template_language import TemplateLanguage
+from .raw_client import AsyncRawPromptsClient, RawPromptsClient
+from .requests.prompt_log_request_prompt import PromptLogRequestPromptParams
+from .requests.prompt_log_request_tool_choice import PromptLogRequestToolChoiceParams
+from .requests.prompt_log_update_request_tool_choice import PromptLogUpdateRequestToolChoiceParams
+from .requests.prompt_request_reasoning_effort import PromptRequestReasoningEffortParams
+from .requests.prompt_request_stop import PromptRequestStopParams
+from .requests.prompt_request_template import PromptRequestTemplateParams
+from .requests.prompts_call_request_prompt import PromptsCallRequestPromptParams
+from .requests.prompts_call_request_tool_choice import PromptsCallRequestToolChoiceParams
+from .requests.prompts_call_stream_request_prompt import PromptsCallStreamRequestPromptParams
+from .requests.prompts_call_stream_request_tool_choice import PromptsCallStreamRequestToolChoiceParams
# this is used as the default value for optional parameters
OMIT = typing.cast(typing.Any, ...)
@@ -85,7 +79,7 @@ def log(
finish_reason: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptLogRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptLogRequestPromptParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -166,8 +160,11 @@ def log(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptLogRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -236,42 +233,13 @@ def log(
Examples
--------
- import datetime
-
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.prompts.log(
- path="persona",
- prompt={
- "model": "gpt-4",
- "template": [
- {
- "role": "system",
- "content": "You are {{person}}. Answer questions as this person. Do not break character.",
- }
- ],
- },
- messages=[{"role": "user", "content": "What really happened at Roswell?"}],
- inputs={"person": "Trump"},
- created_at=datetime.datetime.fromisoformat(
- "2024-07-18 23:29:35.178000+00:00",
- ),
- provider_latency=6.5931549072265625,
- output_message={
- "content": "Well, you know, there is so much secrecy involved in government, folks, it's unbelievable. They don't want to tell you everything. They don't tell me everything! But about Roswell, it's a very popular question. I know, I just know, that something very, very peculiar happened there. Was it a weather balloon? Maybe. Was it something extraterrestrial? Could be. I'd love to go down and open up all the classified documents, believe me, I would. But they don't let that happen. The Deep State, folks, the Deep State. They're unbelievable. They want to keep everything a secret. But whatever the truth is, I can tell you this: it's something big, very very big. Tremendous, in fact.",
- "role": "assistant",
- },
- prompt_tokens=100,
- output_tokens=220,
- prompt_cost=1e-05,
- output_cost=0.0002,
- finish_reason="stop",
- )
+ import datetime
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.prompts.log(path='persona', prompt={'model': 'gpt-4', 'template': [{'role': "system", 'content': 'You are {{person}}. Answer questions as this person. Do not break character.'}]}, messages=[{'role': "user", 'content': 'What really happened at Roswell?'}], inputs={'person': 'Trump'},
+ created_at=datetime.datetime.fromisoformat("2024-07-19 00:29:35.178000+00:00", ), provider_latency=6.5931549072265625, output_message={'content': "Well, you know, there is so much secrecy involved in government, folks, it's unbelievable. They don't want to tell you everything. They don't tell me everything! But about Roswell, it's a very popular question. I know, I just know, that something very, very peculiar happened there. Was it a weather balloon? Maybe. Was it something extraterrestrial? Could be. I'd love to go down and open up all the classified documents, believe me, I would. But they don't let that happen. The Deep State, folks, the Deep State. They're unbelievable. They want to keep everything a secret. But whatever the truth is, I can tell you this: it's something big, very very big. Tremendous, in fact.", 'role': "assistant"}, prompt_tokens=100, output_tokens=220, prompt_cost=1e-05, output_cost=0.0002, finish_reason='stop', )
"""
- response = self._raw_client.log(
+ _response = self._raw_client.log(
version_id=version_id,
environment=environment,
run_id=run_id,
@@ -308,7 +276,7 @@ def log(
log_id=log_id,
request_options=request_options,
)
- return response.data
+ return _response.data
def update_log(
self,
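The docstring change above broadens `prompt` from `PromptKernelRequestParams` to `PromptLogRequestPromptParams`, which per the new description accepts either a configuration object or the raw text of a `.prompt` file. A minimal sketch of both forms, assuming a hypothetical local `persona.prompt` file and the placeholder API key used throughout the generated examples:

```python
from pathlib import Path

from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Form 1: an object describing the Prompt configuration (as in the generated example).
client.prompts.log(
    path="persona",
    prompt={"model": "gpt-4", "template": [{"role": "system", "content": "You are {{person}}."}]},
    messages=[{"role": "user", "content": "What really happened at Roswell?"}],
)

# Form 2: the raw contents of a .prompt file passed as a string.
# persona.prompt is an assumed local file, used only for illustration.
client.prompts.log(
    path="persona",
    prompt=Path("persona.prompt").read_text(),
    messages=[{"role": "user", "content": "What really happened at Roswell?"}],
)
```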
@@ -433,16 +401,10 @@ def update_log(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.prompts.update_log(
- id="id",
- log_id="log_id",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.prompts.update_log(id='id', log_id='log_id', )
"""
- response = self._raw_client.update_log(
+ _response = self._raw_client.update_log(
id,
log_id,
output_message=output_message,
@@ -469,7 +431,7 @@ def update_log(
log_status=log_status,
request_options=request_options,
)
- return response.data
+ return _response.data
def call_stream(
self,
@@ -480,7 +442,7 @@ def call_stream(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallStreamRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallStreamRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -538,8 +500,11 @@ def call_stream(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -603,10 +568,7 @@ def call_stream(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
response = client.prompts.call_stream()
for chunk in response:
yield chunk
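The retained docstring example above calls `call_stream()` and uses `yield chunk` at module level, which is not runnable as written. A hedged usage sketch, based only on the iteration shown there; the `path` and `messages` arguments mirror the `call()` example and are placeholders:

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")


def stream_persona() -> None:
    # call_stream() yields streamed chunks that can be iterated directly,
    # per the generated docstring example.
    response = client.prompts.call_stream(
        path="persona",
        messages=[{"role": "user", "content": "What really happened at Roswell?"}],
    )
    for chunk in response:
        print(chunk)


stream_persona()
```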
@@ -649,7 +611,7 @@ def call(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -707,8 +669,11 @@ def call(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -772,42 +737,13 @@ def call(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.prompts.call(
- path="persona",
- prompt={
- "model": "gpt-4",
- "template": [
- {
- "role": "system",
- "content": "You are stockbot. Return latest prices.",
- }
- ],
- "tools": [
- {
- "name": "get_stock_price",
- "description": "Get current stock price",
- "parameters": {
- "type": "object",
- "properties": {
- "ticker_symbol": {
- "type": "string",
- "name": "Ticker Symbol",
- "description": "Ticker symbol of the stock",
- }
- },
- "required": [],
- },
- }
- ],
- },
- messages=[{"role": "user", "content": "latest apple"}],
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.prompts.call(path='persona', prompt={'model': 'gpt-4', 'template': [{'role': "system", 'content': 'You are stockbot. Return latest prices.'}], 'tools': [{'name': 'get_stock_price', 'description': 'Get current stock price', 'parameters': {'type': 'object', 'properties': {'ticker_symbol': {'type': 'string', 'name': 'Ticker Symbol', 'description': 'Ticker symbol of the stock'}}, 'required': []}}]},
+ messages=[{'role': "user", 'content': 'latest apple'}], )
"""
- response = self._raw_client.call(
+ _response = self._raw_client.call(
version_id=version_id,
environment=environment,
path=path,
@@ -834,7 +770,7 @@ def call(
suffix=suffix,
request_options=request_options,
)
- return response.data
+ return _response.data
def list(
self,
@@ -843,7 +779,7 @@ def list(
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
user_filter: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> SyncPager[PromptResponse]:
@@ -864,7 +800,7 @@ def list(
user_filter : typing.Optional[str]
Case-insensitive filter for users in the Prompt. This filter matches against both email address and name of users.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort Prompts by
order : typing.Optional[SortOrder]
@@ -881,68 +817,23 @@ def list(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- response = client.prompts.list(
- size=1,
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ response = client.prompts.list(size=1, )
for item in response:
yield item
# alternatively, you can paginate page-by-page
for page in response.iter_pages():
yield page
"""
- page = page if page is not None else 1
- _response = self._raw_client._client_wrapper.httpx_client.request(
- "prompts",
- method="GET",
- params={
- "page": page,
- "size": size,
- "name": name,
- "user_filter": user_filter,
- "sort_by": sort_by,
- "order": order,
- },
+ return self._raw_client.list(
+ page=page,
+ size=size,
+ name=name,
+ user_filter=user_filter,
+ sort_by=sort_by,
+ order=order,
request_options=request_options,
)
- try:
- if 200 <= _response.status_code < 300:
- _parsed_response = typing.cast(
- PaginatedDataPromptResponse,
- construct_type(
- type_=PaginatedDataPromptResponse, # type: ignore
- object_=_response.json(),
- ),
- )
- _has_next = True
- _get_next = lambda: self.list(
- page=page + 1,
- size=size,
- name=name,
- user_filter=user_filter,
- sort_by=sort_by,
- order=order,
- request_options=request_options,
- )
- _items = _parsed_response.records
- return SyncPager(has_next=_has_next, items=_items, get_next=_get_next)
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(
- HttpValidationError,
- construct_type(
- type_=HttpValidationError, # type: ignore
- object_=_response.json(),
- ),
- )
- )
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
def upsert(
self,
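The inline pagination logic has moved into the raw client; `list()` now simply returns the `SyncPager[PromptResponse]` built there. Per the generated docstring, the pager can be walked item-by-item (fetching further pages lazily) or page-by-page. A short sketch:

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Iterate Prompts across pages; the next page is only requested when needed.
for prompt in client.prompts.list(size=10):
    print(prompt)

# Alternatively, paginate page-by-page.
for page in client.prompts.list(size=10).iter_pages():
    print(page)
```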
@@ -1076,28 +967,10 @@ def upsert(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.prompts.upsert(
- path="Personal Projects/Coding Assistant",
- model="gpt-4o",
- endpoint="chat",
- template=[
- {
- "content": "You are a helpful coding assistant specialising in {{language}}",
- "role": "system",
- }
- ],
- provider="openai",
- max_tokens=-1,
- temperature=0.7,
- version_name="coding-assistant-v1",
- version_description="Initial version",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.prompts.upsert(path='Personal Projects/Coding Assistant', model='gpt-4o', endpoint="chat", template=[{'content': 'You are a helpful coding assistant specialising in {{language}}', 'role': "system"}], provider="openai", max_tokens=-1, temperature=0.7, version_name='coding-assistant-v1', version_description='Initial version', )
"""
- response = self._raw_client.upsert(
+ _response = self._raw_client.upsert(
model=model,
path=path,
id=id,
@@ -1125,7 +998,7 @@ def upsert(
readme=readme,
request_options=request_options,
)
- return response.data
+ return _response.data
def get(
self,
@@ -1163,18 +1036,13 @@ def get(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.prompts.get(
- id="pr_30gco7dx6JDq4200GVOHa",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.prompts.get(id='pr_30gco7dx6JDq4200GVOHa', )
"""
- response = self._raw_client.get(
+ _response = self._raw_client.get(
id, version_id=version_id, environment=environment, request_options=request_options
)
- return response.data
+ return _response.data
def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
"""
@@ -1195,16 +1063,11 @@ def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] =
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.prompts.delete(
- id="pr_30gco7dx6JDq4200GVOHa",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.prompts.delete(id='pr_30gco7dx6JDq4200GVOHa', )
"""
- response = self._raw_client.delete(id, request_options=request_options)
- return response.data
+ _response = self._raw_client.delete(id, request_options=request_options)
+ return _response.data
def move(
self,
@@ -1239,17 +1102,11 @@ def move(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.prompts.move(
- id="pr_30gco7dx6JDq4200GVOHa",
- path="new directory/new name",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.prompts.move(id='pr_30gco7dx6JDq4200GVOHa', path='new directory/new name', )
"""
- response = self._raw_client.move(id, path=path, name=name, request_options=request_options)
- return response.data
+ _response = self._raw_client.move(id, path=path, name=name, request_options=request_options)
+ return _response.data
def populate(
self,
@@ -1290,19 +1147,14 @@ def populate(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.prompts.populate(
- id="id",
- request={"key": "value"},
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.prompts.populate(id='id', request={'key': 'value'}, )
"""
- response = self._raw_client.populate(
+ _response = self._raw_client.populate(
id, request=request, version_id=version_id, environment=environment, request_options=request_options
)
- return response.data
+ return _response.data
def list_versions(
self,
@@ -1333,18 +1185,13 @@ def list_versions(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.prompts.list_versions(
- id="pr_30gco7dx6JDq4200GVOHa",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.prompts.list_versions(id='pr_30gco7dx6JDq4200GVOHa', )
"""
- response = self._raw_client.list_versions(
+ _response = self._raw_client.list_versions(
id, evaluator_aggregates=evaluator_aggregates, request_options=request_options
)
- return response.data
+ return _response.data
def delete_prompt_version(
self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1370,17 +1217,11 @@ def delete_prompt_version(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.prompts.delete_prompt_version(
- id="id",
- version_id="version_id",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.prompts.delete_prompt_version(id='id', version_id='version_id', )
"""
- response = self._raw_client.delete_prompt_version(id, version_id, request_options=request_options)
- return response.data
+ _response = self._raw_client.delete_prompt_version(id, version_id, request_options=request_options)
+ return _response.data
def patch_prompt_version(
self,
@@ -1419,19 +1260,13 @@ def patch_prompt_version(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.prompts.patch_prompt_version(
- id="id",
- version_id="version_id",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.prompts.patch_prompt_version(id='id', version_id='version_id', )
"""
- response = self._raw_client.patch_prompt_version(
+ _response = self._raw_client.patch_prompt_version(
id, version_id, name=name, description=description, request_options=request_options
)
- return response.data
+ return _response.data
def set_deployment(
self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
@@ -1464,20 +1299,13 @@ def set_deployment(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.prompts.set_deployment(
- id="id",
- environment_id="environment_id",
- version_id="version_id",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.prompts.set_deployment(id='id', environment_id='environment_id', version_id='version_id', )
"""
- response = self._raw_client.set_deployment(
+ _response = self._raw_client.set_deployment(
id, environment_id, version_id=version_id, request_options=request_options
)
- return response.data
+ return _response.data
def remove_deployment(
self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1506,17 +1334,11 @@ def remove_deployment(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.prompts.remove_deployment(
- id="id",
- environment_id="environment_id",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.prompts.remove_deployment(id='id', environment_id='environment_id', )
"""
- response = self._raw_client.remove_deployment(id, environment_id, request_options=request_options)
- return response.data
+ _response = self._raw_client.remove_deployment(id, environment_id, request_options=request_options)
+ return _response.data
def list_environments(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1540,16 +1362,11 @@ def list_environments(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.prompts.list_environments(
- id="pr_30gco7dx6JDq4200GVOHa",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.prompts.list_environments(id='pr_30gco7dx6JDq4200GVOHa', )
"""
- response = self._raw_client.list_environments(id, request_options=request_options)
- return response.data
+ _response = self._raw_client.list_environments(id, request_options=request_options)
+ return _response.data
def update_monitoring(
self,
@@ -1586,19 +1403,13 @@ def update_monitoring(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.prompts.update_monitoring(
- id="pr_30gco7dx6JDq4200GVOHa",
- activate=[{"evaluator_version_id": "evv_1abc4308abd"}],
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.prompts.update_monitoring(id='pr_30gco7dx6JDq4200GVOHa', activate=[{'evaluator_version_id': 'evv_1abc4308abd'}], )
"""
- response = self._raw_client.update_monitoring(
+ _response = self._raw_client.update_monitoring(
id, activate=activate, deactivate=deactivate, request_options=request_options
)
- return response.data
+ return _response.data
def serialize(
self,
@@ -1607,7 +1418,7 @@ def serialize(
version_id: typing.Optional[str] = None,
environment: typing.Optional[str] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> None:
+ ) -> str:
"""
Serialize a Prompt to the .prompt file format.
@@ -1633,23 +1444,19 @@ def serialize(
Returns
-------
- None
+ str
+ Successful Response
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.prompts.serialize(
- id="id",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.prompts.serialize(id='id', )
"""
- response = self._raw_client.serialize(
+ _response = self._raw_client.serialize(
id, version_id=version_id, environment=environment, request_options=request_options
)
- return response.data
+ return _response.data
def deserialize(
self, *, prompt: str, request_options: typing.Optional[RequestOptions] = None
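With the return type corrected from `None` to `str`, `serialize()` hands back the `.prompt` text directly, which pairs naturally with `deserialize()`. A round-trip sketch, reusing the placeholder id from the generated examples:

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Serialize an existing Prompt to the .prompt file format (now returned as a string).
raw_prompt = client.prompts.serialize(id="pr_30gco7dx6JDq4200GVOHa")

# The text can be written to disk alongside code...
with open("persona.prompt", "w") as f:
    f.write(raw_prompt)

# ...and parsed back into a Prompt representation later.
parsed = client.prompts.deserialize(prompt=raw_prompt)
```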
@@ -1675,16 +1482,11 @@ def deserialize(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.prompts.deserialize(
- prompt="prompt",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.prompts.deserialize(prompt='prompt', )
"""
- response = self._raw_client.deserialize(prompt=prompt, request_options=request_options)
- return response.data
+ _response = self._raw_client.deserialize(prompt=prompt, request_options=request_options)
+ return _response.data
class AsyncPromptsClient:
@@ -1719,7 +1521,7 @@ async def log(
finish_reason: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptLogRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptLogRequestPromptParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -1800,8 +1602,11 @@ async def log(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptLogRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -1870,51 +1675,16 @@ async def log(
Examples
--------
- import asyncio
- import datetime
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import datetime
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.prompts.log(
- path="persona",
- prompt={
- "model": "gpt-4",
- "template": [
- {
- "role": "system",
- "content": "You are {{person}}. Answer questions as this person. Do not break character.",
- }
- ],
- },
- messages=[
- {"role": "user", "content": "What really happened at Roswell?"}
- ],
- inputs={"person": "Trump"},
- created_at=datetime.datetime.fromisoformat(
- "2024-07-18 23:29:35.178000+00:00",
- ),
- provider_latency=6.5931549072265625,
- output_message={
- "content": "Well, you know, there is so much secrecy involved in government, folks, it's unbelievable. They don't want to tell you everything. They don't tell me everything! But about Roswell, it's a very popular question. I know, I just know, that something very, very peculiar happened there. Was it a weather balloon? Maybe. Was it something extraterrestrial? Could be. I'd love to go down and open up all the classified documents, believe me, I would. But they don't let that happen. The Deep State, folks, the Deep State. They're unbelievable. They want to keep everything a secret. But whatever the truth is, I can tell you this: it's something big, very very big. Tremendous, in fact.",
- "role": "assistant",
- },
- prompt_tokens=100,
- output_tokens=220,
- prompt_cost=1e-05,
- output_cost=0.0002,
- finish_reason="stop",
- )
-
-
+ await client.prompts.log(path='persona', prompt={'model': 'gpt-4', 'template': [{'role': "system", 'content': 'You are {{person}}. Answer questions as this person. Do not break character.'}]}, messages=[{'role': "user", 'content': 'What really happened at Roswell?'}], inputs={'person': 'Trump'},
+ created_at=datetime.datetime.fromisoformat("2024-07-19 00:29:35.178000+00:00", ), provider_latency=6.5931549072265625, output_message={'content': "Well, you know, there is so much secrecy involved in government, folks, it's unbelievable. They don't want to tell you everything. They don't tell me everything! But about Roswell, it's a very popular question. I know, I just know, that something very, very peculiar happened there. Was it a weather balloon? Maybe. Was it something extraterrestrial? Could be. I'd love to go down and open up all the classified documents, believe me, I would. But they don't let that happen. The Deep State, folks, the Deep State. They're unbelievable. They want to keep everything a secret. But whatever the truth is, I can tell you this: it's something big, very very big. Tremendous, in fact.", 'role': "assistant"}, prompt_tokens=100, output_tokens=220, prompt_cost=1e-05, output_cost=0.0002, finish_reason='stop', )
asyncio.run(main())
"""
- response = await self._raw_client.log(
+ _response = await self._raw_client.log(
version_id=version_id,
environment=environment,
run_id=run_id,
@@ -1951,7 +1721,7 @@ async def main() -> None:
log_id=log_id,
request_options=request_options,
)
- return response.data
+ return _response.data
async def update_log(
self,
@@ -2075,25 +1845,14 @@ async def update_log(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.prompts.update_log(
- id="id",
- log_id="log_id",
- )
-
-
+ await client.prompts.update_log(id='id', log_id='log_id', )
asyncio.run(main())
"""
- response = await self._raw_client.update_log(
+ _response = await self._raw_client.update_log(
id,
log_id,
output_message=output_message,
@@ -2120,7 +1879,7 @@ async def main() -> None:
log_status=log_status,
request_options=request_options,
)
- return response.data
+ return _response.data
async def call_stream(
self,
@@ -2131,7 +1890,7 @@ async def call_stream(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallStreamRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallStreamRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2189,8 +1948,11 @@ async def call_stream(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -2253,21 +2015,13 @@ async def call_stream(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
response = await client.prompts.call_stream()
async for chunk in response:
yield chunk
-
-
asyncio.run(main())
"""
async with self._raw_client.call_stream(
@@ -2309,7 +2063,7 @@ async def call(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2367,8 +2121,11 @@ async def call(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -2431,51 +2188,17 @@ async def call(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.prompts.call(
- path="persona",
- prompt={
- "model": "gpt-4",
- "template": [
- {
- "role": "system",
- "content": "You are stockbot. Return latest prices.",
- }
- ],
- "tools": [
- {
- "name": "get_stock_price",
- "description": "Get current stock price",
- "parameters": {
- "type": "object",
- "properties": {
- "ticker_symbol": {
- "type": "string",
- "name": "Ticker Symbol",
- "description": "Ticker symbol of the stock",
- }
- },
- "required": [],
- },
- }
- ],
- },
- messages=[{"role": "user", "content": "latest apple"}],
- )
-
-
+ await client.prompts.call(path='persona', prompt={'model': 'gpt-4', 'template': [{'role': "system", 'content': 'You are stockbot. Return latest prices.'}], 'tools': [{'name': 'get_stock_price', 'description': 'Get current stock price', 'parameters': {'type': 'object', 'properties': {'ticker_symbol': {'type': 'string', 'name': 'Ticker Symbol', 'description': 'Ticker symbol of the stock'}}, 'required': []}}]},
+ messages=[{'role': "user", 'content': 'latest apple'}], )
asyncio.run(main())
"""
- response = await self._raw_client.call(
+ _response = await self._raw_client.call(
version_id=version_id,
environment=environment,
path=path,
@@ -2502,7 +2225,7 @@ async def main() -> None:
suffix=suffix,
request_options=request_options,
)
- return response.data
+ return _response.data
async def list(
self,
@@ -2511,7 +2234,7 @@ async def list(
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
user_filter: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncPager[PromptResponse]:
@@ -2532,7 +2255,7 @@ async def list(
user_filter : typing.Optional[str]
Case-insensitive filter for users in the Prompt. This filter matches against both email address and name of users.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort Prompts by
order : typing.Optional[SortOrder]
@@ -2548,77 +2271,28 @@ async def list(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- response = await client.prompts.list(
- size=1,
- )
+ response = await client.prompts.list(size=1, )
async for item in response:
yield item
+
# alternatively, you can paginate page-by-page
async for page in response.iter_pages():
yield page
-
-
asyncio.run(main())
"""
- page = page if page is not None else 1
- _response = await self._raw_client._client_wrapper.httpx_client.request(
- "prompts",
- method="GET",
- params={
- "page": page,
- "size": size,
- "name": name,
- "user_filter": user_filter,
- "sort_by": sort_by,
- "order": order,
- },
+ return await self._raw_client.list(
+ page=page,
+ size=size,
+ name=name,
+ user_filter=user_filter,
+ sort_by=sort_by,
+ order=order,
request_options=request_options,
)
- try:
- if 200 <= _response.status_code < 300:
- _parsed_response = typing.cast(
- PaginatedDataPromptResponse,
- construct_type(
- type_=PaginatedDataPromptResponse, # type: ignore
- object_=_response.json(),
- ),
- )
- _has_next = True
- _get_next = lambda: self.list(
- page=page + 1,
- size=size,
- name=name,
- user_filter=user_filter,
- sort_by=sort_by,
- order=order,
- request_options=request_options,
- )
- _items = _parsed_response.records
- return AsyncPager(has_next=_has_next, items=_items, get_next=_get_next)
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(
- HttpValidationError,
- construct_type(
- type_=HttpValidationError, # type: ignore
- object_=_response.json(),
- ),
- )
- )
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
async def upsert(
self,
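The async `list()` gets the same treatment, returning an `AsyncPager[PromptResponse]` from the raw client. Per the generated docstring it supports `async for` over both items and `iter_pages()`; a minimal runnable sketch:

```python
import asyncio

from humanloop import AsyncHumanloop

client = AsyncHumanloop(api_key="YOUR_API_KEY")


async def main() -> None:
    pager = await client.prompts.list(size=10)
    # Items are yielded across pages; subsequent pages are fetched lazily.
    async for prompt in pager:
        print(prompt)


asyncio.run(main())
```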
@@ -2751,37 +2425,14 @@ async def upsert(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.prompts.upsert(
- path="Personal Projects/Coding Assistant",
- model="gpt-4o",
- endpoint="chat",
- template=[
- {
- "content": "You are a helpful coding assistant specialising in {{language}}",
- "role": "system",
- }
- ],
- provider="openai",
- max_tokens=-1,
- temperature=0.7,
- version_name="coding-assistant-v1",
- version_description="Initial version",
- )
-
-
+ await client.prompts.upsert(path='Personal Projects/Coding Assistant', model='gpt-4o', endpoint="chat", template=[{'content': 'You are a helpful coding assistant specialising in {{language}}', 'role': "system"}], provider="openai", max_tokens=-1, temperature=0.7, version_name='coding-assistant-v1', version_description='Initial version', )
asyncio.run(main())
"""
- response = await self._raw_client.upsert(
+ _response = await self._raw_client.upsert(
model=model,
path=path,
id=id,
@@ -2809,7 +2460,7 @@ async def main() -> None:
readme=readme,
request_options=request_options,
)
- return response.data
+ return _response.data
async def get(
self,
@@ -2846,27 +2497,17 @@ async def get(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.prompts.get(
- id="pr_30gco7dx6JDq4200GVOHa",
- )
-
-
+ await client.prompts.get(id='pr_30gco7dx6JDq4200GVOHa', )
asyncio.run(main())
"""
- response = await self._raw_client.get(
+ _response = await self._raw_client.get(
id, version_id=version_id, environment=environment, request_options=request_options
)
- return response.data
+ return _response.data
async def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
"""
@@ -2886,25 +2527,15 @@ async def delete(self, id: str, *, request_options: typing.Optional[RequestOptio
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.prompts.delete(
- id="pr_30gco7dx6JDq4200GVOHa",
- )
-
-
+ await client.prompts.delete(id='pr_30gco7dx6JDq4200GVOHa', )
asyncio.run(main())
"""
- response = await self._raw_client.delete(id, request_options=request_options)
- return response.data
+ _response = await self._raw_client.delete(id, request_options=request_options)
+ return _response.data
async def move(
self,
@@ -2938,26 +2569,15 @@ async def move(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.prompts.move(
- id="pr_30gco7dx6JDq4200GVOHa",
- path="new directory/new name",
- )
-
-
+ await client.prompts.move(id='pr_30gco7dx6JDq4200GVOHa', path='new directory/new name', )
asyncio.run(main())
"""
- response = await self._raw_client.move(id, path=path, name=name, request_options=request_options)
- return response.data
+ _response = await self._raw_client.move(id, path=path, name=name, request_options=request_options)
+ return _response.data
async def populate(
self,
@@ -2997,28 +2617,18 @@ async def populate(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.prompts.populate(
- id="id",
- request={"key": "value"},
- )
-
-
+ await client.prompts.populate(id='id', request={'key': 'value'}, )
asyncio.run(main())
"""
- response = await self._raw_client.populate(
+ _response = await self._raw_client.populate(
id, request=request, version_id=version_id, environment=environment, request_options=request_options
)
- return response.data
+ return _response.data
async def list_versions(
self,
@@ -3048,27 +2658,17 @@ async def list_versions(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.prompts.list_versions(
- id="pr_30gco7dx6JDq4200GVOHa",
- )
-
-
+ await client.prompts.list_versions(id='pr_30gco7dx6JDq4200GVOHa', )
asyncio.run(main())
"""
- response = await self._raw_client.list_versions(
+ _response = await self._raw_client.list_versions(
id, evaluator_aggregates=evaluator_aggregates, request_options=request_options
)
- return response.data
+ return _response.data
async def delete_prompt_version(
self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -3093,26 +2693,15 @@ async def delete_prompt_version(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.prompts.delete_prompt_version(
- id="id",
- version_id="version_id",
- )
-
-
+ await client.prompts.delete_prompt_version(id='id', version_id='version_id', )
asyncio.run(main())
"""
- response = await self._raw_client.delete_prompt_version(id, version_id, request_options=request_options)
- return response.data
+ _response = await self._raw_client.delete_prompt_version(id, version_id, request_options=request_options)
+ return _response.data
async def patch_prompt_version(
self,
@@ -3150,28 +2739,17 @@ async def patch_prompt_version(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.prompts.patch_prompt_version(
- id="id",
- version_id="version_id",
- )
-
-
+ await client.prompts.patch_prompt_version(id='id', version_id='version_id', )
asyncio.run(main())
"""
- response = await self._raw_client.patch_prompt_version(
+ _response = await self._raw_client.patch_prompt_version(
id, version_id, name=name, description=description, request_options=request_options
)
- return response.data
+ return _response.data
async def set_deployment(
self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
@@ -3203,29 +2781,17 @@ async def set_deployment(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.prompts.set_deployment(
- id="id",
- environment_id="environment_id",
- version_id="version_id",
- )
-
-
+ await client.prompts.set_deployment(id='id', environment_id='environment_id', version_id='version_id', )
asyncio.run(main())
"""
- response = await self._raw_client.set_deployment(
+ _response = await self._raw_client.set_deployment(
id, environment_id, version_id=version_id, request_options=request_options
)
- return response.data
+ return _response.data
async def remove_deployment(
self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -3253,26 +2819,15 @@ async def remove_deployment(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.prompts.remove_deployment(
- id="id",
- environment_id="environment_id",
- )
-
-
+ await client.prompts.remove_deployment(id='id', environment_id='environment_id', )
asyncio.run(main())
"""
- response = await self._raw_client.remove_deployment(id, environment_id, request_options=request_options)
- return response.data
+ _response = await self._raw_client.remove_deployment(id, environment_id, request_options=request_options)
+ return _response.data
async def list_environments(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -3295,25 +2850,15 @@ async def list_environments(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.prompts.list_environments(
- id="pr_30gco7dx6JDq4200GVOHa",
- )
-
-
+ await client.prompts.list_environments(id='pr_30gco7dx6JDq4200GVOHa', )
asyncio.run(main())
"""
- response = await self._raw_client.list_environments(id, request_options=request_options)
- return response.data
+ _response = await self._raw_client.list_environments(id, request_options=request_options)
+ return _response.data
async def update_monitoring(
self,
@@ -3349,28 +2894,17 @@ async def update_monitoring(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.prompts.update_monitoring(
- id="pr_30gco7dx6JDq4200GVOHa",
- activate=[{"evaluator_version_id": "evv_1abc4308abd"}],
- )
-
-
+ await client.prompts.update_monitoring(id='pr_30gco7dx6JDq4200GVOHa', activate=[{'evaluator_version_id': 'evv_1abc4308abd'}], )
asyncio.run(main())
"""
- response = await self._raw_client.update_monitoring(
+ _response = await self._raw_client.update_monitoring(
id, activate=activate, deactivate=deactivate, request_options=request_options
)
- return response.data
+ return _response.data
async def serialize(
self,
@@ -3379,7 +2913,7 @@ async def serialize(
version_id: typing.Optional[str] = None,
environment: typing.Optional[str] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> None:
+ ) -> str:
"""
Serialize a Prompt to the .prompt file format.
@@ -3405,31 +2939,22 @@ async def serialize(
Returns
-------
- None
+ str
+ Successful Response
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.prompts.serialize(
- id="id",
- )
-
-
+ await client.prompts.serialize(id='id', )
asyncio.run(main())
"""
- response = await self._raw_client.serialize(
+ _response = await self._raw_client.serialize(
id, version_id=version_id, environment=environment, request_options=request_options
)
- return response.data
+ return _response.data
async def deserialize(
self, *, prompt: str, request_options: typing.Optional[RequestOptions] = None
@@ -3454,22 +2979,12 @@ async def deserialize(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.prompts.deserialize(
- prompt="prompt",
- )
-
-
+ await client.prompts.deserialize(prompt='prompt', )
asyncio.run(main())
"""
- response = await self._raw_client.deserialize(prompt=prompt, request_options=request_options)
- return response.data
+ _response = await self._raw_client.deserialize(prompt=prompt, request_options=request_options)
+ return _response.data
diff --git a/src/humanloop/prompts/raw_client.py b/src/humanloop/prompts/raw_client.py
index 2b907d91..57ba25ad 100644
--- a/src/humanloop/prompts/raw_client.py
+++ b/src/humanloop/prompts/raw_client.py
@@ -1,52 +1,57 @@
# This file was auto-generated by Fern from our API Definition.
-import typing
-from ..core.client_wrapper import SyncClientWrapper
-from ..requests.chat_message import ChatMessageParams
-from .requests.prompt_log_request_tool_choice import PromptLogRequestToolChoiceParams
-from ..requests.prompt_kernel_request import PromptKernelRequestParams
+import contextlib
import datetime as dt
-from ..types.log_status import LogStatus
+import typing
+from json.decoder import JSONDecodeError
+
+import httpx_sse
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.http_response import AsyncHttpResponse, HttpResponse
+from ..core.jsonable_encoder import jsonable_encoder
+from ..core.pagination import AsyncPager, BaseHttpResponse, SyncPager
from ..core.request_options import RequestOptions
-from ..core.http_response import HttpResponse
-from ..types.create_prompt_log_response import CreatePromptLogResponse
from ..core.serialization import convert_and_respect_annotation_metadata
from ..core.unchecked_base_model import construct_type
from ..errors.unprocessable_entity_error import UnprocessableEntityError
-from ..types.http_validation_error import HttpValidationError
-from json.decoder import JSONDecodeError
-from ..core.api_error import ApiError
-from .requests.prompt_log_update_request_tool_choice import PromptLogUpdateRequestToolChoiceParams
-from ..types.log_response import LogResponse
-from ..core.jsonable_encoder import jsonable_encoder
-from .requests.prompts_call_stream_request_tool_choice import PromptsCallStreamRequestToolChoiceParams
-from ..requests.provider_api_keys import ProviderApiKeysParams
-from ..types.prompt_call_stream_response import PromptCallStreamResponse
-import httpx_sse
-import contextlib
-from .requests.prompts_call_request_tool_choice import PromptsCallRequestToolChoiceParams
-from ..types.prompt_call_response import PromptCallResponse
-from ..types.model_endpoints import ModelEndpoints
-from .requests.prompt_request_template import PromptRequestTemplateParams
-from ..types.template_language import TemplateLanguage
-from ..types.model_providers import ModelProviders
-from .requests.prompt_request_stop import PromptRequestStopParams
-from ..requests.response_format import ResponseFormatParams
-from .requests.prompt_request_reasoning_effort import PromptRequestReasoningEffortParams
-from ..requests.tool_function import ToolFunctionParams
-from ..types.prompt_response import PromptResponse
-from ..types.populate_template_response import PopulateTemplateResponse
-from ..types.list_prompts import ListPrompts
-from ..types.file_environment_response import FileEnvironmentResponse
+from ..requests.chat_message import ChatMessageParams
from ..requests.evaluator_activation_deactivation_request_activate_item import (
EvaluatorActivationDeactivationRequestActivateItemParams,
)
from ..requests.evaluator_activation_deactivation_request_deactivate_item import (
EvaluatorActivationDeactivationRequestDeactivateItemParams,
)
+from ..requests.provider_api_keys import ProviderApiKeysParams
+from ..requests.response_format import ResponseFormatParams
+from ..requests.tool_function import ToolFunctionParams
+from ..types.create_prompt_log_response import CreatePromptLogResponse
+from ..types.file_environment_response import FileEnvironmentResponse
+from ..types.file_sort_by import FileSortBy
+from ..types.http_validation_error import HttpValidationError
+from ..types.list_prompts import ListPrompts
+from ..types.log_response import LogResponse
+from ..types.log_status import LogStatus
+from ..types.model_endpoints import ModelEndpoints
+from ..types.model_providers import ModelProviders
+from ..types.paginated_data_prompt_response import PaginatedDataPromptResponse
+from ..types.populate_template_response import PopulateTemplateResponse
+from ..types.prompt_call_response import PromptCallResponse
+from ..types.prompt_call_stream_response import PromptCallStreamResponse
from ..types.prompt_kernel_request import PromptKernelRequest
-from ..core.client_wrapper import AsyncClientWrapper
-from ..core.http_response import AsyncHttpResponse
+from ..types.prompt_response import PromptResponse
+from ..types.sort_order import SortOrder
+from ..types.template_language import TemplateLanguage
+from .requests.prompt_log_request_prompt import PromptLogRequestPromptParams
+from .requests.prompt_log_request_tool_choice import PromptLogRequestToolChoiceParams
+from .requests.prompt_log_update_request_tool_choice import PromptLogUpdateRequestToolChoiceParams
+from .requests.prompt_request_reasoning_effort import PromptRequestReasoningEffortParams
+from .requests.prompt_request_stop import PromptRequestStopParams
+from .requests.prompt_request_template import PromptRequestTemplateParams
+from .requests.prompts_call_request_prompt import PromptsCallRequestPromptParams
+from .requests.prompts_call_request_tool_choice import PromptsCallRequestToolChoiceParams
+from .requests.prompts_call_stream_request_prompt import PromptsCallStreamRequestPromptParams
+from .requests.prompts_call_stream_request_tool_choice import PromptsCallStreamRequestToolChoiceParams
# this is used as the default value for optional parameters
OMIT = typing.cast(typing.Any, ...)
@@ -73,7 +78,7 @@ def log(
finish_reason: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptLogRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptLogRequestPromptParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -154,8 +159,11 @@ def log(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptLogRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -249,7 +257,7 @@ def log(
object_=tool_choice, annotation=PromptLogRequestToolChoiceParams, direction="write"
),
"prompt": convert_and_respect_annotation_metadata(
- object_=prompt, annotation=PromptKernelRequestParams, direction="write"
+ object_=prompt, annotation=PromptLogRequestPromptParams, direction="write"
),
"start_time": start_time,
"end_time": end_time,
@@ -289,18 +297,19 @@ def log(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def update_log(
self,
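Error construction now forwards the response headers into `UnprocessableEntityError` and `ApiError`, matching the updated constructors in `core/api_error.py`. A hedged sketch of catching these, assuming the exceptions expose the values they are constructed with:

```python
from humanloop import Humanloop
from humanloop.core.api_error import ApiError

client = Humanloop(api_key="YOUR_API_KEY")

try:
    client.prompts.get(id="pr_30gco7dx6JDq4200GVOHa")
except ApiError as err:
    # status_code and body were already part of ApiError; headers is assumed
    # to be exposed now that the constructor receives it.
    print(err.status_code, err.body)
    print(getattr(err, "headers", None))
```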
@@ -473,18 +482,19 @@ def update_log(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
@contextlib.contextmanager
def call_stream(
@@ -496,7 +506,7 @@ def call_stream(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallStreamRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallStreamRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -554,8 +564,11 @@ def call_stream(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -633,7 +646,7 @@ def call_stream(
object_=tool_choice, annotation=PromptsCallStreamRequestToolChoiceParams, direction="write"
),
"prompt": convert_and_respect_annotation_metadata(
- object_=prompt, annotation=PromptKernelRequestParams, direction="write"
+ object_=prompt, annotation=PromptsCallStreamRequestPromptParams, direction="write"
),
"inputs": inputs,
"source": source,
@@ -673,7 +686,7 @@ def _iter():
if _sse.data == None:
return
try:
- yield _sse.data()
+ yield _sse.data
except Exception:
pass
return
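The change from `_sse.data()` to `_sse.data` fixes a genuine bug: `httpx_sse` yields `ServerSentEvent` objects whose `data` is a string attribute, so calling it would raise `TypeError` on the first streamed event. A standalone sketch of that shape (the URL is a placeholder, not a Humanloop endpoint):

```python
import httpx
from httpx_sse import connect_sse

# Illustrative only: https://example.com/sse stands in for any SSE endpoint.
with httpx.Client() as http:
    with connect_sse(http, "GET", "https://example.com/sse") as event_source:
        for sse in event_source.iter_sse():
            print(sse.data)  # .data is a str attribute, not a method
```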
@@ -682,18 +695,21 @@ def _iter():
_response.read()
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(
+ status_code=_response.status_code, headers=dict(_response.headers), body=_response.text
+ )
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
yield stream()
@@ -706,7 +722,7 @@ def call(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -764,8 +780,11 @@ def call(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -843,7 +862,7 @@ def call(
object_=tool_choice, annotation=PromptsCallRequestToolChoiceParams, direction="write"
),
"prompt": convert_and_respect_annotation_metadata(
- object_=prompt, annotation=PromptKernelRequestParams, direction="write"
+ object_=prompt, annotation=PromptsCallRequestPromptParams, direction="write"
),
"inputs": inputs,
"source": source,
@@ -884,18 +903,115 @@ def call(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+ def list(
+ self,
+ *,
+ page: typing.Optional[int] = None,
+ size: typing.Optional[int] = None,
+ name: typing.Optional[str] = None,
+ user_filter: typing.Optional[str] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
+ order: typing.Optional[SortOrder] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> SyncPager[PromptResponse]:
+ """
+ Get a list of all Prompts.
+
+ Parameters
+ ----------
+ page : typing.Optional[int]
+ Page number for pagination.
+
+ size : typing.Optional[int]
+ Page size for pagination. Number of Prompts to fetch.
+
+ name : typing.Optional[str]
+ Case-insensitive filter for Prompt name.
+
+ user_filter : typing.Optional[str]
+ Case-insensitive filter for users in the Prompt. This filter matches against both email address and name of users.
+
+ sort_by : typing.Optional[FileSortBy]
+ Field to sort Prompts by
+
+ order : typing.Optional[SortOrder]
+ Direction to sort by.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ SyncPager[PromptResponse]
+ Successful Response
+ """
+ page = page if page is not None else 1
+
+ _response = self._client_wrapper.httpx_client.request(
+ "prompts",
+ method="GET",
+ params={
+ "page": page,
+ "size": size,
+ "name": name,
+ "user_filter": user_filter,
+ "sort_by": sort_by,
+ "order": order,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _parsed_response = typing.cast(
+ PaginatedDataPromptResponse,
+ construct_type(
+ type_=PaginatedDataPromptResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ _items = _parsed_response.records
+ _has_next = True
+ _get_next = lambda: self.list(
+ page=page + 1,
+ size=size,
+ name=name,
+ user_filter=user_filter,
+ sort_by=sort_by,
+ order=order,
+ request_options=request_options,
+ )
+ return SyncPager(
+ has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response)
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ headers=dict(_response.headers),
+ body=typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
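A short sketch of consuming the new list method, which wraps GET /prompts in a SyncPager that fetches the following page lazily through get_next. It assumes the high-level client exposes the same signature and that the pager is iterable across pages, as in other Fern-generated SDKs; the filter value is illustrative.

from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")  # placeholder key

# Iterating the pager follows get_next() once a page is exhausted, so this walks
# every Prompt whose name matches the case-insensitive filter.
pager = client.prompts.list(size=50, name="support")
for prompt in pager:
    print(prompt.id, prompt.path)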
def upsert(
self,
@@ -1084,18 +1200,19 @@ def upsert(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def get(
self,
@@ -1151,18 +1268,19 @@ def get(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> HttpResponse[None]:
"""
@@ -1190,18 +1308,19 @@ def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] =
return HttpResponse(response=_response, data=None)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def move(
self,
@@ -1258,18 +1377,19 @@ def move(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def populate(
self,
@@ -1315,6 +1435,9 @@ def populate(
"environment": environment,
},
json=request,
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
)
@@ -1330,18 +1453,19 @@ def populate(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def list_versions(
self,
@@ -1389,18 +1513,19 @@ def list_versions(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def delete_prompt_version(
self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1433,18 +1558,19 @@ def delete_prompt_version(
return HttpResponse(response=_response, data=None)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def patch_prompt_version(
self,
@@ -1487,6 +1613,9 @@ def patch_prompt_version(
"name": name,
"description": description,
},
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
)
@@ -1502,18 +1631,19 @@ def patch_prompt_version(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def set_deployment(
self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
@@ -1563,18 +1693,19 @@ def set_deployment(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def remove_deployment(
self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1610,18 +1741,19 @@ def remove_deployment(
return HttpResponse(response=_response, data=None)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def list_environments(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1659,18 +1791,19 @@ def list_environments(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def update_monitoring(
self,
@@ -1719,6 +1852,9 @@ def update_monitoring(
direction="write",
),
},
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
)
@@ -1734,18 +1870,19 @@ def update_monitoring(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def serialize(
self,
@@ -1754,7 +1891,7 @@ def serialize(
version_id: typing.Optional[str] = None,
environment: typing.Optional[str] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> HttpResponse[None]:
+ ) -> HttpResponse[str]:
"""
Serialize a Prompt to the .prompt file format.
@@ -1780,7 +1917,8 @@ def serialize(
Returns
-------
- HttpResponse[None]
+ HttpResponse[str]
+ Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
f"prompts/{jsonable_encoder(id)}/serialize",
@@ -1793,21 +1931,22 @@ def serialize(
)
try:
if 200 <= _response.status_code < 300:
- return HttpResponse(response=_response, data=None)
+ return HttpResponse(response=_response, data=_response.text) # type: ignore
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
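With serialize now returning the response body as text instead of None, the raw .prompt contents can be round-tripped through deserialize. A hedged sketch, assuming the high-level client unwraps the text body and that the Prompt id is a placeholder:

from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")  # placeholder key

# Fetch the deployed version of a Prompt as .prompt file text (id is a placeholder).
raw_prompt = client.prompts.serialize(id="pr_1234567890")

# The text can be stored in version control, edited, and turned back into a Prompt
# kernel via deserialize, or passed directly as the `prompt` argument to call/log.
kernel = client.prompts.deserialize(prompt=raw_prompt)
print(kernel)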
def deserialize(
self, *, prompt: str, request_options: typing.Optional[RequestOptions] = None
@@ -1854,18 +1993,19 @@ def deserialize(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
class AsyncRawPromptsClient:
@@ -1889,7 +2029,7 @@ async def log(
finish_reason: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptLogRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptLogRequestPromptParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -1970,8 +2110,11 @@ async def log(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptLogRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
start_time : typing.Optional[dt.datetime]
When the logged event started.
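The async log method accepts the same widened prompt union, so a Log can reference either a kernel object or raw .prompt text. A brief sketch using the asynchronous client; the class name, `path`, output text and local filename are assumptions for illustration only.

import asyncio

from humanloop import AsyncHumanloop


async def main() -> None:
    client = AsyncHumanloop(api_key="YOUR_API_KEY")  # placeholder key
    # Log a completion against a Prompt defined by raw .prompt file contents; a new
    # version is created only if those details do not match an existing version.
    raw_prompt = open("support_agent.prompt", "r", encoding="utf-8").read()  # local file, illustrative
    await client.prompts.log(
        path="demo/support-agent",
        prompt=raw_prompt,
        messages=[{"role": "user", "content": "Hello!"}],
        output="Hi! How can I help?",
    )


asyncio.run(main())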
@@ -2065,7 +2208,7 @@ async def log(
object_=tool_choice, annotation=PromptLogRequestToolChoiceParams, direction="write"
),
"prompt": convert_and_respect_annotation_metadata(
- object_=prompt, annotation=PromptKernelRequestParams, direction="write"
+ object_=prompt, annotation=PromptLogRequestPromptParams, direction="write"
),
"start_time": start_time,
"end_time": end_time,
@@ -2105,18 +2248,19 @@ async def log(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def update_log(
self,
@@ -2289,18 +2433,19 @@ async def update_log(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
@contextlib.asynccontextmanager
async def call_stream(
@@ -2312,7 +2457,7 @@ async def call_stream(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallStreamRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallStreamRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2370,8 +2515,11 @@ async def call_stream(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -2449,7 +2597,7 @@ async def call_stream(
object_=tool_choice, annotation=PromptsCallStreamRequestToolChoiceParams, direction="write"
),
"prompt": convert_and_respect_annotation_metadata(
- object_=prompt, annotation=PromptKernelRequestParams, direction="write"
+ object_=prompt, annotation=PromptsCallStreamRequestPromptParams, direction="write"
),
"inputs": inputs,
"source": source,
@@ -2489,7 +2637,7 @@ async def _iter():
if _sse.data == None:
return
try:
- yield _sse.data()
+ yield _sse.data
except Exception:
pass
return
@@ -2498,18 +2646,21 @@ async def _iter():
await _response.aread()
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(
+ status_code=_response.status_code, headers=dict(_response.headers), body=_response.text
+ )
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
yield await stream()
@@ -2522,7 +2673,7 @@ async def call(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2580,8 +2731,11 @@ async def call(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -2659,7 +2813,7 @@ async def call(
object_=tool_choice, annotation=PromptsCallRequestToolChoiceParams, direction="write"
),
"prompt": convert_and_respect_annotation_metadata(
- object_=prompt, annotation=PromptKernelRequestParams, direction="write"
+ object_=prompt, annotation=PromptsCallRequestPromptParams, direction="write"
),
"inputs": inputs,
"source": source,
@@ -2700,18 +2854,118 @@ async def call(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
+ ),
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+ async def list(
+ self,
+ *,
+ page: typing.Optional[int] = None,
+ size: typing.Optional[int] = None,
+ name: typing.Optional[str] = None,
+ user_filter: typing.Optional[str] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
+ order: typing.Optional[SortOrder] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncPager[PromptResponse]:
+ """
+ Get a list of all Prompts.
+
+ Parameters
+ ----------
+ page : typing.Optional[int]
+ Page number for pagination.
+
+ size : typing.Optional[int]
+ Page size for pagination. Number of Prompts to fetch.
+
+ name : typing.Optional[str]
+ Case-insensitive filter for Prompt name.
+
+ user_filter : typing.Optional[str]
+ Case-insensitive filter for users in the Prompt. This filter matches against both email address and name of users.
+
+ sort_by : typing.Optional[FileSortBy]
+ Field to sort Prompts by
+
+ order : typing.Optional[SortOrder]
+ Direction to sort by.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncPager[PromptResponse]
+ Successful Response
+ """
+ page = page if page is not None else 1
+
+ _response = await self._client_wrapper.httpx_client.request(
+ "prompts",
+ method="GET",
+ params={
+ "page": page,
+ "size": size,
+ "name": name,
+ "user_filter": user_filter,
+ "sort_by": sort_by,
+ "order": order,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _parsed_response = typing.cast(
+ PaginatedDataPromptResponse,
+ construct_type(
+ type_=PaginatedDataPromptResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ _items = _parsed_response.records
+ _has_next = True
+
+ async def _get_next():
+ return await self.list(
+ page=page + 1,
+ size=size,
+ name=name,
+ user_filter=user_filter,
+ sort_by=sort_by,
+ order=order,
+ request_options=request_options,
)
+
+ return AsyncPager(
+ has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response)
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ headers=dict(_response.headers),
+ body=typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
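The asynchronous list mirrors the synchronous pager but returns an AsyncPager whose _get_next coroutine awaits the following page. A sketch of consuming it, assuming AsyncPager supports async iteration as in other Fern-generated clients:

import asyncio

from humanloop import AsyncHumanloop


async def main() -> None:
    client = AsyncHumanloop(api_key="YOUR_API_KEY")  # placeholder key
    pager = await client.prompts.list(size=50)
    # Async iteration awaits _get_next() behind the scenes when a page is exhausted.
    async for prompt in pager:
        print(prompt.id, prompt.path)


asyncio.run(main())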
async def upsert(
self,
@@ -2900,18 +3154,19 @@ async def upsert(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def get(
self,
@@ -2967,18 +3222,19 @@ async def get(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def delete(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -3008,18 +3264,19 @@ async def delete(
return AsyncHttpResponse(response=_response, data=None)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def move(
self,
@@ -3076,18 +3333,19 @@ async def move(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def populate(
self,
@@ -3133,6 +3391,9 @@ async def populate(
"environment": environment,
},
json=request,
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
)
@@ -3148,18 +3409,19 @@ async def populate(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def list_versions(
self,
@@ -3207,18 +3469,19 @@ async def list_versions(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def delete_prompt_version(
self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -3251,18 +3514,19 @@ async def delete_prompt_version(
return AsyncHttpResponse(response=_response, data=None)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def patch_prompt_version(
self,
@@ -3305,6 +3569,9 @@ async def patch_prompt_version(
"name": name,
"description": description,
},
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
)
@@ -3320,18 +3587,19 @@ async def patch_prompt_version(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def set_deployment(
self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
@@ -3381,18 +3649,19 @@ async def set_deployment(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def remove_deployment(
self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -3428,18 +3697,19 @@ async def remove_deployment(
return AsyncHttpResponse(response=_response, data=None)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def list_environments(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -3477,18 +3747,19 @@ async def list_environments(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def update_monitoring(
self,
@@ -3537,6 +3808,9 @@ async def update_monitoring(
direction="write",
),
},
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
)
@@ -3552,18 +3826,19 @@ async def update_monitoring(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def serialize(
self,
@@ -3572,7 +3847,7 @@ async def serialize(
version_id: typing.Optional[str] = None,
environment: typing.Optional[str] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> AsyncHttpResponse[None]:
+ ) -> AsyncHttpResponse[str]:
"""
Serialize a Prompt to the .prompt file format.
@@ -3598,7 +3873,8 @@ async def serialize(
Returns
-------
- AsyncHttpResponse[None]
+ AsyncHttpResponse[str]
+ Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
f"prompts/{jsonable_encoder(id)}/serialize",
@@ -3611,21 +3887,22 @@ async def serialize(
)
try:
if 200 <= _response.status_code < 300:
- return AsyncHttpResponse(response=_response, data=None)
+ return AsyncHttpResponse(response=_response, data=_response.text) # type: ignore
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def deserialize(
self, *, prompt: str, request_options: typing.Optional[RequestOptions] = None
@@ -3672,15 +3949,16 @@ async def deserialize(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
diff --git a/src/humanloop/prompts/requests/__init__.py b/src/humanloop/prompts/requests/__init__.py
index 3971e252..67f6233e 100644
--- a/src/humanloop/prompts/requests/__init__.py
+++ b/src/humanloop/prompts/requests/__init__.py
@@ -1,19 +1,27 @@
# This file was auto-generated by Fern from our API Definition.
+# isort: skip_file
+
+from .prompt_log_request_prompt import PromptLogRequestPromptParams
from .prompt_log_request_tool_choice import PromptLogRequestToolChoiceParams
from .prompt_log_update_request_tool_choice import PromptLogUpdateRequestToolChoiceParams
from .prompt_request_reasoning_effort import PromptRequestReasoningEffortParams
from .prompt_request_stop import PromptRequestStopParams
from .prompt_request_template import PromptRequestTemplateParams
+from .prompts_call_request_prompt import PromptsCallRequestPromptParams
from .prompts_call_request_tool_choice import PromptsCallRequestToolChoiceParams
+from .prompts_call_stream_request_prompt import PromptsCallStreamRequestPromptParams
from .prompts_call_stream_request_tool_choice import PromptsCallStreamRequestToolChoiceParams
__all__ = [
+ "PromptLogRequestPromptParams",
"PromptLogRequestToolChoiceParams",
"PromptLogUpdateRequestToolChoiceParams",
"PromptRequestReasoningEffortParams",
"PromptRequestStopParams",
"PromptRequestTemplateParams",
+ "PromptsCallRequestPromptParams",
"PromptsCallRequestToolChoiceParams",
+ "PromptsCallStreamRequestPromptParams",
"PromptsCallStreamRequestToolChoiceParams",
]
diff --git a/src/humanloop/prompts/requests/prompt_log_request_prompt.py b/src/humanloop/prompts/requests/prompt_log_request_prompt.py
new file mode 100644
index 00000000..18417e47
--- /dev/null
+++ b/src/humanloop/prompts/requests/prompt_log_request_prompt.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from ...requests.prompt_kernel_request import PromptKernelRequestParams
+
+PromptLogRequestPromptParams = typing.Union[PromptKernelRequestParams, str]
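The new request-side aliases are plain typing unions, so either a PromptKernelRequestParams TypedDict or a raw string type-checks wherever the prompt parameter is accepted. A small illustration; the dict fields and the .prompt string contents are assumptions based on the kernel request schema, only the alias and its import path come from this patch.

from humanloop.prompts.requests import PromptLogRequestPromptParams

# Both assignments satisfy the alias: TypedDicts are ordinary dicts at runtime.
as_kernel: PromptLogRequestPromptParams = {"model": "gpt-4o", "temperature": 0.7}
as_file: PromptLogRequestPromptParams = "---\nmodel: gpt-4o\n---\nYou are a helpful assistant."  # illustrative contents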
diff --git a/src/humanloop/prompts/requests/prompt_log_request_tool_choice.py b/src/humanloop/prompts/requests/prompt_log_request_tool_choice.py
index 27b3d80d..eb1a3a0d 100644
--- a/src/humanloop/prompts/requests/prompt_log_request_tool_choice.py
+++ b/src/humanloop/prompts/requests/prompt_log_request_tool_choice.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from ...requests.tool_choice import ToolChoiceParams
PromptLogRequestToolChoiceParams = typing.Union[
diff --git a/src/humanloop/prompts/requests/prompt_log_update_request_tool_choice.py b/src/humanloop/prompts/requests/prompt_log_update_request_tool_choice.py
index d2d0584b..18598c0a 100644
--- a/src/humanloop/prompts/requests/prompt_log_update_request_tool_choice.py
+++ b/src/humanloop/prompts/requests/prompt_log_update_request_tool_choice.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from ...requests.tool_choice import ToolChoiceParams
PromptLogUpdateRequestToolChoiceParams = typing.Union[
diff --git a/src/humanloop/prompts/requests/prompt_request_reasoning_effort.py b/src/humanloop/prompts/requests/prompt_request_reasoning_effort.py
index 080a107e..c40a1fdd 100644
--- a/src/humanloop/prompts/requests/prompt_request_reasoning_effort.py
+++ b/src/humanloop/prompts/requests/prompt_request_reasoning_effort.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from ...types.open_ai_reasoning_effort import OpenAiReasoningEffort
PromptRequestReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/prompts/requests/prompt_request_template.py b/src/humanloop/prompts/requests/prompt_request_template.py
index d4c3b5bc..51e6905d 100644
--- a/src/humanloop/prompts/requests/prompt_request_template.py
+++ b/src/humanloop/prompts/requests/prompt_request_template.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from ...requests.chat_message import ChatMessageParams
PromptRequestTemplateParams = typing.Union[str, typing.Sequence[ChatMessageParams]]
diff --git a/src/humanloop/prompts/requests/prompts_call_request_prompt.py b/src/humanloop/prompts/requests/prompts_call_request_prompt.py
new file mode 100644
index 00000000..c9ef087f
--- /dev/null
+++ b/src/humanloop/prompts/requests/prompts_call_request_prompt.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from ...requests.prompt_kernel_request import PromptKernelRequestParams
+
+PromptsCallRequestPromptParams = typing.Union[PromptKernelRequestParams, str]
diff --git a/src/humanloop/prompts/requests/prompts_call_request_tool_choice.py b/src/humanloop/prompts/requests/prompts_call_request_tool_choice.py
index 0bea327d..9a2e39ad 100644
--- a/src/humanloop/prompts/requests/prompts_call_request_tool_choice.py
+++ b/src/humanloop/prompts/requests/prompts_call_request_tool_choice.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from ...requests.tool_choice import ToolChoiceParams
PromptsCallRequestToolChoiceParams = typing.Union[
diff --git a/src/humanloop/prompts/requests/prompts_call_stream_request_prompt.py b/src/humanloop/prompts/requests/prompts_call_stream_request_prompt.py
new file mode 100644
index 00000000..f27fc93b
--- /dev/null
+++ b/src/humanloop/prompts/requests/prompts_call_stream_request_prompt.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from ...requests.prompt_kernel_request import PromptKernelRequestParams
+
+PromptsCallStreamRequestPromptParams = typing.Union[PromptKernelRequestParams, str]
diff --git a/src/humanloop/prompts/requests/prompts_call_stream_request_tool_choice.py b/src/humanloop/prompts/requests/prompts_call_stream_request_tool_choice.py
index c2d4606b..d8e537d8 100644
--- a/src/humanloop/prompts/requests/prompts_call_stream_request_tool_choice.py
+++ b/src/humanloop/prompts/requests/prompts_call_stream_request_tool_choice.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from ...requests.tool_choice import ToolChoiceParams
PromptsCallStreamRequestToolChoiceParams = typing.Union[
diff --git a/src/humanloop/prompts/types/__init__.py b/src/humanloop/prompts/types/__init__.py
index 1b849e7d..964060c2 100644
--- a/src/humanloop/prompts/types/__init__.py
+++ b/src/humanloop/prompts/types/__init__.py
@@ -1,19 +1,27 @@
# This file was auto-generated by Fern from our API Definition.
+# isort: skip_file
+
+from .prompt_log_request_prompt import PromptLogRequestPrompt
from .prompt_log_request_tool_choice import PromptLogRequestToolChoice
from .prompt_log_update_request_tool_choice import PromptLogUpdateRequestToolChoice
from .prompt_request_reasoning_effort import PromptRequestReasoningEffort
from .prompt_request_stop import PromptRequestStop
from .prompt_request_template import PromptRequestTemplate
+from .prompts_call_request_prompt import PromptsCallRequestPrompt
from .prompts_call_request_tool_choice import PromptsCallRequestToolChoice
+from .prompts_call_stream_request_prompt import PromptsCallStreamRequestPrompt
from .prompts_call_stream_request_tool_choice import PromptsCallStreamRequestToolChoice
__all__ = [
+ "PromptLogRequestPrompt",
"PromptLogRequestToolChoice",
"PromptLogUpdateRequestToolChoice",
"PromptRequestReasoningEffort",
"PromptRequestStop",
"PromptRequestTemplate",
+ "PromptsCallRequestPrompt",
"PromptsCallRequestToolChoice",
+ "PromptsCallStreamRequestPrompt",
"PromptsCallStreamRequestToolChoice",
]
diff --git a/src/humanloop/prompts/types/prompt_log_request_prompt.py b/src/humanloop/prompts/types/prompt_log_request_prompt.py
new file mode 100644
index 00000000..6b1c1c5e
--- /dev/null
+++ b/src/humanloop/prompts/types/prompt_log_request_prompt.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from ...types.prompt_kernel_request import PromptKernelRequest
+
+PromptLogRequestPrompt = typing.Union[PromptKernelRequest, str]
diff --git a/src/humanloop/prompts/types/prompt_log_request_tool_choice.py b/src/humanloop/prompts/types/prompt_log_request_tool_choice.py
index d101ba50..f7c0c6e9 100644
--- a/src/humanloop/prompts/types/prompt_log_request_tool_choice.py
+++ b/src/humanloop/prompts/types/prompt_log_request_tool_choice.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from ...types.tool_choice import ToolChoice
PromptLogRequestToolChoice = typing.Union[
diff --git a/src/humanloop/prompts/types/prompt_log_update_request_tool_choice.py b/src/humanloop/prompts/types/prompt_log_update_request_tool_choice.py
index 8279556a..0edb325c 100644
--- a/src/humanloop/prompts/types/prompt_log_update_request_tool_choice.py
+++ b/src/humanloop/prompts/types/prompt_log_update_request_tool_choice.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from ...types.tool_choice import ToolChoice
PromptLogUpdateRequestToolChoice = typing.Union[
diff --git a/src/humanloop/prompts/types/prompt_request_reasoning_effort.py b/src/humanloop/prompts/types/prompt_request_reasoning_effort.py
index 33f35288..89eefb37 100644
--- a/src/humanloop/prompts/types/prompt_request_reasoning_effort.py
+++ b/src/humanloop/prompts/types/prompt_request_reasoning_effort.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from ...types.open_ai_reasoning_effort import OpenAiReasoningEffort
PromptRequestReasoningEffort = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/prompts/types/prompt_request_template.py b/src/humanloop/prompts/types/prompt_request_template.py
index 9808ce55..0e3dc1b4 100644
--- a/src/humanloop/prompts/types/prompt_request_template.py
+++ b/src/humanloop/prompts/types/prompt_request_template.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from ...types.chat_message import ChatMessage
PromptRequestTemplate = typing.Union[str, typing.List[ChatMessage]]
diff --git a/src/humanloop/prompts/types/prompts_call_request_prompt.py b/src/humanloop/prompts/types/prompts_call_request_prompt.py
new file mode 100644
index 00000000..98cb80c3
--- /dev/null
+++ b/src/humanloop/prompts/types/prompts_call_request_prompt.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from ...types.prompt_kernel_request import PromptKernelRequest
+
+PromptsCallRequestPrompt = typing.Union[PromptKernelRequest, str]
diff --git a/src/humanloop/prompts/types/prompts_call_request_tool_choice.py b/src/humanloop/prompts/types/prompts_call_request_tool_choice.py
index ba4ca617..8fc2cad0 100644
--- a/src/humanloop/prompts/types/prompts_call_request_tool_choice.py
+++ b/src/humanloop/prompts/types/prompts_call_request_tool_choice.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from ...types.tool_choice import ToolChoice
PromptsCallRequestToolChoice = typing.Union[
diff --git a/src/humanloop/prompts/types/prompts_call_stream_request_prompt.py b/src/humanloop/prompts/types/prompts_call_stream_request_prompt.py
new file mode 100644
index 00000000..c623bcae
--- /dev/null
+++ b/src/humanloop/prompts/types/prompts_call_stream_request_prompt.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from ...types.prompt_kernel_request import PromptKernelRequest
+
+PromptsCallStreamRequestPrompt = typing.Union[PromptKernelRequest, str]
diff --git a/src/humanloop/prompts/types/prompts_call_stream_request_tool_choice.py b/src/humanloop/prompts/types/prompts_call_stream_request_tool_choice.py
index 02863f76..67b9e533 100644
--- a/src/humanloop/prompts/types/prompts_call_stream_request_tool_choice.py
+++ b/src/humanloop/prompts/types/prompts_call_stream_request_tool_choice.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from ...types.tool_choice import ToolChoice
PromptsCallStreamRequestToolChoice = typing.Union[
diff --git a/src/humanloop/requests/__init__.py b/src/humanloop/requests/__init__.py
index fb1580df..a95f70ac 100644
--- a/src/humanloop/requests/__init__.py
+++ b/src/humanloop/requests/__init__.py
@@ -1,5 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
+# isort: skip_file
+
from .agent_call_response import AgentCallResponseParams
from .agent_call_response_tool_choice import AgentCallResponseToolChoiceParams
from .agent_call_stream_response import AgentCallStreamResponseParams
diff --git a/src/humanloop/requests/agent_call_response.py b/src/humanloop/requests/agent_call_response.py
index ffc925ec..1e72ba93 100644
--- a/src/humanloop/requests/agent_call_response.py
+++ b/src/humanloop/requests/agent_call_response.py
@@ -1,13 +1,13 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
-import typing_extensions
-from .chat_message import ChatMessageParams
+import datetime as dt
import typing
+
+import typing_extensions
+from ..types.log_status import LogStatus
from .agent_call_response_tool_choice import AgentCallResponseToolChoiceParams
from .agent_response import AgentResponseParams
-import datetime as dt
-from ..types.log_status import LogStatus
+from .chat_message import ChatMessageParams
from .evaluator_log_response import EvaluatorLogResponseParams
from .log_response import LogResponseParams
diff --git a/src/humanloop/requests/agent_call_response_tool_choice.py b/src/humanloop/requests/agent_call_response_tool_choice.py
index 6cc9f9c4..906cdf4b 100644
--- a/src/humanloop/requests/agent_call_response_tool_choice.py
+++ b/src/humanloop/requests/agent_call_response_tool_choice.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from .tool_choice import ToolChoiceParams
AgentCallResponseToolChoiceParams = typing.Union[
diff --git a/src/humanloop/requests/agent_call_stream_response.py b/src/humanloop/requests/agent_call_stream_response.py
index 9555925d..9bc8d29c 100644
--- a/src/humanloop/requests/agent_call_stream_response.py
+++ b/src/humanloop/requests/agent_call_stream_response.py
@@ -1,10 +1,10 @@
# This file was auto-generated by Fern from our API Definition.
+import datetime as dt
+
import typing_extensions
-import typing_extensions
-from .agent_call_stream_response_payload import AgentCallStreamResponsePayloadParams
from ..types.event_type import EventType
-import datetime as dt
+from .agent_call_stream_response_payload import AgentCallStreamResponsePayloadParams
class AgentCallStreamResponseParams(typing_extensions.TypedDict):
diff --git a/src/humanloop/requests/agent_call_stream_response_payload.py b/src/humanloop/requests/agent_call_stream_response_payload.py
index 0e08a6f3..876525c3 100644
--- a/src/humanloop/requests/agent_call_stream_response_payload.py
+++ b/src/humanloop/requests/agent_call_stream_response_payload.py
@@ -1,8 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from .log_stream_response import LogStreamResponseParams
+
from .log_response import LogResponseParams
+from .log_stream_response import LogStreamResponseParams
from .tool_call import ToolCallParams
AgentCallStreamResponsePayloadParams = typing.Union[LogStreamResponseParams, LogResponseParams, ToolCallParams]
diff --git a/src/humanloop/requests/agent_continue_call_response.py b/src/humanloop/requests/agent_continue_call_response.py
index 90938dea..d30b4f39 100644
--- a/src/humanloop/requests/agent_continue_call_response.py
+++ b/src/humanloop/requests/agent_continue_call_response.py
@@ -1,13 +1,13 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
-import typing_extensions
-from .chat_message import ChatMessageParams
+import datetime as dt
import typing
+
+import typing_extensions
+from ..types.log_status import LogStatus
from .agent_continue_call_response_tool_choice import AgentContinueCallResponseToolChoiceParams
from .agent_response import AgentResponseParams
-import datetime as dt
-from ..types.log_status import LogStatus
+from .chat_message import ChatMessageParams
from .evaluator_log_response import EvaluatorLogResponseParams
from .log_response import LogResponseParams
diff --git a/src/humanloop/requests/agent_continue_call_response_tool_choice.py b/src/humanloop/requests/agent_continue_call_response_tool_choice.py
index 4722dd2e..2111fd9a 100644
--- a/src/humanloop/requests/agent_continue_call_response_tool_choice.py
+++ b/src/humanloop/requests/agent_continue_call_response_tool_choice.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from .tool_choice import ToolChoiceParams
AgentContinueCallResponseToolChoiceParams = typing.Union[
diff --git a/src/humanloop/requests/agent_continue_call_stream_response.py b/src/humanloop/requests/agent_continue_call_stream_response.py
index 3eb2b498..bf725bb5 100644
--- a/src/humanloop/requests/agent_continue_call_stream_response.py
+++ b/src/humanloop/requests/agent_continue_call_stream_response.py
@@ -1,10 +1,10 @@
# This file was auto-generated by Fern from our API Definition.
+import datetime as dt
+
import typing_extensions
-import typing_extensions
-from .agent_continue_call_stream_response_payload import AgentContinueCallStreamResponsePayloadParams
from ..types.event_type import EventType
-import datetime as dt
+from .agent_continue_call_stream_response_payload import AgentContinueCallStreamResponsePayloadParams
class AgentContinueCallStreamResponseParams(typing_extensions.TypedDict):
diff --git a/src/humanloop/requests/agent_continue_call_stream_response_payload.py b/src/humanloop/requests/agent_continue_call_stream_response_payload.py
index 87e1562b..e176905a 100644
--- a/src/humanloop/requests/agent_continue_call_stream_response_payload.py
+++ b/src/humanloop/requests/agent_continue_call_stream_response_payload.py
@@ -1,8 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from .log_stream_response import LogStreamResponseParams
+
from .log_response import LogResponseParams
+from .log_stream_response import LogStreamResponseParams
from .tool_call import ToolCallParams
AgentContinueCallStreamResponsePayloadParams = typing.Union[LogStreamResponseParams, LogResponseParams, ToolCallParams]
diff --git a/src/humanloop/requests/agent_continue_response.py b/src/humanloop/requests/agent_continue_response.py
deleted file mode 100644
index 8300667b..00000000
--- a/src/humanloop/requests/agent_continue_response.py
+++ /dev/null
@@ -1,202 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing_extensions
-import typing_extensions
-from .chat_message import ChatMessageParams
-import typing
-from .agent_continue_response_tool_choice import AgentContinueResponseToolChoiceParams
-from .agent_response import AgentResponseParams
-import datetime as dt
-from ..types.log_status import LogStatus
-from .evaluator_log_response import EvaluatorLogResponseParams
-from .log_response import LogResponseParams
-
-
-class AgentContinueResponseParams(typing_extensions.TypedDict):
- """
- Response model for continuing an Agent call.
- """
-
- output_message: typing_extensions.NotRequired[ChatMessageParams]
- """
- The message returned by the provider.
- """
-
- prompt_tokens: typing_extensions.NotRequired[int]
- """
- Number of tokens in the prompt used to generate the output.
- """
-
- reasoning_tokens: typing_extensions.NotRequired[int]
- """
- Number of reasoning tokens used to generate the output.
- """
-
- output_tokens: typing_extensions.NotRequired[int]
- """
- Number of tokens in the output generated by the model.
- """
-
- prompt_cost: typing_extensions.NotRequired[float]
- """
- Cost in dollars associated to the tokens in the prompt.
- """
-
- output_cost: typing_extensions.NotRequired[float]
- """
- Cost in dollars associated to the tokens in the output.
- """
-
- finish_reason: typing_extensions.NotRequired[str]
- """
- Reason the generation finished.
- """
-
- messages: typing_extensions.NotRequired[typing.Sequence[ChatMessageParams]]
- """
- The messages passed to the provider chat endpoint.
- """
-
- tool_choice: typing_extensions.NotRequired[AgentContinueResponseToolChoiceParams]
- """
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- """
-
- agent: AgentResponseParams
- """
- Agent that generated the Log.
- """
-
- start_time: typing_extensions.NotRequired[dt.datetime]
- """
- When the logged event started.
- """
-
- end_time: typing_extensions.NotRequired[dt.datetime]
- """
- When the logged event ended.
- """
-
- output: typing_extensions.NotRequired[str]
- """
- Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
- """
-
- created_at: typing_extensions.NotRequired[dt.datetime]
- """
- User defined timestamp for when the log was created.
- """
-
- error: typing_extensions.NotRequired[str]
- """
- Error message if the log is an error.
- """
-
- provider_latency: typing_extensions.NotRequired[float]
- """
- Duration of the logged event in seconds.
- """
-
- stdout: typing_extensions.NotRequired[str]
- """
- Captured log and debug statements.
- """
-
- provider_request: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
- """
- Raw request sent to provider.
- """
-
- provider_response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
- """
- Raw response received from the provider.
- """
-
- inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
- """
- The inputs passed to the prompt template.
- """
-
- source: typing_extensions.NotRequired[str]
- """
- Identifies where the model was called from.
- """
-
- metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
- """
- Any additional metadata to record.
- """
-
- log_status: typing_extensions.NotRequired[LogStatus]
- """
- Status of the Agent Log. If `incomplete`, the Agent turn was suspended due to a tool call and can be continued by calling /agents/continue with responses to the Agent's last message (which should contain tool calls). See the `previous_agent_message` field for easy access to the Agent's last message.
- """
-
- source_datapoint_id: typing_extensions.NotRequired[str]
- """
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
- """
-
- trace_parent_id: typing_extensions.NotRequired[str]
- """
- The ID of the parent Log to nest this Log under in a Trace.
- """
-
- batches: typing_extensions.NotRequired[typing.Sequence[str]]
- """
- Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations
- """
-
- user: typing_extensions.NotRequired[str]
- """
- End-user ID related to the Log.
- """
-
- environment: typing_extensions.NotRequired[str]
- """
- The name of the Environment the Log is associated to.
- """
-
- save: typing_extensions.NotRequired[bool]
- """
- Whether the request/response payloads will be stored on Humanloop.
- """
-
- log_id: typing_extensions.NotRequired[str]
- """
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- """
-
- id: str
- """
- Unique identifier for the Log.
- """
-
- evaluator_logs: typing.Sequence[EvaluatorLogResponseParams]
- """
- List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
- """
-
- trace_flow_id: typing_extensions.NotRequired[str]
- """
- Identifier for the Flow that the Trace belongs to.
- """
-
- trace_id: typing_extensions.NotRequired[str]
- """
- Identifier for the Trace that the Log belongs to.
- """
-
- trace_children: typing_extensions.NotRequired[typing.Sequence[LogResponseParams]]
- """
- Logs nested under this Log in the Trace.
- """
-
- previous_agent_message: typing_extensions.NotRequired[ChatMessageParams]
- """
- The Agent's last message, which should contain tool calls. Only populated if the Log is incomplete due to a suspended Agent turn with tool calls. This is useful for continuing the Agent call by calling /agents/continue.
- """
diff --git a/src/humanloop/requests/agent_continue_response_tool_choice.py b/src/humanloop/requests/agent_continue_response_tool_choice.py
deleted file mode 100644
index 24b044cc..00000000
--- a/src/humanloop/requests/agent_continue_response_tool_choice.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-from .tool_choice import ToolChoiceParams
-
-AgentContinueResponseToolChoiceParams = typing.Union[
- typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams
-]
diff --git a/src/humanloop/requests/agent_continue_stream_response.py b/src/humanloop/requests/agent_continue_stream_response.py
deleted file mode 100644
index 1038e000..00000000
--- a/src/humanloop/requests/agent_continue_stream_response.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing_extensions
-import typing_extensions
-from .agent_continue_stream_response_payload import AgentContinueStreamResponsePayloadParams
-from ..types.event_type import EventType
-import datetime as dt
-
-
-class AgentContinueStreamResponseParams(typing_extensions.TypedDict):
- """
- Response model for continuing an Agent call in streaming mode.
- """
-
- log_id: str
- message: str
- payload: typing_extensions.NotRequired[AgentContinueStreamResponsePayloadParams]
- type: EventType
- created_at: dt.datetime
diff --git a/src/humanloop/requests/agent_continue_stream_response_payload.py b/src/humanloop/requests/agent_continue_stream_response_payload.py
deleted file mode 100644
index ddd74c10..00000000
--- a/src/humanloop/requests/agent_continue_stream_response_payload.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-from .log_stream_response import LogStreamResponseParams
-from .log_response import LogResponseParams
-from .tool_call import ToolCallParams
-
-AgentContinueStreamResponsePayloadParams = typing.Union[LogStreamResponseParams, LogResponseParams, ToolCallParams]
diff --git a/src/humanloop/requests/agent_inline_tool.py b/src/humanloop/requests/agent_inline_tool.py
index 31f9401a..4d86d77e 100644
--- a/src/humanloop/requests/agent_inline_tool.py
+++ b/src/humanloop/requests/agent_inline_tool.py
@@ -1,10 +1,10 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
import typing
-from .tool_function import ToolFunctionParams
+
import typing_extensions
from ..types.on_agent_call_enum import OnAgentCallEnum
+from .tool_function import ToolFunctionParams
class AgentInlineToolParams(typing_extensions.TypedDict):
diff --git a/src/humanloop/requests/agent_kernel_request.py b/src/humanloop/requests/agent_kernel_request.py
index 0ca76571..8bc43e3d 100644
--- a/src/humanloop/requests/agent_kernel_request.py
+++ b/src/humanloop/requests/agent_kernel_request.py
@@ -1,16 +1,16 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
+import typing
+
import typing_extensions
from ..types.model_endpoints import ModelEndpoints
-from .agent_kernel_request_template import AgentKernelRequestTemplateParams
-from ..types.template_language import TemplateLanguage
from ..types.model_providers import ModelProviders
-from .agent_kernel_request_stop import AgentKernelRequestStopParams
-import typing
-from .response_format import ResponseFormatParams
+from ..types.template_language import TemplateLanguage
from .agent_kernel_request_reasoning_effort import AgentKernelRequestReasoningEffortParams
+from .agent_kernel_request_stop import AgentKernelRequestStopParams
+from .agent_kernel_request_template import AgentKernelRequestTemplateParams
from .agent_kernel_request_tools_item import AgentKernelRequestToolsItemParams
+from .response_format import ResponseFormatParams
class AgentKernelRequestParams(typing_extensions.TypedDict):
diff --git a/src/humanloop/requests/agent_kernel_request_reasoning_effort.py b/src/humanloop/requests/agent_kernel_request_reasoning_effort.py
index ea32bc11..ef446d7b 100644
--- a/src/humanloop/requests/agent_kernel_request_reasoning_effort.py
+++ b/src/humanloop/requests/agent_kernel_request_reasoning_effort.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from ..types.open_ai_reasoning_effort import OpenAiReasoningEffort
AgentKernelRequestReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/requests/agent_kernel_request_template.py b/src/humanloop/requests/agent_kernel_request_template.py
index 7261667d..875dc18b 100644
--- a/src/humanloop/requests/agent_kernel_request_template.py
+++ b/src/humanloop/requests/agent_kernel_request_template.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from .chat_message import ChatMessageParams
AgentKernelRequestTemplateParams = typing.Union[str, typing.Sequence[ChatMessageParams]]
diff --git a/src/humanloop/requests/agent_kernel_request_tools_item.py b/src/humanloop/requests/agent_kernel_request_tools_item.py
index 27b63984..5ee508f8 100644
--- a/src/humanloop/requests/agent_kernel_request_tools_item.py
+++ b/src/humanloop/requests/agent_kernel_request_tools_item.py
@@ -1,7 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from .agent_linked_file_request import AgentLinkedFileRequestParams
+
from .agent_inline_tool import AgentInlineToolParams
+from .agent_linked_file_request import AgentLinkedFileRequestParams
AgentKernelRequestToolsItemParams = typing.Union[AgentLinkedFileRequestParams, AgentInlineToolParams]
diff --git a/src/humanloop/requests/agent_linked_file_request.py b/src/humanloop/requests/agent_linked_file_request.py
index 18fc2274..e8950811 100644
--- a/src/humanloop/requests/agent_linked_file_request.py
+++ b/src/humanloop/requests/agent_linked_file_request.py
@@ -1,10 +1,10 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
import typing
-from .linked_file_request import LinkedFileRequestParams
+
import typing_extensions
from ..types.on_agent_call_enum import OnAgentCallEnum
+from .linked_file_request import LinkedFileRequestParams
class AgentLinkedFileRequestParams(typing_extensions.TypedDict):
diff --git a/src/humanloop/requests/agent_linked_file_response.py b/src/humanloop/requests/agent_linked_file_response.py
index 8a690a77..1bcc8128 100644
--- a/src/humanloop/requests/agent_linked_file_response.py
+++ b/src/humanloop/requests/agent_linked_file_response.py
@@ -1,12 +1,12 @@
# This file was auto-generated by Fern from our API Definition.
from __future__ import annotations
-import typing_extensions
+
import typing
-from .linked_file_request import LinkedFileRequestParams
+
import typing_extensions
from ..types.on_agent_call_enum import OnAgentCallEnum
-import typing
+from .linked_file_request import LinkedFileRequestParams
if typing.TYPE_CHECKING:
from .agent_linked_file_response_file import AgentLinkedFileResponseFileParams
diff --git a/src/humanloop/requests/agent_linked_file_response_file.py b/src/humanloop/requests/agent_linked_file_response_file.py
index bb328de2..25c71dbe 100644
--- a/src/humanloop/requests/agent_linked_file_response_file.py
+++ b/src/humanloop/requests/agent_linked_file_response_file.py
@@ -1,16 +1,17 @@
# This file was auto-generated by Fern from our API Definition.
from __future__ import annotations
+
import typing
+
from .dataset_response import DatasetResponseParams
-import typing
if typing.TYPE_CHECKING:
- from .prompt_response import PromptResponseParams
- from .tool_response import ToolResponseParams
+ from .agent_response import AgentResponseParams
from .evaluator_response import EvaluatorResponseParams
from .flow_response import FlowResponseParams
- from .agent_response import AgentResponseParams
+ from .prompt_response import PromptResponseParams
+ from .tool_response import ToolResponseParams
AgentLinkedFileResponseFileParams = typing.Union[
"PromptResponseParams",
"ToolResponseParams",
diff --git a/src/humanloop/requests/agent_log_response.py b/src/humanloop/requests/agent_log_response.py
index 0cb24b8a..940f348f 100644
--- a/src/humanloop/requests/agent_log_response.py
+++ b/src/humanloop/requests/agent_log_response.py
@@ -1,15 +1,15 @@
# This file was auto-generated by Fern from our API Definition.
from __future__ import annotations
-import typing_extensions
-import typing_extensions
-from .chat_message import ChatMessageParams
+
+import datetime as dt
import typing
+
+import typing_extensions
+from ..types.log_status import LogStatus
from .agent_log_response_tool_choice import AgentLogResponseToolChoiceParams
from .agent_response import AgentResponseParams
-import datetime as dt
-from ..types.log_status import LogStatus
-import typing
+from .chat_message import ChatMessageParams
if typing.TYPE_CHECKING:
from .evaluator_log_response import EvaluatorLogResponseParams
diff --git a/src/humanloop/requests/agent_log_response_tool_choice.py b/src/humanloop/requests/agent_log_response_tool_choice.py
index e239a69c..30ebcb72 100644
--- a/src/humanloop/requests/agent_log_response_tool_choice.py
+++ b/src/humanloop/requests/agent_log_response_tool_choice.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from .tool_choice import ToolChoiceParams
AgentLogResponseToolChoiceParams = typing.Union[
diff --git a/src/humanloop/requests/agent_log_stream_response.py b/src/humanloop/requests/agent_log_stream_response.py
index 710d55cf..cd35485e 100644
--- a/src/humanloop/requests/agent_log_stream_response.py
+++ b/src/humanloop/requests/agent_log_stream_response.py
@@ -1,8 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
-import typing_extensions
import datetime as dt
+
+import typing_extensions
from .chat_message import ChatMessageParams
diff --git a/src/humanloop/requests/agent_response.py b/src/humanloop/requests/agent_response.py
index 047904a7..10f47b80 100644
--- a/src/humanloop/requests/agent_response.py
+++ b/src/humanloop/requests/agent_response.py
@@ -1,23 +1,23 @@
# This file was auto-generated by Fern from our API Definition.
from __future__ import annotations
-import typing_extensions
+
+import datetime as dt
+import typing
+
import typing_extensions
from ..types.model_endpoints import ModelEndpoints
-from .agent_response_template import AgentResponseTemplateParams
-from ..types.template_language import TemplateLanguage
from ..types.model_providers import ModelProviders
-from .agent_response_stop import AgentResponseStopParams
-import typing
-from .response_format import ResponseFormatParams
-from .agent_response_reasoning_effort import AgentResponseReasoningEffortParams
-from .environment_response import EnvironmentResponseParams
-import datetime as dt
+from ..types.template_language import TemplateLanguage
from ..types.user_response import UserResponse
from ..types.version_status import VersionStatus
-from .input_response import InputResponseParams
+from .agent_response_reasoning_effort import AgentResponseReasoningEffortParams
+from .agent_response_stop import AgentResponseStopParams
+from .agent_response_template import AgentResponseTemplateParams
+from .environment_response import EnvironmentResponseParams
from .evaluator_aggregate import EvaluatorAggregateParams
-import typing
+from .input_response import InputResponseParams
+from .response_format import ResponseFormatParams
if typing.TYPE_CHECKING:
from .agent_response_tools_item import AgentResponseToolsItemParams
@@ -235,3 +235,8 @@ class AgentResponseParams(typing_extensions.TypedDict):
"""
Aggregation of Evaluator results for the Agent Version.
"""
+
+ raw_file_content: typing_extensions.NotRequired[str]
+ """
+ The raw content of the Agent. Corresponds to the .agent file.
+ """
diff --git a/src/humanloop/requests/agent_response_reasoning_effort.py b/src/humanloop/requests/agent_response_reasoning_effort.py
index de1b969f..a32f2ecf 100644
--- a/src/humanloop/requests/agent_response_reasoning_effort.py
+++ b/src/humanloop/requests/agent_response_reasoning_effort.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from ..types.open_ai_reasoning_effort import OpenAiReasoningEffort
AgentResponseReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/requests/agent_response_template.py b/src/humanloop/requests/agent_response_template.py
index 94be65f1..3998be1b 100644
--- a/src/humanloop/requests/agent_response_template.py
+++ b/src/humanloop/requests/agent_response_template.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from .chat_message import ChatMessageParams
AgentResponseTemplateParams = typing.Union[str, typing.Sequence[ChatMessageParams]]
diff --git a/src/humanloop/requests/agent_response_tools_item.py b/src/humanloop/requests/agent_response_tools_item.py
index 5181579b..87e1e036 100644
--- a/src/humanloop/requests/agent_response_tools_item.py
+++ b/src/humanloop/requests/agent_response_tools_item.py
@@ -1,9 +1,10 @@
# This file was auto-generated by Fern from our API Definition.
from __future__ import annotations
+
import typing
+
from .agent_inline_tool import AgentInlineToolParams
-import typing
if typing.TYPE_CHECKING:
from .agent_linked_file_response import AgentLinkedFileResponseParams
diff --git a/src/humanloop/requests/anthropic_redacted_thinking_content.py b/src/humanloop/requests/anthropic_redacted_thinking_content.py
index 3b328f7f..b71f614e 100644
--- a/src/humanloop/requests/anthropic_redacted_thinking_content.py
+++ b/src/humanloop/requests/anthropic_redacted_thinking_content.py
@@ -1,8 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
import typing
+import typing_extensions
+
class AnthropicRedactedThinkingContentParams(typing_extensions.TypedDict):
type: typing.Literal["redacted_thinking"]
diff --git a/src/humanloop/requests/anthropic_thinking_content.py b/src/humanloop/requests/anthropic_thinking_content.py
index 34f6f99f..23fdffb6 100644
--- a/src/humanloop/requests/anthropic_thinking_content.py
+++ b/src/humanloop/requests/anthropic_thinking_content.py
@@ -1,8 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
import typing
+import typing_extensions
+
class AnthropicThinkingContentParams(typing_extensions.TypedDict):
type: typing.Literal["thinking"]
diff --git a/src/humanloop/requests/chat_message.py b/src/humanloop/requests/chat_message.py
index 6011653a..eeb6c7cd 100644
--- a/src/humanloop/requests/chat_message.py
+++ b/src/humanloop/requests/chat_message.py
@@ -1,12 +1,12 @@
# This file was auto-generated by Fern from our API Definition.
+import typing
+
import typing_extensions
-import typing_extensions
-from .chat_message_content import ChatMessageContentParams
from ..types.chat_role import ChatRole
-import typing
-from .tool_call import ToolCallParams
+from .chat_message_content import ChatMessageContentParams
from .chat_message_thinking_item import ChatMessageThinkingItemParams
+from .tool_call import ToolCallParams
class ChatMessageParams(typing_extensions.TypedDict):
diff --git a/src/humanloop/requests/chat_message_content.py b/src/humanloop/requests/chat_message_content.py
index 155e1e54..ea04974e 100644
--- a/src/humanloop/requests/chat_message_content.py
+++ b/src/humanloop/requests/chat_message_content.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from .chat_message_content_item import ChatMessageContentItemParams
ChatMessageContentParams = typing.Union[str, typing.Sequence[ChatMessageContentItemParams]]
diff --git a/src/humanloop/requests/chat_message_content_item.py b/src/humanloop/requests/chat_message_content_item.py
index b10b9849..c4a24ea7 100644
--- a/src/humanloop/requests/chat_message_content_item.py
+++ b/src/humanloop/requests/chat_message_content_item.py
@@ -1,7 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from .text_chat_content import TextChatContentParams
+
from .image_chat_content import ImageChatContentParams
+from .text_chat_content import TextChatContentParams
ChatMessageContentItemParams = typing.Union[TextChatContentParams, ImageChatContentParams]
diff --git a/src/humanloop/requests/chat_message_thinking_item.py b/src/humanloop/requests/chat_message_thinking_item.py
index 0691f4d8..0c54d371 100644
--- a/src/humanloop/requests/chat_message_thinking_item.py
+++ b/src/humanloop/requests/chat_message_thinking_item.py
@@ -1,7 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from .anthropic_thinking_content import AnthropicThinkingContentParams
+
from .anthropic_redacted_thinking_content import AnthropicRedactedThinkingContentParams
+from .anthropic_thinking_content import AnthropicThinkingContentParams
ChatMessageThinkingItemParams = typing.Union[AnthropicThinkingContentParams, AnthropicRedactedThinkingContentParams]
diff --git a/src/humanloop/requests/code_evaluator_request.py b/src/humanloop/requests/code_evaluator_request.py
index 363a2fc7..914d8f46 100644
--- a/src/humanloop/requests/code_evaluator_request.py
+++ b/src/humanloop/requests/code_evaluator_request.py
@@ -1,13 +1,13 @@
# This file was auto-generated by Fern from our API Definition.
+import typing
+
import typing_extensions
from ..types.evaluator_arguments_type import EvaluatorArgumentsType
from ..types.evaluator_return_type_enum import EvaluatorReturnTypeEnum
-import typing_extensions
-import typing
-from .evaluator_judgment_option_response import EvaluatorJudgmentOptionResponseParams
-from .evaluator_judgment_number_limit import EvaluatorJudgmentNumberLimitParams
from ..types.valence import Valence
+from .evaluator_judgment_number_limit import EvaluatorJudgmentNumberLimitParams
+from .evaluator_judgment_option_response import EvaluatorJudgmentOptionResponseParams
class CodeEvaluatorRequestParams(typing_extensions.TypedDict):
diff --git a/src/humanloop/requests/create_agent_log_response.py b/src/humanloop/requests/create_agent_log_response.py
index b1715517..f68f2e96 100644
--- a/src/humanloop/requests/create_agent_log_response.py
+++ b/src/humanloop/requests/create_agent_log_response.py
@@ -1,6 +1,5 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
import typing_extensions
from ..types.log_status import LogStatus
diff --git a/src/humanloop/requests/create_datapoint_request.py b/src/humanloop/requests/create_datapoint_request.py
index 8e9d5005..10ada080 100644
--- a/src/humanloop/requests/create_datapoint_request.py
+++ b/src/humanloop/requests/create_datapoint_request.py
@@ -1,8 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
-import typing_extensions
import typing
+
+import typing_extensions
from .chat_message import ChatMessageParams
from .create_datapoint_request_target_value import CreateDatapointRequestTargetValueParams
diff --git a/src/humanloop/requests/create_evaluator_log_response.py b/src/humanloop/requests/create_evaluator_log_response.py
index 177e957d..29fbcdc5 100644
--- a/src/humanloop/requests/create_evaluator_log_response.py
+++ b/src/humanloop/requests/create_evaluator_log_response.py
@@ -1,7 +1,6 @@
# This file was auto-generated by Fern from our API Definition.
import typing_extensions
-import typing_extensions
class CreateEvaluatorLogResponseParams(typing_extensions.TypedDict):
diff --git a/src/humanloop/requests/create_flow_log_response.py b/src/humanloop/requests/create_flow_log_response.py
index eb51c4c0..6f490ba3 100644
--- a/src/humanloop/requests/create_flow_log_response.py
+++ b/src/humanloop/requests/create_flow_log_response.py
@@ -1,6 +1,5 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
import typing_extensions
from ..types.log_status import LogStatus
diff --git a/src/humanloop/requests/create_prompt_log_response.py b/src/humanloop/requests/create_prompt_log_response.py
index 507e6d6c..8a0b39d3 100644
--- a/src/humanloop/requests/create_prompt_log_response.py
+++ b/src/humanloop/requests/create_prompt_log_response.py
@@ -1,7 +1,6 @@
# This file was auto-generated by Fern from our API Definition.
import typing_extensions
-import typing_extensions
class CreatePromptLogResponseParams(typing_extensions.TypedDict):
diff --git a/src/humanloop/requests/create_tool_log_response.py b/src/humanloop/requests/create_tool_log_response.py
index 01ed522c..9b898fba 100644
--- a/src/humanloop/requests/create_tool_log_response.py
+++ b/src/humanloop/requests/create_tool_log_response.py
@@ -1,7 +1,6 @@
# This file was auto-generated by Fern from our API Definition.
import typing_extensions
-import typing_extensions
class CreateToolLogResponseParams(typing_extensions.TypedDict):
diff --git a/src/humanloop/requests/dashboard_configuration.py b/src/humanloop/requests/dashboard_configuration.py
index dc8d1bf0..b123ac78 100644
--- a/src/humanloop/requests/dashboard_configuration.py
+++ b/src/humanloop/requests/dashboard_configuration.py
@@ -1,8 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
+import typing
+
import typing_extensions
from ..types.time_unit import TimeUnit
-import typing
class DashboardConfigurationParams(typing_extensions.TypedDict):
diff --git a/src/humanloop/requests/datapoint_response.py b/src/humanloop/requests/datapoint_response.py
index 37e18107..ba1928e9 100644
--- a/src/humanloop/requests/datapoint_response.py
+++ b/src/humanloop/requests/datapoint_response.py
@@ -1,8 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
-import typing_extensions
import typing
+
+import typing_extensions
from .chat_message import ChatMessageParams
from .datapoint_response_target_value import DatapointResponseTargetValueParams
diff --git a/src/humanloop/requests/dataset_response.py b/src/humanloop/requests/dataset_response.py
index 1cffd2b2..aa0119e9 100644
--- a/src/humanloop/requests/dataset_response.py
+++ b/src/humanloop/requests/dataset_response.py
@@ -1,12 +1,12 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
-import typing_extensions
-import typing
-from .environment_response import EnvironmentResponseParams
import datetime as dt
+import typing
+
+import typing_extensions
from ..types.user_response import UserResponse
from .datapoint_response import DatapointResponseParams
+from .environment_response import EnvironmentResponseParams
class DatasetResponseParams(typing_extensions.TypedDict):
diff --git a/src/humanloop/requests/directory_response.py b/src/humanloop/requests/directory_response.py
index 0eb1f6ad..4dc4a7d5 100644
--- a/src/humanloop/requests/directory_response.py
+++ b/src/humanloop/requests/directory_response.py
@@ -1,9 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
-import typing_extensions
-import typing
import datetime as dt
+import typing
+
+import typing_extensions
class DirectoryResponseParams(typing_extensions.TypedDict):
diff --git a/src/humanloop/requests/directory_with_parents_and_children_response.py b/src/humanloop/requests/directory_with_parents_and_children_response.py
index 782b7a2a..27af28b6 100644
--- a/src/humanloop/requests/directory_with_parents_and_children_response.py
+++ b/src/humanloop/requests/directory_with_parents_and_children_response.py
@@ -1,9 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
-import typing_extensions
-import typing
import datetime as dt
+import typing
+
+import typing_extensions
from .directory_response import DirectoryResponseParams
from .directory_with_parents_and_children_response_files_item import (
DirectoryWithParentsAndChildrenResponseFilesItemParams,
diff --git a/src/humanloop/requests/directory_with_parents_and_children_response_files_item.py b/src/humanloop/requests/directory_with_parents_and_children_response_files_item.py
index db9370b9..1ebe44fc 100644
--- a/src/humanloop/requests/directory_with_parents_and_children_response_files_item.py
+++ b/src/humanloop/requests/directory_with_parents_and_children_response_files_item.py
@@ -1,12 +1,13 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from .prompt_response import PromptResponseParams
-from .tool_response import ToolResponseParams
-from .evaluator_response import EvaluatorResponseParams
+
+from .agent_response import AgentResponseParams
from .dataset_response import DatasetResponseParams
+from .evaluator_response import EvaluatorResponseParams
from .flow_response import FlowResponseParams
-from .agent_response import AgentResponseParams
+from .prompt_response import PromptResponseParams
+from .tool_response import ToolResponseParams
DirectoryWithParentsAndChildrenResponseFilesItemParams = typing.Union[
PromptResponseParams,
diff --git a/src/humanloop/requests/environment_response.py b/src/humanloop/requests/environment_response.py
index f0842a5e..0c74481e 100644
--- a/src/humanloop/requests/environment_response.py
+++ b/src/humanloop/requests/environment_response.py
@@ -1,7 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
import datetime as dt
+
+import typing_extensions
from ..types.environment_tag import EnvironmentTag
diff --git a/src/humanloop/requests/evaluatee_request.py b/src/humanloop/requests/evaluatee_request.py
index 3141b48f..d7544be1 100644
--- a/src/humanloop/requests/evaluatee_request.py
+++ b/src/humanloop/requests/evaluatee_request.py
@@ -1,7 +1,6 @@
# This file was auto-generated by Fern from our API Definition.
import typing_extensions
-import typing_extensions
class EvaluateeRequestParams(typing_extensions.TypedDict):
diff --git a/src/humanloop/requests/evaluatee_response.py b/src/humanloop/requests/evaluatee_response.py
index cd818c4b..fb860a37 100644
--- a/src/humanloop/requests/evaluatee_response.py
+++ b/src/humanloop/requests/evaluatee_response.py
@@ -1,9 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
+import datetime as dt
+
import typing_extensions
from .run_version_response import RunVersionResponseParams
-import datetime as dt
class EvaluateeResponseParams(typing_extensions.TypedDict):
diff --git a/src/humanloop/requests/evaluation_evaluator_response.py b/src/humanloop/requests/evaluation_evaluator_response.py
index b2f1c60f..3d40ba33 100644
--- a/src/humanloop/requests/evaluation_evaluator_response.py
+++ b/src/humanloop/requests/evaluation_evaluator_response.py
@@ -1,8 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
+import datetime as dt
+
import typing_extensions
from .evaluator_response import EvaluatorResponseParams
-import datetime as dt
class EvaluationEvaluatorResponseParams(typing_extensions.TypedDict):
diff --git a/src/humanloop/requests/evaluation_log_response.py b/src/humanloop/requests/evaluation_log_response.py
index e423d2f7..5bbd0649 100644
--- a/src/humanloop/requests/evaluation_log_response.py
+++ b/src/humanloop/requests/evaluation_log_response.py
@@ -1,10 +1,10 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
+import typing
+
import typing_extensions
from .datapoint_response import DatapointResponseParams
from .log_response import LogResponseParams
-import typing
class EvaluationLogResponseParams(typing_extensions.TypedDict):
diff --git a/src/humanloop/requests/evaluation_response.py b/src/humanloop/requests/evaluation_response.py
index 27d9da73..4c077927 100644
--- a/src/humanloop/requests/evaluation_response.py
+++ b/src/humanloop/requests/evaluation_response.py
@@ -1,11 +1,11 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
+import datetime as dt
import typing
-from .evaluation_evaluator_response import EvaluationEvaluatorResponseParams
+
import typing_extensions
-import datetime as dt
from ..types.user_response import UserResponse
+from .evaluation_evaluator_response import EvaluationEvaluatorResponseParams
class EvaluationResponseParams(typing_extensions.TypedDict):
diff --git a/src/humanloop/requests/evaluation_run_response.py b/src/humanloop/requests/evaluation_run_response.py
index 98ccfd75..5dd7c782 100644
--- a/src/humanloop/requests/evaluation_run_response.py
+++ b/src/humanloop/requests/evaluation_run_response.py
@@ -1,12 +1,12 @@
# This file was auto-generated by Fern from our API Definition.
+import datetime as dt
+
import typing_extensions
-import typing_extensions
+from ..types.evaluation_status import EvaluationStatus
+from ..types.user_response import UserResponse
from .dataset_response import DatasetResponseParams
from .run_version_response import RunVersionResponseParams
-import datetime as dt
-from ..types.user_response import UserResponse
-from ..types.evaluation_status import EvaluationStatus
class EvaluationRunResponseParams(typing_extensions.TypedDict):
diff --git a/src/humanloop/requests/evaluation_runs_response.py b/src/humanloop/requests/evaluation_runs_response.py
index a6e86d68..fd3d4792 100644
--- a/src/humanloop/requests/evaluation_runs_response.py
+++ b/src/humanloop/requests/evaluation_runs_response.py
@@ -1,7 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
import typing
+
+import typing_extensions
from .evaluation_run_response import EvaluationRunResponseParams
diff --git a/src/humanloop/requests/evaluation_stats.py b/src/humanloop/requests/evaluation_stats.py
index 0a5a6a4a..edd56e15 100644
--- a/src/humanloop/requests/evaluation_stats.py
+++ b/src/humanloop/requests/evaluation_stats.py
@@ -1,10 +1,10 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
import typing
-from .run_stats_response import RunStatsResponseParams
+
import typing_extensions
from ..types.evaluation_status import EvaluationStatus
+from .run_stats_response import RunStatsResponseParams
class EvaluationStatsParams(typing_extensions.TypedDict):
diff --git a/src/humanloop/requests/evaluator_activation_deactivation_request.py b/src/humanloop/requests/evaluator_activation_deactivation_request.py
index 819cc802..b3f3f91d 100644
--- a/src/humanloop/requests/evaluator_activation_deactivation_request.py
+++ b/src/humanloop/requests/evaluator_activation_deactivation_request.py
@@ -1,8 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
-import typing_extensions
import typing
+
+import typing_extensions
from .evaluator_activation_deactivation_request_activate_item import (
EvaluatorActivationDeactivationRequestActivateItemParams,
)
diff --git a/src/humanloop/requests/evaluator_activation_deactivation_request_activate_item.py b/src/humanloop/requests/evaluator_activation_deactivation_request_activate_item.py
index dd458ef3..049c4cc8 100644
--- a/src/humanloop/requests/evaluator_activation_deactivation_request_activate_item.py
+++ b/src/humanloop/requests/evaluator_activation_deactivation_request_activate_item.py
@@ -1,8 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from .monitoring_evaluator_version_request import MonitoringEvaluatorVersionRequestParams
+
from .monitoring_evaluator_environment_request import MonitoringEvaluatorEnvironmentRequestParams
+from .monitoring_evaluator_version_request import MonitoringEvaluatorVersionRequestParams
EvaluatorActivationDeactivationRequestActivateItemParams = typing.Union[
MonitoringEvaluatorVersionRequestParams, MonitoringEvaluatorEnvironmentRequestParams
diff --git a/src/humanloop/requests/evaluator_activation_deactivation_request_deactivate_item.py b/src/humanloop/requests/evaluator_activation_deactivation_request_deactivate_item.py
index b9a4a547..4a21dcaf 100644
--- a/src/humanloop/requests/evaluator_activation_deactivation_request_deactivate_item.py
+++ b/src/humanloop/requests/evaluator_activation_deactivation_request_deactivate_item.py
@@ -1,8 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from .monitoring_evaluator_version_request import MonitoringEvaluatorVersionRequestParams
+
from .monitoring_evaluator_environment_request import MonitoringEvaluatorEnvironmentRequestParams
+from .monitoring_evaluator_version_request import MonitoringEvaluatorVersionRequestParams
EvaluatorActivationDeactivationRequestDeactivateItemParams = typing.Union[
MonitoringEvaluatorVersionRequestParams, MonitoringEvaluatorEnvironmentRequestParams
diff --git a/src/humanloop/requests/evaluator_aggregate.py b/src/humanloop/requests/evaluator_aggregate.py
index abf1cf63..f8840d4f 100644
--- a/src/humanloop/requests/evaluator_aggregate.py
+++ b/src/humanloop/requests/evaluator_aggregate.py
@@ -1,8 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
import datetime as dt
+import typing_extensions
+
class EvaluatorAggregateParams(typing_extensions.TypedDict):
value: float
diff --git a/src/humanloop/requests/evaluator_file_id.py b/src/humanloop/requests/evaluator_file_id.py
index f31eb14e..952eda84 100644
--- a/src/humanloop/requests/evaluator_file_id.py
+++ b/src/humanloop/requests/evaluator_file_id.py
@@ -1,7 +1,6 @@
# This file was auto-generated by Fern from our API Definition.
import typing_extensions
-import typing_extensions
class EvaluatorFileIdParams(typing_extensions.TypedDict):
diff --git a/src/humanloop/requests/evaluator_file_path.py b/src/humanloop/requests/evaluator_file_path.py
index 84aec171..0b1a06c9 100644
--- a/src/humanloop/requests/evaluator_file_path.py
+++ b/src/humanloop/requests/evaluator_file_path.py
@@ -1,7 +1,6 @@
# This file was auto-generated by Fern from our API Definition.
import typing_extensions
-import typing_extensions
class EvaluatorFilePathParams(typing_extensions.TypedDict):
diff --git a/src/humanloop/requests/evaluator_judgment_number_limit.py b/src/humanloop/requests/evaluator_judgment_number_limit.py
index 4de95994..3cdd87db 100644
--- a/src/humanloop/requests/evaluator_judgment_number_limit.py
+++ b/src/humanloop/requests/evaluator_judgment_number_limit.py
@@ -1,7 +1,6 @@
# This file was auto-generated by Fern from our API Definition.
import typing_extensions
-import typing_extensions
class EvaluatorJudgmentNumberLimitParams(typing_extensions.TypedDict):
diff --git a/src/humanloop/requests/evaluator_judgment_option_response.py b/src/humanloop/requests/evaluator_judgment_option_response.py
index 13836745..77724406 100644
--- a/src/humanloop/requests/evaluator_judgment_option_response.py
+++ b/src/humanloop/requests/evaluator_judgment_option_response.py
@@ -1,6 +1,5 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
import typing_extensions
from ..types.valence import Valence
diff --git a/src/humanloop/requests/evaluator_log_response.py b/src/humanloop/requests/evaluator_log_response.py
index 81527903..c434280e 100644
--- a/src/humanloop/requests/evaluator_log_response.py
+++ b/src/humanloop/requests/evaluator_log_response.py
@@ -1,15 +1,15 @@
# This file was auto-generated by Fern from our API Definition.
from __future__ import annotations
-import typing_extensions
-import typing_extensions
+
import datetime as dt
import typing
+
+import typing_extensions
from ..types.log_status import LogStatus
from .chat_message import ChatMessageParams
from .evaluator_log_response_judgment import EvaluatorLogResponseJudgmentParams
from .evaluator_response import EvaluatorResponseParams
-import typing
if typing.TYPE_CHECKING:
from .log_response import LogResponseParams
diff --git a/src/humanloop/requests/evaluator_response.py b/src/humanloop/requests/evaluator_response.py
index 1ff836fb..38093ae5 100644
--- a/src/humanloop/requests/evaluator_response.py
+++ b/src/humanloop/requests/evaluator_response.py
@@ -1,16 +1,16 @@
# This file was auto-generated by Fern from our API Definition.
from __future__ import annotations
-import typing_extensions
-import typing_extensions
-from .evaluator_response_spec import EvaluatorResponseSpecParams
-import typing
-from .environment_response import EnvironmentResponseParams
+
import datetime as dt
+import typing
+
+import typing_extensions
from ..types.user_response import UserResponse
-from .input_response import InputResponseParams
+from .environment_response import EnvironmentResponseParams
from .evaluator_aggregate import EvaluatorAggregateParams
-import typing
+from .evaluator_response_spec import EvaluatorResponseSpecParams
+from .input_response import InputResponseParams
if typing.TYPE_CHECKING:
from .monitoring_evaluator_response import MonitoringEvaluatorResponseParams
diff --git a/src/humanloop/requests/evaluator_response_spec.py b/src/humanloop/requests/evaluator_response_spec.py
index b60bf1d2..72cf3d82 100644
--- a/src/humanloop/requests/evaluator_response_spec.py
+++ b/src/humanloop/requests/evaluator_response_spec.py
@@ -1,10 +1,11 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from .llm_evaluator_request import LlmEvaluatorRequestParams
+
from .code_evaluator_request import CodeEvaluatorRequestParams
-from .human_evaluator_request import HumanEvaluatorRequestParams
from .external_evaluator_request import ExternalEvaluatorRequestParams
+from .human_evaluator_request import HumanEvaluatorRequestParams
+from .llm_evaluator_request import LlmEvaluatorRequestParams
EvaluatorResponseSpecParams = typing.Union[
LlmEvaluatorRequestParams, CodeEvaluatorRequestParams, HumanEvaluatorRequestParams, ExternalEvaluatorRequestParams
diff --git a/src/humanloop/requests/evaluator_version_id.py b/src/humanloop/requests/evaluator_version_id.py
index 6e2d8351..94700595 100644
--- a/src/humanloop/requests/evaluator_version_id.py
+++ b/src/humanloop/requests/evaluator_version_id.py
@@ -1,7 +1,6 @@
# This file was auto-generated by Fern from our API Definition.
import typing_extensions
-import typing_extensions
class EvaluatorVersionIdParams(typing_extensions.TypedDict):
diff --git a/src/humanloop/requests/external_evaluator_request.py b/src/humanloop/requests/external_evaluator_request.py
index bfe2d9c7..6e77103f 100644
--- a/src/humanloop/requests/external_evaluator_request.py
+++ b/src/humanloop/requests/external_evaluator_request.py
@@ -1,13 +1,13 @@
# This file was auto-generated by Fern from our API Definition.
+import typing
+
import typing_extensions
from ..types.evaluator_arguments_type import EvaluatorArgumentsType
from ..types.evaluator_return_type_enum import EvaluatorReturnTypeEnum
-import typing_extensions
-import typing
-from .evaluator_judgment_option_response import EvaluatorJudgmentOptionResponseParams
-from .evaluator_judgment_number_limit import EvaluatorJudgmentNumberLimitParams
from ..types.valence import Valence
+from .evaluator_judgment_number_limit import EvaluatorJudgmentNumberLimitParams
+from .evaluator_judgment_option_response import EvaluatorJudgmentOptionResponseParams
class ExternalEvaluatorRequestParams(typing_extensions.TypedDict):
diff --git a/src/humanloop/requests/file_environment_response.py b/src/humanloop/requests/file_environment_response.py
index b243ec1c..40b60bc7 100644
--- a/src/humanloop/requests/file_environment_response.py
+++ b/src/humanloop/requests/file_environment_response.py
@@ -1,9 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
import datetime as dt
-from ..types.environment_tag import EnvironmentTag
+
import typing_extensions
+from ..types.environment_tag import EnvironmentTag
from .file_environment_response_file import FileEnvironmentResponseFileParams
diff --git a/src/humanloop/requests/file_environment_response_file.py b/src/humanloop/requests/file_environment_response_file.py
index 04c0b51d..1a2021cb 100644
--- a/src/humanloop/requests/file_environment_response_file.py
+++ b/src/humanloop/requests/file_environment_response_file.py
@@ -1,12 +1,13 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from .prompt_response import PromptResponseParams
-from .tool_response import ToolResponseParams
+
+from .agent_response import AgentResponseParams
from .dataset_response import DatasetResponseParams
from .evaluator_response import EvaluatorResponseParams
from .flow_response import FlowResponseParams
-from .agent_response import AgentResponseParams
+from .prompt_response import PromptResponseParams
+from .tool_response import ToolResponseParams
FileEnvironmentResponseFileParams = typing.Union[
PromptResponseParams,
diff --git a/src/humanloop/requests/file_id.py b/src/humanloop/requests/file_id.py
index 68c7a609..d6c39755 100644
--- a/src/humanloop/requests/file_id.py
+++ b/src/humanloop/requests/file_id.py
@@ -1,7 +1,6 @@
# This file was auto-generated by Fern from our API Definition.
import typing_extensions
-import typing_extensions
class FileIdParams(typing_extensions.TypedDict):
diff --git a/src/humanloop/requests/file_path.py b/src/humanloop/requests/file_path.py
index 2ea77a9f..238927d8 100644
--- a/src/humanloop/requests/file_path.py
+++ b/src/humanloop/requests/file_path.py
@@ -1,7 +1,6 @@
# This file was auto-generated by Fern from our API Definition.
import typing_extensions
-import typing_extensions
class FilePathParams(typing_extensions.TypedDict):
diff --git a/src/humanloop/requests/file_request.py b/src/humanloop/requests/file_request.py
index ebbf8108..91e730d6 100644
--- a/src/humanloop/requests/file_request.py
+++ b/src/humanloop/requests/file_request.py
@@ -1,7 +1,6 @@
# This file was auto-generated by Fern from our API Definition.
import typing_extensions
-import typing_extensions
class FileRequestParams(typing_extensions.TypedDict):
diff --git a/src/humanloop/requests/flow_kernel_request.py b/src/humanloop/requests/flow_kernel_request.py
index fc7618b1..0a2b7993 100644
--- a/src/humanloop/requests/flow_kernel_request.py
+++ b/src/humanloop/requests/flow_kernel_request.py
@@ -1,8 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
import typing
+import typing_extensions
+
class FlowKernelRequestParams(typing_extensions.TypedDict):
attributes: typing.Dict[str, typing.Optional[typing.Any]]
diff --git a/src/humanloop/requests/flow_log_response.py b/src/humanloop/requests/flow_log_response.py
index ffb81054..661fc301 100644
--- a/src/humanloop/requests/flow_log_response.py
+++ b/src/humanloop/requests/flow_log_response.py
@@ -1,14 +1,14 @@
# This file was auto-generated by Fern from our API Definition.
from __future__ import annotations
-import typing_extensions
-import typing_extensions
-import typing
-from .chat_message import ChatMessageParams
+
import datetime as dt
+import typing
+
+import typing_extensions
from ..types.log_status import LogStatus
+from .chat_message import ChatMessageParams
from .flow_response import FlowResponseParams
-import typing
if typing.TYPE_CHECKING:
from .evaluator_log_response import EvaluatorLogResponseParams
diff --git a/src/humanloop/requests/flow_response.py b/src/humanloop/requests/flow_response.py
index eebc9fd7..19087c61 100644
--- a/src/humanloop/requests/flow_response.py
+++ b/src/humanloop/requests/flow_response.py
@@ -1,14 +1,14 @@
# This file was auto-generated by Fern from our API Definition.
from __future__ import annotations
-import typing_extensions
-import typing_extensions
-import typing
-from .environment_response import EnvironmentResponseParams
+
import datetime as dt
+import typing
+
+import typing_extensions
from ..types.user_response import UserResponse
+from .environment_response import EnvironmentResponseParams
from .evaluator_aggregate import EvaluatorAggregateParams
-import typing
if typing.TYPE_CHECKING:
from .monitoring_evaluator_response import MonitoringEvaluatorResponseParams
diff --git a/src/humanloop/requests/function_tool.py b/src/humanloop/requests/function_tool.py
index 35cf420a..473b2b6e 100644
--- a/src/humanloop/requests/function_tool.py
+++ b/src/humanloop/requests/function_tool.py
@@ -1,7 +1,6 @@
# This file was auto-generated by Fern from our API Definition.
import typing_extensions
-import typing_extensions
class FunctionToolParams(typing_extensions.TypedDict):
diff --git a/src/humanloop/requests/http_validation_error.py b/src/humanloop/requests/http_validation_error.py
index d8aca1bf..7b0ed08f 100644
--- a/src/humanloop/requests/http_validation_error.py
+++ b/src/humanloop/requests/http_validation_error.py
@@ -1,8 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
-import typing_extensions
import typing
+
+import typing_extensions
from .validation_error import ValidationErrorParams
diff --git a/src/humanloop/requests/human_evaluator_request.py b/src/humanloop/requests/human_evaluator_request.py
index af19cf09..9bd32e2d 100644
--- a/src/humanloop/requests/human_evaluator_request.py
+++ b/src/humanloop/requests/human_evaluator_request.py
@@ -1,13 +1,13 @@
# This file was auto-generated by Fern from our API Definition.
+import typing
+
import typing_extensions
from ..types.evaluator_arguments_type import EvaluatorArgumentsType
from ..types.human_evaluator_request_return_type import HumanEvaluatorRequestReturnType
-import typing_extensions
-import typing
-from .evaluator_judgment_option_response import EvaluatorJudgmentOptionResponseParams
-from .evaluator_judgment_number_limit import EvaluatorJudgmentNumberLimitParams
from ..types.valence import Valence
+from .evaluator_judgment_number_limit import EvaluatorJudgmentNumberLimitParams
+from .evaluator_judgment_option_response import EvaluatorJudgmentOptionResponseParams
class HumanEvaluatorRequestParams(typing_extensions.TypedDict):
diff --git a/src/humanloop/requests/image_chat_content.py b/src/humanloop/requests/image_chat_content.py
index 1e01f4a8..5dc1163e 100644
--- a/src/humanloop/requests/image_chat_content.py
+++ b/src/humanloop/requests/image_chat_content.py
@@ -1,7 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
import typing
+
+import typing_extensions
from .image_url import ImageUrlParams
diff --git a/src/humanloop/requests/image_url.py b/src/humanloop/requests/image_url.py
index e7cc2776..9d2a671b 100644
--- a/src/humanloop/requests/image_url.py
+++ b/src/humanloop/requests/image_url.py
@@ -1,6 +1,5 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
import typing_extensions
from ..types.image_url_detail import ImageUrlDetail
diff --git a/src/humanloop/requests/linked_file_request.py b/src/humanloop/requests/linked_file_request.py
index 2bbba19c..58c44162 100644
--- a/src/humanloop/requests/linked_file_request.py
+++ b/src/humanloop/requests/linked_file_request.py
@@ -1,7 +1,6 @@
# This file was auto-generated by Fern from our API Definition.
import typing_extensions
-import typing_extensions
class LinkedFileRequestParams(typing_extensions.TypedDict):
diff --git a/src/humanloop/requests/linked_tool_response.py b/src/humanloop/requests/linked_tool_response.py
index d9248067..646549d9 100644
--- a/src/humanloop/requests/linked_tool_response.py
+++ b/src/humanloop/requests/linked_tool_response.py
@@ -1,9 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
-import typing_extensions
import typing
+import typing_extensions
+
class LinkedToolResponseParams(typing_extensions.TypedDict):
name: str
diff --git a/src/humanloop/requests/list_agents.py b/src/humanloop/requests/list_agents.py
index 4a72f1db..8e1d6b0e 100644
--- a/src/humanloop/requests/list_agents.py
+++ b/src/humanloop/requests/list_agents.py
@@ -1,7 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
import typing
+
+import typing_extensions
from .agent_response import AgentResponseParams
diff --git a/src/humanloop/requests/list_datasets.py b/src/humanloop/requests/list_datasets.py
index d397d0b9..b49ea512 100644
--- a/src/humanloop/requests/list_datasets.py
+++ b/src/humanloop/requests/list_datasets.py
@@ -1,7 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
import typing
+
+import typing_extensions
from .dataset_response import DatasetResponseParams
diff --git a/src/humanloop/requests/list_evaluators.py b/src/humanloop/requests/list_evaluators.py
index f1c59b3c..61d1aa46 100644
--- a/src/humanloop/requests/list_evaluators.py
+++ b/src/humanloop/requests/list_evaluators.py
@@ -1,7 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
import typing
+
+import typing_extensions
from .evaluator_response import EvaluatorResponseParams
diff --git a/src/humanloop/requests/list_flows.py b/src/humanloop/requests/list_flows.py
index 459d9751..32b90142 100644
--- a/src/humanloop/requests/list_flows.py
+++ b/src/humanloop/requests/list_flows.py
@@ -1,7 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
import typing
+
+import typing_extensions
from .flow_response import FlowResponseParams
diff --git a/src/humanloop/requests/list_prompts.py b/src/humanloop/requests/list_prompts.py
index d33796e3..717fd9eb 100644
--- a/src/humanloop/requests/list_prompts.py
+++ b/src/humanloop/requests/list_prompts.py
@@ -1,7 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
import typing
+
+import typing_extensions
from .prompt_response import PromptResponseParams
diff --git a/src/humanloop/requests/list_tools.py b/src/humanloop/requests/list_tools.py
index d05fdb3a..d12fe188 100644
--- a/src/humanloop/requests/list_tools.py
+++ b/src/humanloop/requests/list_tools.py
@@ -1,7 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
import typing
+
+import typing_extensions
from .tool_response import ToolResponseParams
diff --git a/src/humanloop/requests/llm_evaluator_request.py b/src/humanloop/requests/llm_evaluator_request.py
index 14a88eac..fd4c6d29 100644
--- a/src/humanloop/requests/llm_evaluator_request.py
+++ b/src/humanloop/requests/llm_evaluator_request.py
@@ -1,13 +1,13 @@
# This file was auto-generated by Fern from our API Definition.
+import typing
+
import typing_extensions
from ..types.evaluator_arguments_type import EvaluatorArgumentsType
from ..types.evaluator_return_type_enum import EvaluatorReturnTypeEnum
-import typing_extensions
-import typing
-from .evaluator_judgment_option_response import EvaluatorJudgmentOptionResponseParams
-from .evaluator_judgment_number_limit import EvaluatorJudgmentNumberLimitParams
from ..types.valence import Valence
+from .evaluator_judgment_number_limit import EvaluatorJudgmentNumberLimitParams
+from .evaluator_judgment_option_response import EvaluatorJudgmentOptionResponseParams
from .prompt_kernel_request import PromptKernelRequestParams
diff --git a/src/humanloop/requests/log_response.py b/src/humanloop/requests/log_response.py
index cb3ce212..3a1f56a0 100644
--- a/src/humanloop/requests/log_response.py
+++ b/src/humanloop/requests/log_response.py
@@ -1,15 +1,15 @@
# This file was auto-generated by Fern from our API Definition.
from __future__ import annotations
-import typing
+
import typing
if typing.TYPE_CHECKING:
- from .prompt_log_response import PromptLogResponseParams
- from .tool_log_response import ToolLogResponseParams
+ from .agent_log_response import AgentLogResponseParams
from .evaluator_log_response import EvaluatorLogResponseParams
from .flow_log_response import FlowLogResponseParams
- from .agent_log_response import AgentLogResponseParams
+ from .prompt_log_response import PromptLogResponseParams
+ from .tool_log_response import ToolLogResponseParams
LogResponseParams = typing.Union[
"PromptLogResponseParams",
"ToolLogResponseParams",
diff --git a/src/humanloop/requests/log_stream_response.py b/src/humanloop/requests/log_stream_response.py
index e142e7fb..2a9b1952 100644
--- a/src/humanloop/requests/log_stream_response.py
+++ b/src/humanloop/requests/log_stream_response.py
@@ -1,7 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from .prompt_call_stream_response import PromptCallStreamResponseParams
+
from .agent_log_stream_response import AgentLogStreamResponseParams
+from .prompt_call_stream_response import PromptCallStreamResponseParams
LogStreamResponseParams = typing.Union[PromptCallStreamResponseParams, AgentLogStreamResponseParams]
diff --git a/src/humanloop/requests/monitoring_evaluator_response.py b/src/humanloop/requests/monitoring_evaluator_response.py
index 51ade40e..c946fc65 100644
--- a/src/humanloop/requests/monitoring_evaluator_response.py
+++ b/src/humanloop/requests/monitoring_evaluator_response.py
@@ -1,15 +1,16 @@
# This file was auto-generated by Fern from our API Definition.
from __future__ import annotations
-import typing_extensions
-import typing_extensions
-from ..types.monitoring_evaluator_state import MonitoringEvaluatorState
+
import datetime as dt
import typing
+import typing_extensions
+from ..types.monitoring_evaluator_state import MonitoringEvaluatorState
+
if typing.TYPE_CHECKING:
- from .version_reference_response import VersionReferenceResponseParams
from .evaluator_response import EvaluatorResponseParams
+ from .version_reference_response import VersionReferenceResponseParams
class MonitoringEvaluatorResponseParams(typing_extensions.TypedDict):
diff --git a/src/humanloop/requests/numeric_evaluator_stats_response.py b/src/humanloop/requests/numeric_evaluator_stats_response.py
index 12ac73c9..a74784ce 100644
--- a/src/humanloop/requests/numeric_evaluator_stats_response.py
+++ b/src/humanloop/requests/numeric_evaluator_stats_response.py
@@ -1,9 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
-import typing_extensions
import typing
+import typing_extensions
+
class NumericEvaluatorStatsResponseParams(typing_extensions.TypedDict):
"""
diff --git a/src/humanloop/requests/paginated_data_agent_response.py b/src/humanloop/requests/paginated_data_agent_response.py
index c8d67533..af318b6a 100644
--- a/src/humanloop/requests/paginated_data_agent_response.py
+++ b/src/humanloop/requests/paginated_data_agent_response.py
@@ -1,7 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
import typing
+
+import typing_extensions
from .agent_response import AgentResponseParams
diff --git a/src/humanloop/requests/paginated_data_evaluation_log_response.py b/src/humanloop/requests/paginated_data_evaluation_log_response.py
index e9723472..61439b55 100644
--- a/src/humanloop/requests/paginated_data_evaluation_log_response.py
+++ b/src/humanloop/requests/paginated_data_evaluation_log_response.py
@@ -1,7 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
import typing
+
+import typing_extensions
from .evaluation_log_response import EvaluationLogResponseParams
diff --git a/src/humanloop/requests/paginated_data_evaluator_response.py b/src/humanloop/requests/paginated_data_evaluator_response.py
index 25a28ece..15294571 100644
--- a/src/humanloop/requests/paginated_data_evaluator_response.py
+++ b/src/humanloop/requests/paginated_data_evaluator_response.py
@@ -1,7 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
import typing
+
+import typing_extensions
from .evaluator_response import EvaluatorResponseParams
diff --git a/src/humanloop/requests/paginated_data_flow_response.py b/src/humanloop/requests/paginated_data_flow_response.py
index 1cf128b8..51db6406 100644
--- a/src/humanloop/requests/paginated_data_flow_response.py
+++ b/src/humanloop/requests/paginated_data_flow_response.py
@@ -1,7 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
import typing
+
+import typing_extensions
from .flow_response import FlowResponseParams
diff --git a/src/humanloop/requests/paginated_data_log_response.py b/src/humanloop/requests/paginated_data_log_response.py
index d0fef001..450f2d0e 100644
--- a/src/humanloop/requests/paginated_data_log_response.py
+++ b/src/humanloop/requests/paginated_data_log_response.py
@@ -1,7 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
import typing
+
+import typing_extensions
from .log_response import LogResponseParams
diff --git a/src/humanloop/requests/paginated_data_prompt_response.py b/src/humanloop/requests/paginated_data_prompt_response.py
index 36ec1148..62eae52b 100644
--- a/src/humanloop/requests/paginated_data_prompt_response.py
+++ b/src/humanloop/requests/paginated_data_prompt_response.py
@@ -1,7 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
import typing
+
+import typing_extensions
from .prompt_response import PromptResponseParams
diff --git a/src/humanloop/requests/paginated_data_tool_response.py b/src/humanloop/requests/paginated_data_tool_response.py
index 8282ba29..41eaf15a 100644
--- a/src/humanloop/requests/paginated_data_tool_response.py
+++ b/src/humanloop/requests/paginated_data_tool_response.py
@@ -1,7 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
import typing
+
+import typing_extensions
from .tool_response import ToolResponseParams
diff --git a/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py b/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py
index 0e7adb64..5bde00b9 100644
--- a/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py
+++ b/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py
@@ -1,7 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
import typing
+
+import typing_extensions
from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item import (
PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams,
)
diff --git a/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py b/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py
index b43a5521..51db2493 100644
--- a/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py
+++ b/src/humanloop/requests/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py
@@ -1,12 +1,13 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from .prompt_response import PromptResponseParams
-from .tool_response import ToolResponseParams
+
+from .agent_response import AgentResponseParams
from .dataset_response import DatasetResponseParams
from .evaluator_response import EvaluatorResponseParams
from .flow_response import FlowResponseParams
-from .agent_response import AgentResponseParams
+from .prompt_response import PromptResponseParams
+from .tool_response import ToolResponseParams
PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItemParams = typing.Union[
PromptResponseParams,
diff --git a/src/humanloop/requests/paginated_datapoint_response.py b/src/humanloop/requests/paginated_datapoint_response.py
index bbd7ee5c..5ef2bae4 100644
--- a/src/humanloop/requests/paginated_datapoint_response.py
+++ b/src/humanloop/requests/paginated_datapoint_response.py
@@ -1,7 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
import typing
+
+import typing_extensions
from .datapoint_response import DatapointResponseParams
diff --git a/src/humanloop/requests/paginated_dataset_response.py b/src/humanloop/requests/paginated_dataset_response.py
index addadeb8..ea5cd5b1 100644
--- a/src/humanloop/requests/paginated_dataset_response.py
+++ b/src/humanloop/requests/paginated_dataset_response.py
@@ -1,7 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
import typing
+
+import typing_extensions
from .dataset_response import DatasetResponseParams
diff --git a/src/humanloop/requests/paginated_evaluation_response.py b/src/humanloop/requests/paginated_evaluation_response.py
index a83b402c..30916a81 100644
--- a/src/humanloop/requests/paginated_evaluation_response.py
+++ b/src/humanloop/requests/paginated_evaluation_response.py
@@ -1,7 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
import typing
+
+import typing_extensions
from .evaluation_response import EvaluationResponseParams
diff --git a/src/humanloop/requests/populate_template_response.py b/src/humanloop/requests/populate_template_response.py
index 491cacd3..a6ed2b2f 100644
--- a/src/humanloop/requests/populate_template_response.py
+++ b/src/humanloop/requests/populate_template_response.py
@@ -1,24 +1,24 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
+import datetime as dt
+import typing
+
import typing_extensions
from ..types.model_endpoints import ModelEndpoints
-from .populate_template_response_template import PopulateTemplateResponseTemplateParams
-from ..types.template_language import TemplateLanguage
from ..types.model_providers import ModelProviders
-from .populate_template_response_stop import PopulateTemplateResponseStopParams
-import typing
-from .response_format import ResponseFormatParams
-from .populate_template_response_reasoning_effort import PopulateTemplateResponseReasoningEffortParams
-from .tool_function import ToolFunctionParams
-from .linked_tool_response import LinkedToolResponseParams
-from .environment_response import EnvironmentResponseParams
-import datetime as dt
+from ..types.template_language import TemplateLanguage
from ..types.user_response import UserResponse
+from .environment_response import EnvironmentResponseParams
+from .evaluator_aggregate import EvaluatorAggregateParams
from .input_response import InputResponseParams
+from .linked_tool_response import LinkedToolResponseParams
from .monitoring_evaluator_response import MonitoringEvaluatorResponseParams
-from .evaluator_aggregate import EvaluatorAggregateParams
from .populate_template_response_populated_template import PopulateTemplateResponsePopulatedTemplateParams
+from .populate_template_response_reasoning_effort import PopulateTemplateResponseReasoningEffortParams
+from .populate_template_response_stop import PopulateTemplateResponseStopParams
+from .populate_template_response_template import PopulateTemplateResponseTemplateParams
+from .response_format import ResponseFormatParams
+from .tool_function import ToolFunctionParams
class PopulateTemplateResponseParams(typing_extensions.TypedDict):
@@ -218,6 +218,11 @@ class PopulateTemplateResponseParams(typing_extensions.TypedDict):
Aggregation of Evaluator results for the Prompt Version.
"""
+ raw_file_content: typing_extensions.NotRequired[str]
+ """
+ The raw content of the Prompt. Corresponds to the .prompt file.
+ """
+
populated_template: typing_extensions.NotRequired[PopulateTemplateResponsePopulatedTemplateParams]
"""
The template populated with the input values you provided in the request. Returns None if no template exists.
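Besides the import reordering, this hunk (and the matching one for PromptResponseParams further down in the patch) adds an optional `raw_file_content` key carrying the raw .prompt file contents. A small sketch of how a `NotRequired` key on one of these TypedDicts behaves; the class name and values are illustrative, only the field name comes from the diff:

    import typing_extensions

    class _VersionParams(typing_extensions.TypedDict):
        path: str
        raw_file_content: typing_extensions.NotRequired[str]

    # Both dicts satisfy the type: the raw .prompt content may simply be omitted.
    with_raw: _VersionParams = {"path": "example-prompt", "raw_file_content": "..."}
    without_raw: _VersionParams = {"path": "example-prompt"}

    print(without_raw.get("raw_file_content"))  # None when the key is absent
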
diff --git a/src/humanloop/requests/populate_template_response_populated_template.py b/src/humanloop/requests/populate_template_response_populated_template.py
index 3a332031..79bc7505 100644
--- a/src/humanloop/requests/populate_template_response_populated_template.py
+++ b/src/humanloop/requests/populate_template_response_populated_template.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from .chat_message import ChatMessageParams
PopulateTemplateResponsePopulatedTemplateParams = typing.Union[str, typing.Sequence[ChatMessageParams]]
diff --git a/src/humanloop/requests/populate_template_response_reasoning_effort.py b/src/humanloop/requests/populate_template_response_reasoning_effort.py
index 6b1dd46a..9140180f 100644
--- a/src/humanloop/requests/populate_template_response_reasoning_effort.py
+++ b/src/humanloop/requests/populate_template_response_reasoning_effort.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from ..types.open_ai_reasoning_effort import OpenAiReasoningEffort
PopulateTemplateResponseReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/requests/populate_template_response_template.py b/src/humanloop/requests/populate_template_response_template.py
index 99eb01a7..7a9ba8e9 100644
--- a/src/humanloop/requests/populate_template_response_template.py
+++ b/src/humanloop/requests/populate_template_response_template.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from .chat_message import ChatMessageParams
PopulateTemplateResponseTemplateParams = typing.Union[str, typing.Sequence[ChatMessageParams]]
diff --git a/src/humanloop/requests/prompt_call_log_response.py b/src/humanloop/requests/prompt_call_log_response.py
index 497ab23e..4dff347b 100644
--- a/src/humanloop/requests/prompt_call_log_response.py
+++ b/src/humanloop/requests/prompt_call_log_response.py
@@ -1,8 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
-import typing_extensions
import datetime as dt
+
+import typing_extensions
from .chat_message import ChatMessageParams
diff --git a/src/humanloop/requests/prompt_call_response.py b/src/humanloop/requests/prompt_call_response.py
index e465218f..14ff4609 100644
--- a/src/humanloop/requests/prompt_call_response.py
+++ b/src/humanloop/requests/prompt_call_response.py
@@ -1,14 +1,14 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
-import typing_extensions
import datetime as dt
import typing
+
+import typing_extensions
+from ..types.log_status import LogStatus
from .chat_message import ChatMessageParams
+from .prompt_call_log_response import PromptCallLogResponseParams
from .prompt_call_response_tool_choice import PromptCallResponseToolChoiceParams
from .prompt_response import PromptResponseParams
-from ..types.log_status import LogStatus
-from .prompt_call_log_response import PromptCallLogResponseParams
class PromptCallResponseParams(typing_extensions.TypedDict):
diff --git a/src/humanloop/requests/prompt_call_response_tool_choice.py b/src/humanloop/requests/prompt_call_response_tool_choice.py
index 2f570391..63fd7183 100644
--- a/src/humanloop/requests/prompt_call_response_tool_choice.py
+++ b/src/humanloop/requests/prompt_call_response_tool_choice.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from .tool_choice import ToolChoiceParams
PromptCallResponseToolChoiceParams = typing.Union[
diff --git a/src/humanloop/requests/prompt_call_stream_response.py b/src/humanloop/requests/prompt_call_stream_response.py
index 92ae613d..9d3e5651 100644
--- a/src/humanloop/requests/prompt_call_stream_response.py
+++ b/src/humanloop/requests/prompt_call_stream_response.py
@@ -1,8 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
-import typing_extensions
import datetime as dt
+
+import typing_extensions
from .chat_message import ChatMessageParams
diff --git a/src/humanloop/requests/prompt_kernel_request.py b/src/humanloop/requests/prompt_kernel_request.py
index 1e4f56de..48d8db46 100644
--- a/src/humanloop/requests/prompt_kernel_request.py
+++ b/src/humanloop/requests/prompt_kernel_request.py
@@ -1,15 +1,15 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
+import typing
+
import typing_extensions
from ..types.model_endpoints import ModelEndpoints
-from .prompt_kernel_request_template import PromptKernelRequestTemplateParams
-from ..types.template_language import TemplateLanguage
from ..types.model_providers import ModelProviders
+from ..types.template_language import TemplateLanguage
+from .prompt_kernel_request_reasoning_effort import PromptKernelRequestReasoningEffortParams
from .prompt_kernel_request_stop import PromptKernelRequestStopParams
-import typing
+from .prompt_kernel_request_template import PromptKernelRequestTemplateParams
from .response_format import ResponseFormatParams
-from .prompt_kernel_request_reasoning_effort import PromptKernelRequestReasoningEffortParams
from .tool_function import ToolFunctionParams
diff --git a/src/humanloop/requests/prompt_kernel_request_reasoning_effort.py b/src/humanloop/requests/prompt_kernel_request_reasoning_effort.py
index 0c3d194b..81df2957 100644
--- a/src/humanloop/requests/prompt_kernel_request_reasoning_effort.py
+++ b/src/humanloop/requests/prompt_kernel_request_reasoning_effort.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from ..types.open_ai_reasoning_effort import OpenAiReasoningEffort
PromptKernelRequestReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/requests/prompt_kernel_request_template.py b/src/humanloop/requests/prompt_kernel_request_template.py
index dc76fad3..aa389a04 100644
--- a/src/humanloop/requests/prompt_kernel_request_template.py
+++ b/src/humanloop/requests/prompt_kernel_request_template.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from .chat_message import ChatMessageParams
PromptKernelRequestTemplateParams = typing.Union[str, typing.Sequence[ChatMessageParams]]
diff --git a/src/humanloop/requests/prompt_log_response.py b/src/humanloop/requests/prompt_log_response.py
index 15dba29f..6147adec 100644
--- a/src/humanloop/requests/prompt_log_response.py
+++ b/src/humanloop/requests/prompt_log_response.py
@@ -1,15 +1,15 @@
# This file was auto-generated by Fern from our API Definition.
from __future__ import annotations
+
+import datetime as dt
+import typing
+
import typing_extensions
-import typing_extensions
+from ..types.log_status import LogStatus
from .chat_message import ChatMessageParams
-import typing
from .prompt_log_response_tool_choice import PromptLogResponseToolChoiceParams
from .prompt_response import PromptResponseParams
-import datetime as dt
-from ..types.log_status import LogStatus
-import typing
if typing.TYPE_CHECKING:
from .evaluator_log_response import EvaluatorLogResponseParams
diff --git a/src/humanloop/requests/prompt_log_response_tool_choice.py b/src/humanloop/requests/prompt_log_response_tool_choice.py
index 372b3a72..8e8ad6dd 100644
--- a/src/humanloop/requests/prompt_log_response_tool_choice.py
+++ b/src/humanloop/requests/prompt_log_response_tool_choice.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from .tool_choice import ToolChoiceParams
PromptLogResponseToolChoiceParams = typing.Union[
diff --git a/src/humanloop/requests/prompt_response.py b/src/humanloop/requests/prompt_response.py
index b6ff03df..7a1b4493 100644
--- a/src/humanloop/requests/prompt_response.py
+++ b/src/humanloop/requests/prompt_response.py
@@ -1,24 +1,24 @@
# This file was auto-generated by Fern from our API Definition.
from __future__ import annotations
-import typing_extensions
+
+import datetime as dt
+import typing
+
import typing_extensions
from ..types.model_endpoints import ModelEndpoints
-from .prompt_response_template import PromptResponseTemplateParams
-from ..types.template_language import TemplateLanguage
from ..types.model_providers import ModelProviders
+from ..types.template_language import TemplateLanguage
+from ..types.user_response import UserResponse
+from .environment_response import EnvironmentResponseParams
+from .evaluator_aggregate import EvaluatorAggregateParams
+from .input_response import InputResponseParams
+from .linked_tool_response import LinkedToolResponseParams
+from .prompt_response_reasoning_effort import PromptResponseReasoningEffortParams
from .prompt_response_stop import PromptResponseStopParams
-import typing
+from .prompt_response_template import PromptResponseTemplateParams
from .response_format import ResponseFormatParams
-from .prompt_response_reasoning_effort import PromptResponseReasoningEffortParams
from .tool_function import ToolFunctionParams
-from .linked_tool_response import LinkedToolResponseParams
-from .environment_response import EnvironmentResponseParams
-import datetime as dt
-from ..types.user_response import UserResponse
-from .input_response import InputResponseParams
-from .evaluator_aggregate import EvaluatorAggregateParams
-import typing
if typing.TYPE_CHECKING:
from .monitoring_evaluator_response import MonitoringEvaluatorResponseParams
@@ -220,3 +220,8 @@ class PromptResponseParams(typing_extensions.TypedDict):
"""
Aggregation of Evaluator results for the Prompt Version.
"""
+
+ raw_file_content: typing_extensions.NotRequired[str]
+ """
+ The raw content of the Prompt. Corresponds to the .prompt file.
+ """
diff --git a/src/humanloop/requests/prompt_response_reasoning_effort.py b/src/humanloop/requests/prompt_response_reasoning_effort.py
index 4d019051..55d82486 100644
--- a/src/humanloop/requests/prompt_response_reasoning_effort.py
+++ b/src/humanloop/requests/prompt_response_reasoning_effort.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from ..types.open_ai_reasoning_effort import OpenAiReasoningEffort
PromptResponseReasoningEffortParams = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/requests/prompt_response_template.py b/src/humanloop/requests/prompt_response_template.py
index 77cb8289..b9f6deb4 100644
--- a/src/humanloop/requests/prompt_response_template.py
+++ b/src/humanloop/requests/prompt_response_template.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from .chat_message import ChatMessageParams
PromptResponseTemplateParams = typing.Union[str, typing.Sequence[ChatMessageParams]]
diff --git a/src/humanloop/requests/provider_api_keys.py b/src/humanloop/requests/provider_api_keys.py
index d4ea58a2..c37649ea 100644
--- a/src/humanloop/requests/provider_api_keys.py
+++ b/src/humanloop/requests/provider_api_keys.py
@@ -1,6 +1,5 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
import typing_extensions
from ..core.serialization import FieldMetadata
diff --git a/src/humanloop/requests/response_format.py b/src/humanloop/requests/response_format.py
index 6a4017a0..1fce8531 100644
--- a/src/humanloop/requests/response_format.py
+++ b/src/humanloop/requests/response_format.py
@@ -1,9 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
+import typing
+
import typing_extensions
from ..types.response_format_type import ResponseFormatType
-import typing_extensions
-import typing
class ResponseFormatParams(typing_extensions.TypedDict):
diff --git a/src/humanloop/requests/run_stats_response.py b/src/humanloop/requests/run_stats_response.py
index e9127722..6bdbf08e 100644
--- a/src/humanloop/requests/run_stats_response.py
+++ b/src/humanloop/requests/run_stats_response.py
@@ -1,10 +1,10 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
-import typing_extensions
import typing
-from .run_stats_response_evaluator_stats_item import RunStatsResponseEvaluatorStatsItemParams
+
+import typing_extensions
from ..types.evaluation_status import EvaluationStatus
+from .run_stats_response_evaluator_stats_item import RunStatsResponseEvaluatorStatsItemParams
class RunStatsResponseParams(typing_extensions.TypedDict):
diff --git a/src/humanloop/requests/run_stats_response_evaluator_stats_item.py b/src/humanloop/requests/run_stats_response_evaluator_stats_item.py
index a42aea0b..09231c9b 100644
--- a/src/humanloop/requests/run_stats_response_evaluator_stats_item.py
+++ b/src/humanloop/requests/run_stats_response_evaluator_stats_item.py
@@ -1,8 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from .numeric_evaluator_stats_response import NumericEvaluatorStatsResponseParams
+
from .boolean_evaluator_stats_response import BooleanEvaluatorStatsResponseParams
+from .numeric_evaluator_stats_response import NumericEvaluatorStatsResponseParams
from .select_evaluator_stats_response import SelectEvaluatorStatsResponseParams
from .text_evaluator_stats_response import TextEvaluatorStatsResponseParams
diff --git a/src/humanloop/requests/run_version_response.py b/src/humanloop/requests/run_version_response.py
index 569d0d76..3091de87 100644
--- a/src/humanloop/requests/run_version_response.py
+++ b/src/humanloop/requests/run_version_response.py
@@ -1,11 +1,12 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from .prompt_response import PromptResponseParams
-from .tool_response import ToolResponseParams
+
+from .agent_response import AgentResponseParams
from .evaluator_response import EvaluatorResponseParams
from .flow_response import FlowResponseParams
-from .agent_response import AgentResponseParams
+from .prompt_response import PromptResponseParams
+from .tool_response import ToolResponseParams
RunVersionResponseParams = typing.Union[
PromptResponseParams, ToolResponseParams, EvaluatorResponseParams, FlowResponseParams, AgentResponseParams
diff --git a/src/humanloop/requests/select_evaluator_stats_response.py b/src/humanloop/requests/select_evaluator_stats_response.py
index f546f4f6..7c77198a 100644
--- a/src/humanloop/requests/select_evaluator_stats_response.py
+++ b/src/humanloop/requests/select_evaluator_stats_response.py
@@ -1,8 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
import typing
+import typing_extensions
+
class SelectEvaluatorStatsResponseParams(typing_extensions.TypedDict):
"""
diff --git a/src/humanloop/requests/text_chat_content.py b/src/humanloop/requests/text_chat_content.py
index db956653..fa9f5437 100644
--- a/src/humanloop/requests/text_chat_content.py
+++ b/src/humanloop/requests/text_chat_content.py
@@ -1,8 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
import typing
+import typing_extensions
+
class TextChatContentParams(typing_extensions.TypedDict):
type: typing.Literal["text"]
diff --git a/src/humanloop/requests/tool_call_response.py b/src/humanloop/requests/tool_call_response.py
index 1c92b28f..e00069de 100644
--- a/src/humanloop/requests/tool_call_response.py
+++ b/src/humanloop/requests/tool_call_response.py
@@ -1,13 +1,13 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
-import typing_extensions
import datetime as dt
-from .tool_response import ToolResponseParams
import typing
+
+import typing_extensions
from ..types.log_status import LogStatus
from .evaluator_log_response import EvaluatorLogResponseParams
from .log_response import LogResponseParams
+from .tool_response import ToolResponseParams
class ToolCallResponseParams(typing_extensions.TypedDict):
diff --git a/src/humanloop/requests/tool_function.py b/src/humanloop/requests/tool_function.py
index 5dd0d546..9132b10e 100644
--- a/src/humanloop/requests/tool_function.py
+++ b/src/humanloop/requests/tool_function.py
@@ -1,9 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
-import typing_extensions
import typing
+import typing_extensions
+
class ToolFunctionParams(typing_extensions.TypedDict):
name: str
diff --git a/src/humanloop/requests/tool_kernel_request.py b/src/humanloop/requests/tool_kernel_request.py
index 6973c1d0..48f8f5b1 100644
--- a/src/humanloop/requests/tool_kernel_request.py
+++ b/src/humanloop/requests/tool_kernel_request.py
@@ -1,9 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
+import typing
+
import typing_extensions
from .tool_function import ToolFunctionParams
-import typing
class ToolKernelRequestParams(typing_extensions.TypedDict):
diff --git a/src/humanloop/requests/tool_log_response.py b/src/humanloop/requests/tool_log_response.py
index 1aa0daea..f4be5ad0 100644
--- a/src/humanloop/requests/tool_log_response.py
+++ b/src/humanloop/requests/tool_log_response.py
@@ -1,14 +1,14 @@
# This file was auto-generated by Fern from our API Definition.
from __future__ import annotations
-import typing_extensions
-import typing_extensions
+
import datetime as dt
import typing
+
+import typing_extensions
from ..types.log_status import LogStatus
-from .tool_response import ToolResponseParams
from .chat_message import ChatMessageParams
-import typing
+from .tool_response import ToolResponseParams
if typing.TYPE_CHECKING:
from .evaluator_log_response import EvaluatorLogResponseParams
diff --git a/src/humanloop/requests/tool_response.py b/src/humanloop/requests/tool_response.py
index 0261405e..ea4ab1df 100644
--- a/src/humanloop/requests/tool_response.py
+++ b/src/humanloop/requests/tool_response.py
@@ -1,17 +1,17 @@
# This file was auto-generated by Fern from our API Definition.
from __future__ import annotations
-import typing_extensions
-import typing_extensions
-from .tool_function import ToolFunctionParams
+
+import datetime as dt
import typing
+
+import typing_extensions
from ..types.files_tool_type import FilesToolType
-from .environment_response import EnvironmentResponseParams
-import datetime as dt
from ..types.user_response import UserResponse
-from .input_response import InputResponseParams
+from .environment_response import EnvironmentResponseParams
from .evaluator_aggregate import EvaluatorAggregateParams
-import typing
+from .input_response import InputResponseParams
+from .tool_function import ToolFunctionParams
if typing.TYPE_CHECKING:
from .monitoring_evaluator_response import MonitoringEvaluatorResponseParams
diff --git a/src/humanloop/requests/update_version_request.py b/src/humanloop/requests/update_version_request.py
index cbdbd26b..204b3b37 100644
--- a/src/humanloop/requests/update_version_request.py
+++ b/src/humanloop/requests/update_version_request.py
@@ -1,7 +1,6 @@
# This file was auto-generated by Fern from our API Definition.
import typing_extensions
-import typing_extensions
class UpdateVersionRequestParams(typing_extensions.TypedDict):
diff --git a/src/humanloop/requests/validation_error.py b/src/humanloop/requests/validation_error.py
index 567fd3d8..fba151d8 100644
--- a/src/humanloop/requests/validation_error.py
+++ b/src/humanloop/requests/validation_error.py
@@ -1,7 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
import typing
+
+import typing_extensions
from .validation_error_loc_item import ValidationErrorLocItemParams
diff --git a/src/humanloop/requests/version_deployment_response.py b/src/humanloop/requests/version_deployment_response.py
index ad450dbb..fdd17544 100644
--- a/src/humanloop/requests/version_deployment_response.py
+++ b/src/humanloop/requests/version_deployment_response.py
@@ -1,10 +1,11 @@
# This file was auto-generated by Fern from our API Definition.
from __future__ import annotations
+
+import typing
+
import typing_extensions
from .environment_response import EnvironmentResponseParams
-import typing
-import typing
if typing.TYPE_CHECKING:
from .version_deployment_response_file import VersionDeploymentResponseFileParams
diff --git a/src/humanloop/requests/version_deployment_response_file.py b/src/humanloop/requests/version_deployment_response_file.py
index 9659cb49..74e04ab8 100644
--- a/src/humanloop/requests/version_deployment_response_file.py
+++ b/src/humanloop/requests/version_deployment_response_file.py
@@ -1,16 +1,17 @@
# This file was auto-generated by Fern from our API Definition.
from __future__ import annotations
+
import typing
+
from .dataset_response import DatasetResponseParams
-import typing
if typing.TYPE_CHECKING:
- from .prompt_response import PromptResponseParams
- from .tool_response import ToolResponseParams
+ from .agent_response import AgentResponseParams
from .evaluator_response import EvaluatorResponseParams
from .flow_response import FlowResponseParams
- from .agent_response import AgentResponseParams
+ from .prompt_response import PromptResponseParams
+ from .tool_response import ToolResponseParams
VersionDeploymentResponseFileParams = typing.Union[
"PromptResponseParams",
"ToolResponseParams",
diff --git a/src/humanloop/requests/version_id_response.py b/src/humanloop/requests/version_id_response.py
index bbb1a18b..af4d3226 100644
--- a/src/humanloop/requests/version_id_response.py
+++ b/src/humanloop/requests/version_id_response.py
@@ -1,10 +1,11 @@
# This file was auto-generated by Fern from our API Definition.
from __future__ import annotations
-import typing_extensions
-import typing
+
import typing
+import typing_extensions
+
if typing.TYPE_CHECKING:
from .version_id_response_version import VersionIdResponseVersionParams
diff --git a/src/humanloop/requests/version_id_response_version.py b/src/humanloop/requests/version_id_response_version.py
index 9c317679..ac1f96e2 100644
--- a/src/humanloop/requests/version_id_response_version.py
+++ b/src/humanloop/requests/version_id_response_version.py
@@ -1,16 +1,17 @@
# This file was auto-generated by Fern from our API Definition.
from __future__ import annotations
+
import typing
+
from .dataset_response import DatasetResponseParams
-import typing
if typing.TYPE_CHECKING:
- from .prompt_response import PromptResponseParams
- from .tool_response import ToolResponseParams
+ from .agent_response import AgentResponseParams
from .evaluator_response import EvaluatorResponseParams
from .flow_response import FlowResponseParams
- from .agent_response import AgentResponseParams
+ from .prompt_response import PromptResponseParams
+ from .tool_response import ToolResponseParams
VersionIdResponseVersionParams = typing.Union[
"PromptResponseParams",
"ToolResponseParams",
diff --git a/src/humanloop/requests/version_reference_response.py b/src/humanloop/requests/version_reference_response.py
index e9568433..4b80e4cd 100644
--- a/src/humanloop/requests/version_reference_response.py
+++ b/src/humanloop/requests/version_reference_response.py
@@ -1,7 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
from __future__ import annotations
-import typing
+
import typing
if typing.TYPE_CHECKING:
diff --git a/src/humanloop/requests/version_stats_response.py b/src/humanloop/requests/version_stats_response.py
index 053c0ac9..1bb18233 100644
--- a/src/humanloop/requests/version_stats_response.py
+++ b/src/humanloop/requests/version_stats_response.py
@@ -1,8 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
-import typing_extensions
-import typing_extensions
import typing
+
+import typing_extensions
from .version_stats_response_evaluator_version_stats_item import VersionStatsResponseEvaluatorVersionStatsItemParams
diff --git a/src/humanloop/requests/version_stats_response_evaluator_version_stats_item.py b/src/humanloop/requests/version_stats_response_evaluator_version_stats_item.py
index 2e893f26..2bbeb15c 100644
--- a/src/humanloop/requests/version_stats_response_evaluator_version_stats_item.py
+++ b/src/humanloop/requests/version_stats_response_evaluator_version_stats_item.py
@@ -1,8 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from .numeric_evaluator_stats_response import NumericEvaluatorStatsResponseParams
+
from .boolean_evaluator_stats_response import BooleanEvaluatorStatsResponseParams
+from .numeric_evaluator_stats_response import NumericEvaluatorStatsResponseParams
from .select_evaluator_stats_response import SelectEvaluatorStatsResponseParams
from .text_evaluator_stats_response import TextEvaluatorStatsResponseParams
diff --git a/src/humanloop/tools/__init__.py b/src/humanloop/tools/__init__.py
index f3ea2659..5cde0202 100644
--- a/src/humanloop/tools/__init__.py
+++ b/src/humanloop/tools/__init__.py
@@ -1,2 +1,4 @@
# This file was auto-generated by Fern from our API Definition.
+# isort: skip_file
+
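While the rest of the regeneration reorders imports into standard-library / third-party / local groups, the package `__init__` modules opt out with the `# isort: skip_file` directive added here, so their generated export order is left alone. Roughly what such a file header looks like (the re-exported name below is hypothetical):

    # This file was auto-generated by Fern from our API Definition.
    # isort: skip_file

    from .client import ToolsClient  # noqa: F401  -- export order preserved as generated
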
diff --git a/src/humanloop/tools/client.py b/src/humanloop/tools/client.py
index ea6b14a2..d8449a7c 100644
--- a/src/humanloop/tools/client.py
+++ b/src/humanloop/tools/client.py
@@ -1,40 +1,32 @@
# This file was auto-generated by Fern from our API Definition.
-import typing
-from ..core.client_wrapper import SyncClientWrapper
-from .raw_client import RawToolsClient
-from ..requests.tool_kernel_request import ToolKernelRequestParams
import datetime as dt
-from ..types.log_status import LogStatus
+import typing
+
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pagination import AsyncPager, SyncPager
from ..core.request_options import RequestOptions
-from ..types.tool_call_response import ToolCallResponse
-from ..types.create_tool_log_response import CreateToolLogResponse
-from ..types.log_response import LogResponse
-from ..types.project_sort_by import ProjectSortBy
-from ..types.sort_order import SortOrder
-from ..core.pagination import SyncPager
-from ..types.tool_response import ToolResponse
-from ..types.paginated_data_tool_response import PaginatedDataToolResponse
-from ..core.unchecked_base_model import construct_type
-from ..errors.unprocessable_entity_error import UnprocessableEntityError
-from ..types.http_validation_error import HttpValidationError
-from json.decoder import JSONDecodeError
-from ..core.api_error import ApiError
-from ..requests.tool_function import ToolFunctionParams
-from ..types.files_tool_type import FilesToolType
-from ..types.list_tools import ListTools
-from ..types.file_environment_response import FileEnvironmentResponse
from ..requests.evaluator_activation_deactivation_request_activate_item import (
EvaluatorActivationDeactivationRequestActivateItemParams,
)
from ..requests.evaluator_activation_deactivation_request_deactivate_item import (
EvaluatorActivationDeactivationRequestDeactivateItemParams,
)
-from ..types.file_environment_variable_request import FileEnvironmentVariableRequest
from ..requests.file_environment_variable_request import FileEnvironmentVariableRequestParams
-from ..core.client_wrapper import AsyncClientWrapper
-from .raw_client import AsyncRawToolsClient
-from ..core.pagination import AsyncPager
+from ..requests.tool_function import ToolFunctionParams
+from ..requests.tool_kernel_request import ToolKernelRequestParams
+from ..types.create_tool_log_response import CreateToolLogResponse
+from ..types.file_environment_response import FileEnvironmentResponse
+from ..types.file_environment_variable_request import FileEnvironmentVariableRequest
+from ..types.file_sort_by import FileSortBy
+from ..types.files_tool_type import FilesToolType
+from ..types.list_tools import ListTools
+from ..types.log_response import LogResponse
+from ..types.log_status import LogStatus
+from ..types.sort_order import SortOrder
+from ..types.tool_call_response import ToolCallResponse
+from ..types.tool_response import ToolResponse
+from .raw_client import AsyncRawToolsClient, RawToolsClient
# this is used as the default value for optional parameters
OMIT = typing.cast(typing.Any, ...)
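The `OMIT` sentinel kept above uses `Ellipsis` as the default for optional request parameters, so the client can tell "argument not passed" apart from an explicit `None`. A standalone sketch of that idea (the function is illustrative, not part of the SDK):

    import typing

    OMIT = typing.cast(typing.Any, ...)

    def build_payload(output: typing.Optional[str] = OMIT) -> dict:
        payload: dict = {}
        if output is not OMIT:  # None is a real value; OMIT means "leave the field out"
            payload["output"] = output
        return payload

    print(build_payload())             # {}
    print(build_payload(output=None))  # {'output': None}
    print(build_payload(output="35"))  # {'output': '35'}
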
@@ -154,13 +146,10 @@ def call(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
client.tools.call()
"""
- response = self._raw_client.call(
+ _response = self._raw_client.call(
version_id=version_id,
environment=environment,
path=path,
@@ -180,7 +169,7 @@ def call(
log_id=log_id,
request_options=request_options,
)
- return response.data
+ return _response.data
def log(
self,
@@ -307,31 +296,15 @@ def log(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.tools.log(
- path="math-tool",
- tool={
- "function": {
- "name": "multiply",
- "description": "Multiply two numbers",
- "parameters": {
- "type": "object",
- "properties": {
- "a": {"type": "number"},
- "b": {"type": "number"},
- },
- "required": ["a", "b"],
- },
- }
- },
- inputs={"a": 5, "b": 7},
- output="35",
- )
- """
- response = self._raw_client.log(
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.tools.log(path='math-tool', tool={'function': {'name': 'multiply', 'description': 'Multiply two numbers', 'parameters': {'type': 'object'
+ , 'properties': {'a': {'type': 'number'}, 'b': {'type': 'number'}}
+ , 'required': ['a', 'b']
+ }}}, inputs={'a': 5
+ , 'b': 7
+ }, output='35', )
+ """
+ _response = self._raw_client.log(
version_id=version_id,
environment=environment,
path=path,
@@ -358,7 +331,7 @@ def log(
log_id=log_id,
request_options=request_options,
)
- return response.data
+ return _response.data
def update(
self,
@@ -443,16 +416,10 @@ def update(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.tools.update(
- id="id",
- log_id="log_id",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.tools.update(id='id', log_id='log_id', )
"""
- response = self._raw_client.update(
+ _response = self._raw_client.update(
id,
log_id,
output=output,
@@ -470,7 +437,7 @@ def update(
log_status=log_status,
request_options=request_options,
)
- return response.data
+ return _response.data
def list(
self,
@@ -479,7 +446,7 @@ def list(
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
user_filter: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> SyncPager[ToolResponse]:
@@ -500,7 +467,7 @@ def list(
user_filter : typing.Optional[str]
Case-insensitive filter for users in the Tool. This filter matches against both email address and name of users.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort Tools by
order : typing.Optional[SortOrder]
@@ -517,68 +484,23 @@ def list(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- response = client.tools.list(
- size=1,
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ response = client.tools.list(size=1, )
for item in response:
yield item
# alternatively, you can paginate page-by-page
for page in response.iter_pages():
yield page
"""
- page = page if page is not None else 1
- _response = self._raw_client._client_wrapper.httpx_client.request(
- "tools",
- method="GET",
- params={
- "page": page,
- "size": size,
- "name": name,
- "user_filter": user_filter,
- "sort_by": sort_by,
- "order": order,
- },
+ return self._raw_client.list(
+ page=page,
+ size=size,
+ name=name,
+ user_filter=user_filter,
+ sort_by=sort_by,
+ order=order,
request_options=request_options,
)
- try:
- if 200 <= _response.status_code < 300:
- _parsed_response = typing.cast(
- PaginatedDataToolResponse,
- construct_type(
- type_=PaginatedDataToolResponse, # type: ignore
- object_=_response.json(),
- ),
- )
- _has_next = True
- _get_next = lambda: self.list(
- page=page + 1,
- size=size,
- name=name,
- user_filter=user_filter,
- sort_by=sort_by,
- order=order,
- request_options=request_options,
- )
- _items = _parsed_response.records
- return SyncPager(has_next=_has_next, items=_items, get_next=_get_next)
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(
- HttpValidationError,
- construct_type(
- type_=HttpValidationError, # type: ignore
- object_=_response.json(),
- ),
- )
- )
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
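After this hunk, `tools.list` no longer builds the request and parses pages inline; it delegates to the raw client and returns its `SyncPager[ToolResponse]` directly. Usage, following the docstring above (the API key is a placeholder):

    from humanloop import Humanloop

    client = Humanloop(api_key="YOUR_API_KEY")
    response = client.tools.list(size=1)

    for item in response:               # iterate records, fetching further pages lazily
        print(item)

    for page in response.iter_pages():  # or walk page-by-page
        print(page)
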
def upsert(
self,
@@ -643,26 +565,13 @@ def upsert(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.tools.upsert(
- path="math-tool",
- function={
- "name": "multiply",
- "description": "Multiply two numbers",
- "parameters": {
- "type": "object",
- "properties": {"a": {"type": "number"}, "b": {"type": "number"}},
- "required": ["a", "b"],
- },
- },
- version_name="math-tool-v1",
- version_description="Simple math tool that multiplies two numbers",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.tools.upsert(path='math-tool', function={'name': 'multiply', 'description': 'Multiply two numbers', 'parameters': {'type': 'object'
+ , 'properties': {'a': {'type': 'number'}, 'b': {'type': 'number'}}
+ , 'required': ['a', 'b']
+ }}, version_name='math-tool-v1', version_description='Simple math tool that multiplies two numbers', )
"""
- response = self._raw_client.upsert(
+ _response = self._raw_client.upsert(
path=path,
id=id,
function=function,
@@ -674,7 +583,7 @@ def upsert(
version_description=version_description,
request_options=request_options,
)
- return response.data
+ return _response.data
def get(
self,
@@ -712,18 +621,13 @@ def get(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.tools.get(
- id="tl_789ghi",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.tools.get(id='tl_789ghi', )
"""
- response = self._raw_client.get(
+ _response = self._raw_client.get(
id, version_id=version_id, environment=environment, request_options=request_options
)
- return response.data
+ return _response.data
def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
"""
@@ -744,16 +648,11 @@ def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] =
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.tools.delete(
- id="tl_789ghi",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.tools.delete(id='tl_789ghi', )
"""
- response = self._raw_client.delete(id, request_options=request_options)
- return response.data
+ _response = self._raw_client.delete(id, request_options=request_options)
+ return _response.data
def move(
self,
@@ -788,17 +687,11 @@ def move(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.tools.move(
- id="tl_789ghi",
- path="new directory/new name",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.tools.move(id='tl_789ghi', path='new directory/new name', )
"""
- response = self._raw_client.move(id, path=path, name=name, request_options=request_options)
- return response.data
+ _response = self._raw_client.move(id, path=path, name=name, request_options=request_options)
+ return _response.data
def list_versions(
self,
@@ -829,18 +722,13 @@ def list_versions(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.tools.list_versions(
- id="tl_789ghi",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.tools.list_versions(id='tl_789ghi', )
"""
- response = self._raw_client.list_versions(
+ _response = self._raw_client.list_versions(
id, evaluator_aggregates=evaluator_aggregates, request_options=request_options
)
- return response.data
+ return _response.data
def delete_tool_version(
self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -866,17 +754,11 @@ def delete_tool_version(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.tools.delete_tool_version(
- id="id",
- version_id="version_id",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.tools.delete_tool_version(id='id', version_id='version_id', )
"""
- response = self._raw_client.delete_tool_version(id, version_id, request_options=request_options)
- return response.data
+ _response = self._raw_client.delete_tool_version(id, version_id, request_options=request_options)
+ return _response.data
def update_tool_version(
self,
@@ -915,19 +797,13 @@ def update_tool_version(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.tools.update_tool_version(
- id="id",
- version_id="version_id",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.tools.update_tool_version(id='id', version_id='version_id', )
"""
- response = self._raw_client.update_tool_version(
+ _response = self._raw_client.update_tool_version(
id, version_id, name=name, description=description, request_options=request_options
)
- return response.data
+ return _response.data
def set_deployment(
self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
@@ -960,20 +836,13 @@ def set_deployment(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.tools.set_deployment(
- id="tl_789ghi",
- environment_id="staging",
- version_id="tv_012jkl",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.tools.set_deployment(id='tl_789ghi', environment_id='staging', version_id='tv_012jkl', )
"""
- response = self._raw_client.set_deployment(
+ _response = self._raw_client.set_deployment(
id, environment_id, version_id=version_id, request_options=request_options
)
- return response.data
+ return _response.data
def remove_deployment(
self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1002,17 +871,11 @@ def remove_deployment(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.tools.remove_deployment(
- id="tl_789ghi",
- environment_id="staging",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.tools.remove_deployment(id='tl_789ghi', environment_id='staging', )
"""
- response = self._raw_client.remove_deployment(id, environment_id, request_options=request_options)
- return response.data
+ _response = self._raw_client.remove_deployment(id, environment_id, request_options=request_options)
+ return _response.data
def list_environments(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1036,16 +899,11 @@ def list_environments(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.tools.list_environments(
- id="tl_789ghi",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.tools.list_environments(id='tl_789ghi', )
"""
- response = self._raw_client.list_environments(id, request_options=request_options)
- return response.data
+ _response = self._raw_client.list_environments(id, request_options=request_options)
+ return _response.data
def update_monitoring(
self,
@@ -1082,19 +940,13 @@ def update_monitoring(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.tools.update_monitoring(
- id="tl_789ghi",
- activate=[{"evaluator_version_id": "evv_1abc4308abd"}],
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.tools.update_monitoring(id='tl_789ghi', activate=[{'evaluator_version_id': 'evv_1abc4308abd'}], )
"""
- response = self._raw_client.update_monitoring(
+ _response = self._raw_client.update_monitoring(
id, activate=activate, deactivate=deactivate, request_options=request_options
)
- return response.data
+ return _response.data
def get_environment_variables(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1116,16 +968,11 @@ def get_environment_variables(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.tools.get_environment_variables(
- id="id",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.tools.get_environment_variables(id='id', )
"""
- response = self._raw_client.get_environment_variables(id, request_options=request_options)
- return response.data
+ _response = self._raw_client.get_environment_variables(id, request_options=request_options)
+ return _response.data
def add_environment_variable(
self,
@@ -1155,17 +1002,11 @@ def add_environment_variable(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.tools.add_environment_variable(
- id="id",
- request=[{"name": "name", "value": "value"}],
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.tools.add_environment_variable(id='id', request=[{'name': 'name', 'value': 'value'}], )
"""
- response = self._raw_client.add_environment_variable(id, request=request, request_options=request_options)
- return response.data
+ _response = self._raw_client.add_environment_variable(id, request=request, request_options=request_options)
+ return _response.data
def delete_environment_variable(
self, id: str, name: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1190,17 +1031,11 @@ def delete_environment_variable(
Examples
--------
from humanloop import Humanloop
-
- client = Humanloop(
- api_key="YOUR_API_KEY",
- )
- client.tools.delete_environment_variable(
- id="id",
- name="name",
- )
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.tools.delete_environment_variable(id='id', name='name', )
"""
- response = self._raw_client.delete_environment_variable(id, name, request_options=request_options)
- return response.data
+ _response = self._raw_client.delete_environment_variable(id, name, request_options=request_options)
+ return _response.data
class AsyncToolsClient:
@@ -1316,22 +1151,14 @@ async def call(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
await client.tools.call()
-
-
asyncio.run(main())
"""
- response = await self._raw_client.call(
+ _response = await self._raw_client.call(
version_id=version_id,
environment=environment,
path=path,
@@ -1351,7 +1178,7 @@ async def main() -> None:
log_id=log_id,
request_options=request_options,
)
- return response.data
+ return _response.data
async def log(
self,
@@ -1477,40 +1304,19 @@ async def log(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.tools.log(
- path="math-tool",
- tool={
- "function": {
- "name": "multiply",
- "description": "Multiply two numbers",
- "parameters": {
- "type": "object",
- "properties": {
- "a": {"type": "number"},
- "b": {"type": "number"},
- },
- "required": ["a", "b"],
- },
- }
- },
- inputs={"a": 5, "b": 7},
- output="35",
- )
-
-
+                await client.tools.log(path='math-tool', tool={'function': {'name': 'multiply', 'description': 'Multiply two numbers', 'parameters': {'type': 'object', 'properties': {'a': {'type': 'number'}, 'b': {'type': 'number'}}, 'required': ['a', 'b']}}}, inputs={'a': 5, 'b': 7}, output='35', )
asyncio.run(main())
"""
- response = await self._raw_client.log(
+ _response = await self._raw_client.log(
version_id=version_id,
environment=environment,
path=path,
@@ -1537,7 +1343,7 @@ async def main() -> None:
log_id=log_id,
request_options=request_options,
)
- return response.data
+ return _response.data
async def update(
self,
@@ -1621,25 +1427,14 @@ async def update(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.tools.update(
- id="id",
- log_id="log_id",
- )
-
-
+ await client.tools.update(id='id', log_id='log_id', )
asyncio.run(main())
"""
- response = await self._raw_client.update(
+ _response = await self._raw_client.update(
id,
log_id,
output=output,
@@ -1657,7 +1452,7 @@ async def main() -> None:
log_status=log_status,
request_options=request_options,
)
- return response.data
+ return _response.data
async def list(
self,
@@ -1666,7 +1461,7 @@ async def list(
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
user_filter: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncPager[ToolResponse]:
@@ -1687,7 +1482,7 @@ async def list(
user_filter : typing.Optional[str]
Case-insensitive filter for users in the Tool. This filter matches against both email address and name of users.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort Tools by
order : typing.Optional[SortOrder]
@@ -1703,77 +1498,28 @@ async def list(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- response = await client.tools.list(
- size=1,
- )
+ response = await client.tools.list(size=1, )
async for item in response:
yield item
+
# alternatively, you can paginate page-by-page
async for page in response.iter_pages():
yield page
-
-
asyncio.run(main())
"""
- page = page if page is not None else 1
- _response = await self._raw_client._client_wrapper.httpx_client.request(
- "tools",
- method="GET",
- params={
- "page": page,
- "size": size,
- "name": name,
- "user_filter": user_filter,
- "sort_by": sort_by,
- "order": order,
- },
+ return await self._raw_client.list(
+ page=page,
+ size=size,
+ name=name,
+ user_filter=user_filter,
+ sort_by=sort_by,
+ order=order,
request_options=request_options,
)
- try:
- if 200 <= _response.status_code < 300:
- _parsed_response = typing.cast(
- PaginatedDataToolResponse,
- construct_type(
- type_=PaginatedDataToolResponse, # type: ignore
- object_=_response.json(),
- ),
- )
- _has_next = True
- _get_next = lambda: self.list(
- page=page + 1,
- size=size,
- name=name,
- user_filter=user_filter,
- sort_by=sort_by,
- order=order,
- request_options=request_options,
- )
- _items = _parsed_response.records
- return AsyncPager(has_next=_has_next, items=_items, get_next=_get_next)
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(
- HttpValidationError,
- construct_type(
- type_=HttpValidationError, # type: ignore
- object_=_response.json(),
- ),
- )
- )
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
async def upsert(
self,
@@ -1837,38 +1583,17 @@ async def upsert(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.tools.upsert(
- path="math-tool",
- function={
- "name": "multiply",
- "description": "Multiply two numbers",
- "parameters": {
- "type": "object",
- "properties": {
- "a": {"type": "number"},
- "b": {"type": "number"},
- },
- "required": ["a", "b"],
- },
- },
- version_name="math-tool-v1",
- version_description="Simple math tool that multiplies two numbers",
- )
-
-
+                await client.tools.upsert(path='math-tool', function={'name': 'multiply', 'description': 'Multiply two numbers', 'parameters': {'type': 'object', 'properties': {'a': {'type': 'number'}, 'b': {'type': 'number'}}, 'required': ['a', 'b']}}, version_name='math-tool-v1', version_description='Simple math tool that multiplies two numbers', )
asyncio.run(main())
"""
- response = await self._raw_client.upsert(
+ _response = await self._raw_client.upsert(
path=path,
id=id,
function=function,
@@ -1880,7 +1605,7 @@ async def main() -> None:
version_description=version_description,
request_options=request_options,
)
- return response.data
+ return _response.data
async def get(
self,
@@ -1917,27 +1642,17 @@ async def get(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.tools.get(
- id="tl_789ghi",
- )
-
-
+ await client.tools.get(id='tl_789ghi', )
asyncio.run(main())
"""
- response = await self._raw_client.get(
+ _response = await self._raw_client.get(
id, version_id=version_id, environment=environment, request_options=request_options
)
- return response.data
+ return _response.data
async def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
"""
@@ -1957,25 +1672,15 @@ async def delete(self, id: str, *, request_options: typing.Optional[RequestOptio
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.tools.delete(
- id="tl_789ghi",
- )
-
-
+ await client.tools.delete(id='tl_789ghi', )
asyncio.run(main())
"""
- response = await self._raw_client.delete(id, request_options=request_options)
- return response.data
+ _response = await self._raw_client.delete(id, request_options=request_options)
+ return _response.data
async def move(
self,
@@ -2009,26 +1714,15 @@ async def move(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.tools.move(
- id="tl_789ghi",
- path="new directory/new name",
- )
-
-
+ await client.tools.move(id='tl_789ghi', path='new directory/new name', )
asyncio.run(main())
"""
- response = await self._raw_client.move(id, path=path, name=name, request_options=request_options)
- return response.data
+ _response = await self._raw_client.move(id, path=path, name=name, request_options=request_options)
+ return _response.data
async def list_versions(
self,
@@ -2058,27 +1752,17 @@ async def list_versions(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.tools.list_versions(
- id="tl_789ghi",
- )
-
-
+ await client.tools.list_versions(id='tl_789ghi', )
asyncio.run(main())
"""
- response = await self._raw_client.list_versions(
+ _response = await self._raw_client.list_versions(
id, evaluator_aggregates=evaluator_aggregates, request_options=request_options
)
- return response.data
+ return _response.data
async def delete_tool_version(
self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -2103,26 +1787,15 @@ async def delete_tool_version(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.tools.delete_tool_version(
- id="id",
- version_id="version_id",
- )
-
-
+ await client.tools.delete_tool_version(id='id', version_id='version_id', )
asyncio.run(main())
"""
- response = await self._raw_client.delete_tool_version(id, version_id, request_options=request_options)
- return response.data
+ _response = await self._raw_client.delete_tool_version(id, version_id, request_options=request_options)
+ return _response.data
async def update_tool_version(
self,
@@ -2160,28 +1833,17 @@ async def update_tool_version(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.tools.update_tool_version(
- id="id",
- version_id="version_id",
- )
-
-
+ await client.tools.update_tool_version(id='id', version_id='version_id', )
asyncio.run(main())
"""
- response = await self._raw_client.update_tool_version(
+ _response = await self._raw_client.update_tool_version(
id, version_id, name=name, description=description, request_options=request_options
)
- return response.data
+ return _response.data
async def set_deployment(
self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
@@ -2213,29 +1875,17 @@ async def set_deployment(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.tools.set_deployment(
- id="tl_789ghi",
- environment_id="staging",
- version_id="tv_012jkl",
- )
-
-
+ await client.tools.set_deployment(id='tl_789ghi', environment_id='staging', version_id='tv_012jkl', )
asyncio.run(main())
"""
- response = await self._raw_client.set_deployment(
+ _response = await self._raw_client.set_deployment(
id, environment_id, version_id=version_id, request_options=request_options
)
- return response.data
+ return _response.data
async def remove_deployment(
self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -2263,26 +1913,15 @@ async def remove_deployment(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.tools.remove_deployment(
- id="tl_789ghi",
- environment_id="staging",
- )
-
-
+ await client.tools.remove_deployment(id='tl_789ghi', environment_id='staging', )
asyncio.run(main())
"""
- response = await self._raw_client.remove_deployment(id, environment_id, request_options=request_options)
- return response.data
+ _response = await self._raw_client.remove_deployment(id, environment_id, request_options=request_options)
+ return _response.data
async def list_environments(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -2305,25 +1944,15 @@ async def list_environments(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.tools.list_environments(
- id="tl_789ghi",
- )
-
-
+ await client.tools.list_environments(id='tl_789ghi', )
asyncio.run(main())
"""
- response = await self._raw_client.list_environments(id, request_options=request_options)
- return response.data
+ _response = await self._raw_client.list_environments(id, request_options=request_options)
+ return _response.data
async def update_monitoring(
self,
@@ -2359,28 +1988,17 @@ async def update_monitoring(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.tools.update_monitoring(
- id="tl_789ghi",
- activate=[{"evaluator_version_id": "evv_1abc4308abd"}],
- )
-
-
+ await client.tools.update_monitoring(id='tl_789ghi', activate=[{'evaluator_version_id': 'evv_1abc4308abd'}], )
asyncio.run(main())
"""
- response = await self._raw_client.update_monitoring(
+ _response = await self._raw_client.update_monitoring(
id, activate=activate, deactivate=deactivate, request_options=request_options
)
- return response.data
+ return _response.data
async def get_environment_variables(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -2401,25 +2019,15 @@ async def get_environment_variables(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.tools.get_environment_variables(
- id="id",
- )
-
-
+ await client.tools.get_environment_variables(id='id', )
asyncio.run(main())
"""
- response = await self._raw_client.get_environment_variables(id, request_options=request_options)
- return response.data
+ _response = await self._raw_client.get_environment_variables(id, request_options=request_options)
+ return _response.data
async def add_environment_variable(
self,
@@ -2448,26 +2056,17 @@ async def add_environment_variable(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.tools.add_environment_variable(
- id="id",
- request=[{"name": "name", "value": "value"}],
- )
-
-
+ await client.tools.add_environment_variable(id='id', request=[{'name': 'name', 'value': 'value'}], )
asyncio.run(main())
"""
- response = await self._raw_client.add_environment_variable(id, request=request, request_options=request_options)
- return response.data
+ _response = await self._raw_client.add_environment_variable(
+ id, request=request, request_options=request_options
+ )
+ return _response.data
async def delete_environment_variable(
self, id: str, name: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -2491,23 +2090,12 @@ async def delete_environment_variable(
Examples
--------
- import asyncio
-
from humanloop import AsyncHumanloop
-
- client = AsyncHumanloop(
- api_key="YOUR_API_KEY",
- )
-
-
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
async def main() -> None:
- await client.tools.delete_environment_variable(
- id="id",
- name="name",
- )
-
-
+ await client.tools.delete_environment_variable(id='id', name='name', )
asyncio.run(main())
"""
- response = await self._raw_client.delete_environment_variable(id, name, request_options=request_options)
- return response.data
+ _response = await self._raw_client.delete_environment_variable(id, name, request_options=request_options)
+ return _response.data
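Note on the pagination change in this file: AsyncToolsClient.list no longer builds the pager inline and instead delegates to the raw client, which returns the AsyncPager[ToolResponse] directly. A minimal consumption sketch, following only the iteration shown in the generated docstring above (per-item `async for` and `iter_pages()`); nothing beyond that docstring is asserted as confirmed API here:

import asyncio

from humanloop import AsyncHumanloop

client = AsyncHumanloop(api_key="YOUR_API_KEY")

async def main() -> None:
    pager = await client.tools.list(size=50)
    tools = []
    async for tool in pager:  # ToolResponse items, fetched page by page behind the scenes
        tools.append(tool)
    print(len(tools))
    # Page-by-page iteration is also available, as in the docstring: pager.iter_pages()

asyncio.run(main())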
diff --git a/src/humanloop/tools/raw_client.py b/src/humanloop/tools/raw_client.py
index b412b771..85bbef9e 100644
--- a/src/humanloop/tools/raw_client.py
+++ b/src/humanloop/tools/raw_client.py
@@ -1,37 +1,40 @@
# This file was auto-generated by Fern from our API Definition.
-import typing
-from ..core.client_wrapper import SyncClientWrapper
-from ..requests.tool_kernel_request import ToolKernelRequestParams
import datetime as dt
-from ..types.log_status import LogStatus
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.http_response import AsyncHttpResponse, HttpResponse
+from ..core.jsonable_encoder import jsonable_encoder
+from ..core.pagination import AsyncPager, BaseHttpResponse, SyncPager
from ..core.request_options import RequestOptions
-from ..core.http_response import HttpResponse
-from ..types.tool_call_response import ToolCallResponse
from ..core.serialization import convert_and_respect_annotation_metadata
from ..core.unchecked_base_model import construct_type
from ..errors.unprocessable_entity_error import UnprocessableEntityError
-from ..types.http_validation_error import HttpValidationError
-from json.decoder import JSONDecodeError
-from ..core.api_error import ApiError
-from ..types.create_tool_log_response import CreateToolLogResponse
-from ..types.log_response import LogResponse
-from ..core.jsonable_encoder import jsonable_encoder
-from ..requests.tool_function import ToolFunctionParams
-from ..types.files_tool_type import FilesToolType
-from ..types.tool_response import ToolResponse
-from ..types.list_tools import ListTools
-from ..types.file_environment_response import FileEnvironmentResponse
from ..requests.evaluator_activation_deactivation_request_activate_item import (
EvaluatorActivationDeactivationRequestActivateItemParams,
)
from ..requests.evaluator_activation_deactivation_request_deactivate_item import (
EvaluatorActivationDeactivationRequestDeactivateItemParams,
)
-from ..types.file_environment_variable_request import FileEnvironmentVariableRequest
from ..requests.file_environment_variable_request import FileEnvironmentVariableRequestParams
-from ..core.client_wrapper import AsyncClientWrapper
-from ..core.http_response import AsyncHttpResponse
+from ..requests.tool_function import ToolFunctionParams
+from ..requests.tool_kernel_request import ToolKernelRequestParams
+from ..types.create_tool_log_response import CreateToolLogResponse
+from ..types.file_environment_response import FileEnvironmentResponse
+from ..types.file_environment_variable_request import FileEnvironmentVariableRequest
+from ..types.file_sort_by import FileSortBy
+from ..types.files_tool_type import FilesToolType
+from ..types.http_validation_error import HttpValidationError
+from ..types.list_tools import ListTools
+from ..types.log_response import LogResponse
+from ..types.log_status import LogStatus
+from ..types.paginated_data_tool_response import PaginatedDataToolResponse
+from ..types.sort_order import SortOrder
+from ..types.tool_call_response import ToolCallResponse
+from ..types.tool_response import ToolResponse
# this is used as the default value for optional parameters
OMIT = typing.cast(typing.Any, ...)
@@ -181,18 +184,19 @@ def call(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def log(
self,
@@ -367,18 +371,19 @@ def log(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def update(
self,
@@ -496,18 +501,115 @@ def update(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+ def list(
+ self,
+ *,
+ page: typing.Optional[int] = None,
+ size: typing.Optional[int] = None,
+ name: typing.Optional[str] = None,
+ user_filter: typing.Optional[str] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
+ order: typing.Optional[SortOrder] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> SyncPager[ToolResponse]:
+ """
+ Get a list of all Tools.
+
+ Parameters
+ ----------
+ page : typing.Optional[int]
+ Page offset for pagination.
+
+ size : typing.Optional[int]
+ Page size for pagination. Number of Tools to fetch.
+
+ name : typing.Optional[str]
+ Case-insensitive filter for Tool name.
+
+ user_filter : typing.Optional[str]
+ Case-insensitive filter for users in the Tool. This filter matches against both email address and name of users.
+
+ sort_by : typing.Optional[FileSortBy]
+ Field to sort Tools by
+
+ order : typing.Optional[SortOrder]
+ Direction to sort by.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ SyncPager[ToolResponse]
+ Successful Response
+ """
+ page = page if page is not None else 1
+
+ _response = self._client_wrapper.httpx_client.request(
+ "tools",
+ method="GET",
+ params={
+ "page": page,
+ "size": size,
+ "name": name,
+ "user_filter": user_filter,
+ "sort_by": sort_by,
+ "order": order,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _parsed_response = typing.cast(
+ PaginatedDataToolResponse,
+ construct_type(
+ type_=PaginatedDataToolResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ _items = _parsed_response.records
+ _has_next = True
+ _get_next = lambda: self.list(
+ page=page + 1,
+ size=size,
+ name=name,
+ user_filter=user_filter,
+ sort_by=sort_by,
+ order=order,
+ request_options=request_options,
+ )
+ return SyncPager(
+ has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response)
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ headers=dict(_response.headers),
+ body=typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ ),
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
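The synchronous list() added above wires SyncPager up from has_next, items, and a get_next lambda that re-issues the request with page + 1. A sketch of walking every page through that contract; it assumes the constructor keywords are exposed as attributes of the same names and reaches the raw client via the private _raw_client attribute used by the wrapper, neither of which is confirmed by this patch. Because has_next is hard-coded to True here, the sketch stops on the first empty page:

from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

def collect_all_tools():
    pager = client.tools._raw_client.list(size=100)  # SyncPager[ToolResponse] (assumed accessor)
    collected = []
    while True:
        page_items = pager.items or []  # assumed attribute mirroring the constructor kwarg
        collected.extend(page_items)
        if not page_items or not pager.has_next:
            break
        pager = pager.get_next()  # re-issues list() with page + 1, per the lambda above
    return collected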
def upsert(
self,
@@ -603,18 +705,19 @@ def upsert(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def get(
self,
@@ -670,18 +773,19 @@ def get(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> HttpResponse[None]:
"""
@@ -709,18 +813,19 @@ def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] =
return HttpResponse(response=_response, data=None)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def move(
self,
@@ -777,18 +882,19 @@ def move(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def list_versions(
self,
@@ -836,18 +942,19 @@ def list_versions(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def delete_tool_version(
self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -880,18 +987,19 @@ def delete_tool_version(
return HttpResponse(response=_response, data=None)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def update_tool_version(
self,
@@ -934,6 +1042,9 @@ def update_tool_version(
"name": name,
"description": description,
},
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
)
@@ -949,18 +1060,19 @@ def update_tool_version(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def set_deployment(
self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
@@ -1010,18 +1122,19 @@ def set_deployment(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def remove_deployment(
self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1057,18 +1170,19 @@ def remove_deployment(
return HttpResponse(response=_response, data=None)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def list_environments(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1106,18 +1220,19 @@ def list_environments(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def update_monitoring(
self,
@@ -1166,6 +1281,9 @@ def update_monitoring(
direction="write",
),
},
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
)
@@ -1181,18 +1299,19 @@ def update_monitoring(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def get_environment_variables(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1228,18 +1347,19 @@ def get_environment_variables(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def add_environment_variable(
self,
@@ -1272,6 +1392,9 @@ def add_environment_variable(
json=convert_and_respect_annotation_metadata(
object_=request, annotation=typing.Sequence[FileEnvironmentVariableRequestParams], direction="write"
),
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
)
@@ -1287,18 +1410,19 @@ def add_environment_variable(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
def delete_environment_variable(
self, id: str, name: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -1337,18 +1461,19 @@ def delete_environment_variable(
return HttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
class AsyncRawToolsClient:
@@ -1495,18 +1620,19 @@ async def call(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def log(
self,
@@ -1681,18 +1807,19 @@ async def log(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def update(
self,
@@ -1810,18 +1937,118 @@ async def update(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
+ ),
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
+
+ async def list(
+ self,
+ *,
+ page: typing.Optional[int] = None,
+ size: typing.Optional[int] = None,
+ name: typing.Optional[str] = None,
+ user_filter: typing.Optional[str] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
+ order: typing.Optional[SortOrder] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncPager[ToolResponse]:
+ """
+ Get a list of all Tools.
+
+ Parameters
+ ----------
+ page : typing.Optional[int]
+ Page offset for pagination.
+
+ size : typing.Optional[int]
+ Page size for pagination. Number of Tools to fetch.
+
+ name : typing.Optional[str]
+ Case-insensitive filter for Tool name.
+
+ user_filter : typing.Optional[str]
+ Case-insensitive filter for users in the Tool. This filter matches against both email address and name of users.
+
+ sort_by : typing.Optional[FileSortBy]
+ Field to sort Tools by
+
+ order : typing.Optional[SortOrder]
+ Direction to sort by.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncPager[ToolResponse]
+ Successful Response
+ """
+ page = page if page is not None else 1
+
+ _response = await self._client_wrapper.httpx_client.request(
+ "tools",
+ method="GET",
+ params={
+ "page": page,
+ "size": size,
+ "name": name,
+ "user_filter": user_filter,
+ "sort_by": sort_by,
+ "order": order,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _parsed_response = typing.cast(
+ PaginatedDataToolResponse,
+ construct_type(
+ type_=PaginatedDataToolResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ _items = _parsed_response.records
+ _has_next = True
+
+ async def _get_next():
+ return await self.list(
+ page=page + 1,
+ size=size,
+ name=name,
+ user_filter=user_filter,
+ sort_by=sort_by,
+ order=order,
+ request_options=request_options,
)
+
+ return AsyncPager(
+ has_next=_has_next, items=_items, get_next=_get_next, response=BaseHttpResponse(response=_response)
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ headers=dict(_response.headers),
+ body=typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def upsert(
self,
@@ -1917,18 +2144,19 @@ async def upsert(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def get(
self,
@@ -1984,18 +2212,19 @@ async def get(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def delete(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -2025,18 +2254,19 @@ async def delete(
return AsyncHttpResponse(response=_response, data=None)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def move(
self,
@@ -2093,18 +2323,19 @@ async def move(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def list_versions(
self,
@@ -2152,18 +2383,19 @@ async def list_versions(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def delete_tool_version(
self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -2196,18 +2428,19 @@ async def delete_tool_version(
return AsyncHttpResponse(response=_response, data=None)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def update_tool_version(
self,
@@ -2250,6 +2483,9 @@ async def update_tool_version(
"name": name,
"description": description,
},
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
)
@@ -2265,18 +2501,19 @@ async def update_tool_version(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def set_deployment(
self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None
@@ -2326,18 +2563,19 @@ async def set_deployment(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def remove_deployment(
self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -2373,18 +2611,19 @@ async def remove_deployment(
return AsyncHttpResponse(response=_response, data=None)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def list_environments(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -2422,18 +2661,19 @@ async def list_environments(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def update_monitoring(
self,
@@ -2482,6 +2722,9 @@ async def update_monitoring(
direction="write",
),
},
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
)
@@ -2497,18 +2740,19 @@ async def update_monitoring(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def get_environment_variables(
self, id: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -2544,18 +2788,19 @@ async def get_environment_variables(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def add_environment_variable(
self,
@@ -2588,6 +2833,9 @@ async def add_environment_variable(
json=convert_and_respect_annotation_metadata(
object_=request, annotation=typing.Sequence[FileEnvironmentVariableRequestParams], direction="write"
),
+ headers={
+ "content-type": "application/json",
+ },
request_options=request_options,
omit=OMIT,
)
@@ -2603,18 +2851,19 @@ async def add_environment_variable(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
async def delete_environment_variable(
self, id: str, name: str, *, request_options: typing.Optional[RequestOptions] = None
@@ -2653,15 +2902,16 @@ async def delete_environment_variable(
return AsyncHttpResponse(response=_response, data=_data)
if _response.status_code == 422:
raise UnprocessableEntityError(
- typing.cast(
+ headers=dict(_response.headers),
+ body=typing.cast(
HttpValidationError,
construct_type(
type_=HttpValidationError, # type: ignore
object_=_response.json(),
),
- )
+ ),
)
_response_json = _response.json()
except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
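Every raw-client method in this file now raises UnprocessableEntityError with headers= and body= keywords, and ApiError with headers= alongside the status code and body. A hedged sketch of surfacing that extra context from calling code; the .headers, .body, and .status_code attribute names are assumed to mirror those constructor keywords and are not confirmed by this patch:

from humanloop import Humanloop
from humanloop.core.api_error import ApiError
from humanloop.errors.unprocessable_entity_error import UnprocessableEntityError

client = Humanloop(api_key="YOUR_API_KEY")

try:
    client.tools.list_environments(id="tl_789ghi")
except UnprocessableEntityError as exc:
    # 422: body carries the parsed HttpValidationError, headers the raw response headers (assumed attrs)
    print(exc.body, exc.headers)
except ApiError as exc:
    # Any other non-2xx response, or a body that failed to decode as JSON
    print(exc.status_code, exc.headers, exc.body)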
diff --git a/src/humanloop/types/__init__.py b/src/humanloop/types/__init__.py
index 7c1d30f5..5662ea6d 100644
--- a/src/humanloop/types/__init__.py
+++ b/src/humanloop/types/__init__.py
@@ -1,5 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
+# isort: skip_file
+
from .agent_call_response import AgentCallResponse
from .agent_call_response_tool_choice import AgentCallResponseToolChoice
from .agent_call_stream_response import AgentCallStreamResponse
@@ -94,6 +96,7 @@
from .file_id import FileId
from .file_path import FilePath
from .file_request import FileRequest
+from .file_sort_by import FileSortBy
from .file_type import FileType
from .files_tool_type import FilesToolType
from .flow_kernel_request import FlowKernelRequest
@@ -155,7 +158,6 @@
from .populate_template_response_reasoning_effort import PopulateTemplateResponseReasoningEffort
from .populate_template_response_stop import PopulateTemplateResponseStop
from .populate_template_response_template import PopulateTemplateResponseTemplate
-from .project_sort_by import ProjectSortBy
from .prompt_call_log_response import PromptCallLogResponse
from .prompt_call_response import PromptCallResponse
from .prompt_call_response_tool_choice import PromptCallResponseToolChoice
@@ -299,6 +301,7 @@
"FileId",
"FilePath",
"FileRequest",
+ "FileSortBy",
"FileType",
"FilesToolType",
"FlowKernelRequest",
@@ -356,7 +359,6 @@
"PopulateTemplateResponseReasoningEffort",
"PopulateTemplateResponseStop",
"PopulateTemplateResponseTemplate",
- "ProjectSortBy",
"PromptCallLogResponse",
"PromptCallResponse",
"PromptCallResponseToolChoice",
diff --git a/src/humanloop/types/agent_call_response.py b/src/humanloop/types/agent_call_response.py
index ba3bbfec..9bf3bb70 100644
--- a/src/humanloop/types/agent_call_response.py
+++ b/src/humanloop/types/agent_call_response.py
@@ -1,28 +1,16 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .agent_linked_file_response import AgentLinkedFileResponse
-from .agent_response import AgentResponse
-from .evaluator_response import EvaluatorResponse
-from .flow_response import FlowResponse
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse
-from .prompt_response import PromptResponse
-from .tool_response import ToolResponse
-from .version_deployment_response import VersionDeploymentResponse
-from .version_id_response import VersionIdResponse
-from .agent_log_response import AgentLogResponse
-from .evaluator_log_response import EvaluatorLogResponse
-from .flow_log_response import FlowLogResponse
-from .prompt_log_response import PromptLogResponse
-from .tool_log_response import ToolLogResponse
+from __future__ import annotations
+
+import datetime as dt
import typing
-from .chat_message import ChatMessage
+
import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
+from ..core.unchecked_base_model import UncheckedBaseModel
from .agent_call_response_tool_choice import AgentCallResponseToolChoice
-import datetime as dt
+from .chat_message import ChatMessage
from .log_status import LogStatus
-from .log_response import LogResponse
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
class AgentCallResponse(UncheckedBaseModel):
@@ -79,7 +67,7 @@ class AgentCallResponse(UncheckedBaseModel):
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
"""
- agent: AgentResponse = pydantic.Field()
+ agent: "AgentResponse" = pydantic.Field()
"""
Agent that generated the Log.
"""
@@ -189,7 +177,7 @@ class AgentCallResponse(UncheckedBaseModel):
Unique identifier for the Log.
"""
- evaluator_logs: typing.List[EvaluatorLogResponse] = pydantic.Field()
+ evaluator_logs: typing.List["EvaluatorLogResponse"] = pydantic.Field()
"""
List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
"""
@@ -204,7 +192,7 @@ class AgentCallResponse(UncheckedBaseModel):
Identifier for the Trace that the Log belongs to.
"""
- trace_children: typing.Optional[typing.List[LogResponse]] = pydantic.Field(default=None)
+ trace_children: typing.Optional[typing.List["LogResponse"]] = pydantic.Field(default=None)
"""
Logs nested under this Log in the Trace.
"""
@@ -222,3 +210,22 @@ class Config:
frozen = True
smart_union = True
extra = pydantic.Extra.allow
+
+
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001
+from .agent_response import AgentResponse # noqa: E402, F401, I001
+from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001
+from .flow_response import FlowResponse # noqa: E402, F401, I001
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001
+from .prompt_response import PromptResponse # noqa: E402, F401, I001
+from .tool_response import ToolResponse # noqa: E402, F401, I001
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001
+from .version_id_response import VersionIdResponse # noqa: E402, F401, I001
+from .agent_log_response import AgentLogResponse # noqa: E402, F401, I001
+from .evaluator_log_response import EvaluatorLogResponse # noqa: E402, F401, I001
+from .flow_log_response import FlowLogResponse # noqa: E402, F401, I001
+from .prompt_log_response import PromptLogResponse # noqa: E402, F401, I001
+from .tool_log_response import ToolLogResponse # noqa: E402, F401, I001
+from .log_response import LogResponse # noqa: E402, F401, I001
+
+update_forward_refs(AgentCallResponse)
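
The pattern above (string annotations on the mutually referential fields, the related model imports deferred to the bottom of the module, then `update_forward_refs`) is what breaks the circular imports between the Log and Response models; it is also why `# isort: skip_file` was added to `types/__init__.py`, since the import order is now load-bearing. A self-contained sketch of the same idea with hypothetical models, using plain pydantic rather than the SDK's `core.pydantic_utilities` helper:

    from __future__ import annotations

    import typing

    import pydantic


    class Parent(pydantic.BaseModel):
        # The annotation is a string forward reference; Child is not defined yet
        # (in the generated modules it lives in a sibling module that is only
        # imported at the bottom of the file).
        children: typing.List["Child"] = []


    class Child(pydantic.BaseModel):
        parent: typing.Optional["Parent"] = None


    # Resolve the string annotations once every referenced model is importable.
    Parent.update_forward_refs()
    Child.update_forward_refs()

    print(Parent(children=[Child()]))
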
diff --git a/src/humanloop/types/agent_call_response_tool_choice.py b/src/humanloop/types/agent_call_response_tool_choice.py
index 95eca73e..2d5a032d 100644
--- a/src/humanloop/types/agent_call_response_tool_choice.py
+++ b/src/humanloop/types/agent_call_response_tool_choice.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from .tool_choice import ToolChoice
AgentCallResponseToolChoice = typing.Union[
diff --git a/src/humanloop/types/agent_call_stream_response.py b/src/humanloop/types/agent_call_stream_response.py
index 673d3738..c7fa9e1c 100644
--- a/src/humanloop/types/agent_call_stream_response.py
+++ b/src/humanloop/types/agent_call_stream_response.py
@@ -1,26 +1,15 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .agent_linked_file_response import AgentLinkedFileResponse
-from .agent_log_response import AgentLogResponse
-from .agent_response import AgentResponse
-from .evaluator_log_response import EvaluatorLogResponse
-from .evaluator_response import EvaluatorResponse
-from .flow_log_response import FlowLogResponse
-from .flow_response import FlowResponse
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse
-from .prompt_log_response import PromptLogResponse
-from .prompt_response import PromptResponse
-from .tool_log_response import ToolLogResponse
-from .tool_response import ToolResponse
-from .version_deployment_response import VersionDeploymentResponse
-from .version_id_response import VersionIdResponse
+from __future__ import annotations
+
+import datetime as dt
import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
+from ..core.unchecked_base_model import UncheckedBaseModel
from .agent_call_stream_response_payload import AgentCallStreamResponsePayload
from .event_type import EventType
-import datetime as dt
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-import pydantic
class AgentCallStreamResponse(UncheckedBaseModel):
@@ -42,3 +31,21 @@ class Config:
frozen = True
smart_union = True
extra = pydantic.Extra.allow
+
+
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001
+from .agent_log_response import AgentLogResponse # noqa: E402, F401, I001
+from .agent_response import AgentResponse # noqa: E402, F401, I001
+from .evaluator_log_response import EvaluatorLogResponse # noqa: E402, F401, I001
+from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001
+from .flow_log_response import FlowLogResponse # noqa: E402, F401, I001
+from .flow_response import FlowResponse # noqa: E402, F401, I001
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001
+from .prompt_log_response import PromptLogResponse # noqa: E402, F401, I001
+from .prompt_response import PromptResponse # noqa: E402, F401, I001
+from .tool_log_response import ToolLogResponse # noqa: E402, F401, I001
+from .tool_response import ToolResponse # noqa: E402, F401, I001
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001
+from .version_id_response import VersionIdResponse # noqa: E402, F401, I001
+
+update_forward_refs(AgentCallStreamResponse)
diff --git a/src/humanloop/types/agent_call_stream_response_payload.py b/src/humanloop/types/agent_call_stream_response_payload.py
index 85422047..38120e12 100644
--- a/src/humanloop/types/agent_call_stream_response_payload.py
+++ b/src/humanloop/types/agent_call_stream_response_payload.py
@@ -1,8 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from .log_stream_response import LogStreamResponse
+
from .log_response import LogResponse
+from .log_stream_response import LogStreamResponse
from .tool_call import ToolCall
AgentCallStreamResponsePayload = typing.Union[LogStreamResponse, LogResponse, ToolCall]
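
Because the stream payload is a three-way union, a consumer has to branch on what each event carries. A rough, hypothetical sketch; it assumes `AgentCallStreamResponse` exposes `type` and `payload` fields like its sibling stream models, and it only isinstance-checks `ToolCall` since `LogResponse`/`LogStreamResponse` may themselves be unions:

    from humanloop.types import AgentCallStreamResponse
    from humanloop.types.tool_call import ToolCall


    def describe_event(event: AgentCallStreamResponse) -> str:
        payload = event.payload
        if payload is None:
            # Control events may carry no payload at all.
            return f"{event.type}: no payload"
        if isinstance(payload, ToolCall):
            # The agent is asking the caller to execute a tool.
            return f"{event.type}: tool call"
        # Otherwise the payload is a log chunk or a completed log
        # (LogStreamResponse / LogResponse in the union above).
        return f"{event.type}: {type(payload).__name__}"
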
diff --git a/src/humanloop/types/agent_config_response.py b/src/humanloop/types/agent_config_response.py
index 659e73a6..ba346181 100644
--- a/src/humanloop/types/agent_config_response.py
+++ b/src/humanloop/types/agent_config_response.py
@@ -1,9 +1,10 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
import typing
+
import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
class AgentConfigResponse(UncheckedBaseModel):
diff --git a/src/humanloop/types/agent_continue_call_response.py b/src/humanloop/types/agent_continue_call_response.py
index c98af953..be988d07 100644
--- a/src/humanloop/types/agent_continue_call_response.py
+++ b/src/humanloop/types/agent_continue_call_response.py
@@ -1,28 +1,16 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .agent_linked_file_response import AgentLinkedFileResponse
-from .agent_response import AgentResponse
-from .evaluator_response import EvaluatorResponse
-from .flow_response import FlowResponse
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse
-from .prompt_response import PromptResponse
-from .tool_response import ToolResponse
-from .version_deployment_response import VersionDeploymentResponse
-from .version_id_response import VersionIdResponse
-from .agent_log_response import AgentLogResponse
-from .evaluator_log_response import EvaluatorLogResponse
-from .flow_log_response import FlowLogResponse
-from .prompt_log_response import PromptLogResponse
-from .tool_log_response import ToolLogResponse
+from __future__ import annotations
+
+import datetime as dt
import typing
-from .chat_message import ChatMessage
+
import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
+from ..core.unchecked_base_model import UncheckedBaseModel
from .agent_continue_call_response_tool_choice import AgentContinueCallResponseToolChoice
-import datetime as dt
+from .chat_message import ChatMessage
from .log_status import LogStatus
-from .log_response import LogResponse
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
class AgentContinueCallResponse(UncheckedBaseModel):
@@ -79,7 +67,7 @@ class AgentContinueCallResponse(UncheckedBaseModel):
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
"""
- agent: AgentResponse = pydantic.Field()
+ agent: "AgentResponse" = pydantic.Field()
"""
Agent that generated the Log.
"""
@@ -189,7 +177,7 @@ class AgentContinueCallResponse(UncheckedBaseModel):
Unique identifier for the Log.
"""
- evaluator_logs: typing.List[EvaluatorLogResponse] = pydantic.Field()
+ evaluator_logs: typing.List["EvaluatorLogResponse"] = pydantic.Field()
"""
List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
"""
@@ -204,7 +192,7 @@ class AgentContinueCallResponse(UncheckedBaseModel):
Identifier for the Trace that the Log belongs to.
"""
- trace_children: typing.Optional[typing.List[LogResponse]] = pydantic.Field(default=None)
+ trace_children: typing.Optional[typing.List["LogResponse"]] = pydantic.Field(default=None)
"""
Logs nested under this Log in the Trace.
"""
@@ -222,3 +210,22 @@ class Config:
frozen = True
smart_union = True
extra = pydantic.Extra.allow
+
+
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001
+from .agent_response import AgentResponse # noqa: E402, F401, I001
+from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001
+from .flow_response import FlowResponse # noqa: E402, F401, I001
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001
+from .prompt_response import PromptResponse # noqa: E402, F401, I001
+from .tool_response import ToolResponse # noqa: E402, F401, I001
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001
+from .version_id_response import VersionIdResponse # noqa: E402, F401, I001
+from .agent_log_response import AgentLogResponse # noqa: E402, F401, I001
+from .evaluator_log_response import EvaluatorLogResponse # noqa: E402, F401, I001
+from .flow_log_response import FlowLogResponse # noqa: E402, F401, I001
+from .prompt_log_response import PromptLogResponse # noqa: E402, F401, I001
+from .tool_log_response import ToolLogResponse # noqa: E402, F401, I001
+from .log_response import LogResponse # noqa: E402, F401, I001
+
+update_forward_refs(AgentContinueCallResponse)
diff --git a/src/humanloop/types/agent_continue_call_response_tool_choice.py b/src/humanloop/types/agent_continue_call_response_tool_choice.py
index 5b90e98d..731cf6b2 100644
--- a/src/humanloop/types/agent_continue_call_response_tool_choice.py
+++ b/src/humanloop/types/agent_continue_call_response_tool_choice.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from .tool_choice import ToolChoice
AgentContinueCallResponseToolChoice = typing.Union[
diff --git a/src/humanloop/types/agent_continue_call_stream_response.py b/src/humanloop/types/agent_continue_call_stream_response.py
index cdd34dce..39f7642d 100644
--- a/src/humanloop/types/agent_continue_call_stream_response.py
+++ b/src/humanloop/types/agent_continue_call_stream_response.py
@@ -1,26 +1,15 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .agent_linked_file_response import AgentLinkedFileResponse
-from .agent_log_response import AgentLogResponse
-from .agent_response import AgentResponse
-from .evaluator_log_response import EvaluatorLogResponse
-from .evaluator_response import EvaluatorResponse
-from .flow_log_response import FlowLogResponse
-from .flow_response import FlowResponse
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse
-from .prompt_log_response import PromptLogResponse
-from .prompt_response import PromptResponse
-from .tool_log_response import ToolLogResponse
-from .tool_response import ToolResponse
-from .version_deployment_response import VersionDeploymentResponse
-from .version_id_response import VersionIdResponse
+from __future__ import annotations
+
+import datetime as dt
import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
+from ..core.unchecked_base_model import UncheckedBaseModel
from .agent_continue_call_stream_response_payload import AgentContinueCallStreamResponsePayload
from .event_type import EventType
-import datetime as dt
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-import pydantic
class AgentContinueCallStreamResponse(UncheckedBaseModel):
@@ -42,3 +31,21 @@ class Config:
frozen = True
smart_union = True
extra = pydantic.Extra.allow
+
+
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001
+from .agent_log_response import AgentLogResponse # noqa: E402, F401, I001
+from .agent_response import AgentResponse # noqa: E402, F401, I001
+from .evaluator_log_response import EvaluatorLogResponse # noqa: E402, F401, I001
+from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001
+from .flow_log_response import FlowLogResponse # noqa: E402, F401, I001
+from .flow_response import FlowResponse # noqa: E402, F401, I001
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001
+from .prompt_log_response import PromptLogResponse # noqa: E402, F401, I001
+from .prompt_response import PromptResponse # noqa: E402, F401, I001
+from .tool_log_response import ToolLogResponse # noqa: E402, F401, I001
+from .tool_response import ToolResponse # noqa: E402, F401, I001
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001
+from .version_id_response import VersionIdResponse # noqa: E402, F401, I001
+
+update_forward_refs(AgentContinueCallStreamResponse)
diff --git a/src/humanloop/types/agent_continue_call_stream_response_payload.py b/src/humanloop/types/agent_continue_call_stream_response_payload.py
index 8e23829b..1d51d8d2 100644
--- a/src/humanloop/types/agent_continue_call_stream_response_payload.py
+++ b/src/humanloop/types/agent_continue_call_stream_response_payload.py
@@ -1,8 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from .log_stream_response import LogStreamResponse
+
from .log_response import LogResponse
+from .log_stream_response import LogStreamResponse
from .tool_call import ToolCall
AgentContinueCallStreamResponsePayload = typing.Union[LogStreamResponse, LogResponse, ToolCall]
diff --git a/src/humanloop/types/agent_continue_response.py b/src/humanloop/types/agent_continue_response.py
deleted file mode 100644
index 0bbd7858..00000000
--- a/src/humanloop/types/agent_continue_response.py
+++ /dev/null
@@ -1,224 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .agent_linked_file_response import AgentLinkedFileResponse
-from .agent_response import AgentResponse
-from .evaluator_response import EvaluatorResponse
-from .flow_response import FlowResponse
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse
-from .prompt_response import PromptResponse
-from .tool_response import ToolResponse
-from .version_deployment_response import VersionDeploymentResponse
-from .version_id_response import VersionIdResponse
-from .agent_log_response import AgentLogResponse
-from .evaluator_log_response import EvaluatorLogResponse
-from .flow_log_response import FlowLogResponse
-from .prompt_log_response import PromptLogResponse
-from .tool_log_response import ToolLogResponse
-import typing
-from .chat_message import ChatMessage
-import pydantic
-from .agent_continue_response_tool_choice import AgentContinueResponseToolChoice
-import datetime as dt
-from .log_status import LogStatus
-from .log_response import LogResponse
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-
-
-class AgentContinueResponse(UncheckedBaseModel):
- """
- Response model for continuing an Agent call.
- """
-
- output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
- """
- The message returned by the provider.
- """
-
- prompt_tokens: typing.Optional[int] = pydantic.Field(default=None)
- """
- Number of tokens in the prompt used to generate the output.
- """
-
- reasoning_tokens: typing.Optional[int] = pydantic.Field(default=None)
- """
- Number of reasoning tokens used to generate the output.
- """
-
- output_tokens: typing.Optional[int] = pydantic.Field(default=None)
- """
- Number of tokens in the output generated by the model.
- """
-
- prompt_cost: typing.Optional[float] = pydantic.Field(default=None)
- """
- Cost in dollars associated to the tokens in the prompt.
- """
-
- output_cost: typing.Optional[float] = pydantic.Field(default=None)
- """
- Cost in dollars associated to the tokens in the output.
- """
-
- finish_reason: typing.Optional[str] = pydantic.Field(default=None)
- """
- Reason the generation finished.
- """
-
- messages: typing.Optional[typing.List[ChatMessage]] = pydantic.Field(default=None)
- """
- The messages passed to the to provider chat endpoint.
- """
-
- tool_choice: typing.Optional[AgentContinueResponseToolChoice] = pydantic.Field(default=None)
- """
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- """
-
- agent: AgentResponse = pydantic.Field()
- """
- Agent that generated the Log.
- """
-
- start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
- """
- When the logged event started.
- """
-
- end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
- """
- When the logged event ended.
- """
-
- output: typing.Optional[str] = pydantic.Field(default=None)
- """
- Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
- """
-
- created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
- """
- User defined timestamp for when the log was created.
- """
-
- error: typing.Optional[str] = pydantic.Field(default=None)
- """
- Error message if the log is an error.
- """
-
- provider_latency: typing.Optional[float] = pydantic.Field(default=None)
- """
- Duration of the logged event in seconds.
- """
-
- stdout: typing.Optional[str] = pydantic.Field(default=None)
- """
- Captured log and debug statements.
- """
-
- provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
- """
- Raw request sent to provider.
- """
-
- provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
- """
- Raw response received the provider.
- """
-
- inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
- """
- The inputs passed to the prompt template.
- """
-
- source: typing.Optional[str] = pydantic.Field(default=None)
- """
- Identifies where the model was called from.
- """
-
- metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
- """
- Any additional metadata to record.
- """
-
- log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
- """
- Status of the Agent Log. If `incomplete`, the Agent turn was suspended due to a tool call and can be continued by calling /agents/continue with responses to the Agent's last message (which should contain tool calls). See the `previous_agent_message` field for easy access to the Agent's last message.
- """
-
- source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None)
- """
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
- """
-
- trace_parent_id: typing.Optional[str] = pydantic.Field(default=None)
- """
- The ID of the parent Log to nest this Log under in a Trace.
- """
-
- batches: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
- """
- Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations
- """
-
- user: typing.Optional[str] = pydantic.Field(default=None)
- """
- End-user ID related to the Log.
- """
-
- environment: typing.Optional[str] = pydantic.Field(default=None)
- """
- The name of the Environment the Log is associated to.
- """
-
- save: typing.Optional[bool] = pydantic.Field(default=None)
- """
- Whether the request/response payloads will be stored on Humanloop.
- """
-
- log_id: typing.Optional[str] = pydantic.Field(default=None)
- """
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- """
-
- id: str = pydantic.Field()
- """
- Unique identifier for the Log.
- """
-
- evaluator_logs: typing.List[EvaluatorLogResponse] = pydantic.Field()
- """
- List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
- """
-
- trace_flow_id: typing.Optional[str] = pydantic.Field(default=None)
- """
- Identifier for the Flow that the Trace belongs to.
- """
-
- trace_id: typing.Optional[str] = pydantic.Field(default=None)
- """
- Identifier for the Trace that the Log belongs to.
- """
-
- trace_children: typing.Optional[typing.List[LogResponse]] = pydantic.Field(default=None)
- """
- Logs nested under this Log in the Trace.
- """
-
- previous_agent_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
- """
- The Agent's last message, which should contain tool calls. Only populated if the Log is incomplete due to a suspended Agent turn with tool calls. This is useful for continuing the Agent call by calling /agents/continue.
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/agent_continue_response_tool_choice.py b/src/humanloop/types/agent_continue_response_tool_choice.py
deleted file mode 100644
index 20f3fb75..00000000
--- a/src/humanloop/types/agent_continue_response_tool_choice.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-from .tool_choice import ToolChoice
-
-AgentContinueResponseToolChoice = typing.Union[
- typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice
-]
diff --git a/src/humanloop/types/agent_continue_stream_response.py b/src/humanloop/types/agent_continue_stream_response.py
deleted file mode 100644
index ff7a0fac..00000000
--- a/src/humanloop/types/agent_continue_stream_response.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .agent_linked_file_response import AgentLinkedFileResponse
-from .agent_log_response import AgentLogResponse
-from .agent_response import AgentResponse
-from .evaluator_log_response import EvaluatorLogResponse
-from .evaluator_response import EvaluatorResponse
-from .flow_log_response import FlowLogResponse
-from .flow_response import FlowResponse
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse
-from .prompt_log_response import PromptLogResponse
-from .prompt_response import PromptResponse
-from .tool_log_response import ToolLogResponse
-from .tool_response import ToolResponse
-from .version_deployment_response import VersionDeploymentResponse
-from .version_id_response import VersionIdResponse
-import typing
-from .agent_continue_stream_response_payload import AgentContinueStreamResponsePayload
-from .event_type import EventType
-import datetime as dt
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-import pydantic
-
-
-class AgentContinueStreamResponse(UncheckedBaseModel):
- """
- Response model for continuing an Agent call in streaming mode.
- """
-
- log_id: str
- message: str
- payload: typing.Optional[AgentContinueStreamResponsePayload] = None
- type: EventType
- created_at: dt.datetime
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/agent_continue_stream_response_payload.py b/src/humanloop/types/agent_continue_stream_response_payload.py
deleted file mode 100644
index 0e5f8a58..00000000
--- a/src/humanloop/types/agent_continue_stream_response_payload.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-from .log_stream_response import LogStreamResponse
-from .log_response import LogResponse
-from .tool_call import ToolCall
-
-AgentContinueStreamResponsePayload = typing.Union[LogStreamResponse, LogResponse, ToolCall]
diff --git a/src/humanloop/types/agent_inline_tool.py b/src/humanloop/types/agent_inline_tool.py
index dc618c35..466a0b2d 100644
--- a/src/humanloop/types/agent_inline_tool.py
+++ b/src/humanloop/types/agent_inline_tool.py
@@ -1,11 +1,12 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
import typing
-from .tool_function import ToolFunction
-from .on_agent_call_enum import OnAgentCallEnum
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .on_agent_call_enum import OnAgentCallEnum
+from .tool_function import ToolFunction
class AgentInlineTool(UncheckedBaseModel):
diff --git a/src/humanloop/types/agent_kernel_request.py b/src/humanloop/types/agent_kernel_request.py
index 6503b104..9cd36a6a 100644
--- a/src/humanloop/types/agent_kernel_request.py
+++ b/src/humanloop/types/agent_kernel_request.py
@@ -1,17 +1,18 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-import pydantic
import typing
-from .model_endpoints import ModelEndpoints
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_kernel_request_reasoning_effort import AgentKernelRequestReasoningEffort
+from .agent_kernel_request_stop import AgentKernelRequestStop
from .agent_kernel_request_template import AgentKernelRequestTemplate
-from .template_language import TemplateLanguage
+from .agent_kernel_request_tools_item import AgentKernelRequestToolsItem
+from .model_endpoints import ModelEndpoints
from .model_providers import ModelProviders
-from .agent_kernel_request_stop import AgentKernelRequestStop
from .response_format import ResponseFormat
-from .agent_kernel_request_reasoning_effort import AgentKernelRequestReasoningEffort
-from .agent_kernel_request_tools_item import AgentKernelRequestToolsItem
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from .template_language import TemplateLanguage
class AgentKernelRequest(UncheckedBaseModel):
diff --git a/src/humanloop/types/agent_kernel_request_reasoning_effort.py b/src/humanloop/types/agent_kernel_request_reasoning_effort.py
index a8e8e98b..3a0d2d24 100644
--- a/src/humanloop/types/agent_kernel_request_reasoning_effort.py
+++ b/src/humanloop/types/agent_kernel_request_reasoning_effort.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from .open_ai_reasoning_effort import OpenAiReasoningEffort
AgentKernelRequestReasoningEffort = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/types/agent_kernel_request_template.py b/src/humanloop/types/agent_kernel_request_template.py
index 31a351f2..62f4d40f 100644
--- a/src/humanloop/types/agent_kernel_request_template.py
+++ b/src/humanloop/types/agent_kernel_request_template.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from .chat_message import ChatMessage
AgentKernelRequestTemplate = typing.Union[str, typing.List[ChatMessage]]
diff --git a/src/humanloop/types/agent_kernel_request_tools_item.py b/src/humanloop/types/agent_kernel_request_tools_item.py
index 82c2fecf..043bb29b 100644
--- a/src/humanloop/types/agent_kernel_request_tools_item.py
+++ b/src/humanloop/types/agent_kernel_request_tools_item.py
@@ -1,7 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from .agent_linked_file_request import AgentLinkedFileRequest
+
from .agent_inline_tool import AgentInlineTool
+from .agent_linked_file_request import AgentLinkedFileRequest
AgentKernelRequestToolsItem = typing.Union[AgentLinkedFileRequest, AgentInlineTool]
diff --git a/src/humanloop/types/agent_linked_file_request.py b/src/humanloop/types/agent_linked_file_request.py
index 9efd4b6a..5d110bad 100644
--- a/src/humanloop/types/agent_linked_file_request.py
+++ b/src/humanloop/types/agent_linked_file_request.py
@@ -1,11 +1,12 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
from .linked_file_request import LinkedFileRequest
from .on_agent_call_enum import OnAgentCallEnum
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-import pydantic
class AgentLinkedFileRequest(UncheckedBaseModel):
diff --git a/src/humanloop/types/agent_linked_file_response.py b/src/humanloop/types/agent_linked_file_response.py
index d85d682e..9788f37d 100644
--- a/src/humanloop/types/agent_linked_file_response.py
+++ b/src/humanloop/types/agent_linked_file_response.py
@@ -1,13 +1,14 @@
# This file was auto-generated by Fern from our API Definition.
from __future__ import annotations
-from ..core.unchecked_base_model import UncheckedBaseModel
+
import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
+from ..core.unchecked_base_model import UncheckedBaseModel
from .linked_file_request import LinkedFileRequest
from .on_agent_call_enum import OnAgentCallEnum
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-import pydantic
-from ..core.pydantic_utilities import update_forward_refs
class AgentLinkedFileResponse(UncheckedBaseModel):
@@ -26,14 +27,14 @@ class Config:
extra = pydantic.Extra.allow
-from .agent_response import AgentResponse # noqa: E402
-from .evaluator_response import EvaluatorResponse # noqa: E402
-from .flow_response import FlowResponse # noqa: E402
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402
-from .prompt_response import PromptResponse # noqa: E402
-from .tool_response import ToolResponse # noqa: E402
-from .version_deployment_response import VersionDeploymentResponse # noqa: E402
-from .version_id_response import VersionIdResponse # noqa: E402
-from .agent_linked_file_response_file import AgentLinkedFileResponseFile # noqa: E402
+from .agent_response import AgentResponse # noqa: E402, F401, I001
+from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001
+from .flow_response import FlowResponse # noqa: E402, F401, I001
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001
+from .prompt_response import PromptResponse # noqa: E402, F401, I001
+from .tool_response import ToolResponse # noqa: E402, F401, I001
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001
+from .version_id_response import VersionIdResponse # noqa: E402, F401, I001
+from .agent_linked_file_response_file import AgentLinkedFileResponseFile # noqa: E402, F401, I001
update_forward_refs(AgentLinkedFileResponse)
diff --git a/src/humanloop/types/agent_linked_file_response_file.py b/src/humanloop/types/agent_linked_file_response_file.py
index 42d38fe4..ab1b384e 100644
--- a/src/humanloop/types/agent_linked_file_response_file.py
+++ b/src/humanloop/types/agent_linked_file_response_file.py
@@ -1,16 +1,17 @@
# This file was auto-generated by Fern from our API Definition.
from __future__ import annotations
+
import typing
+
from .dataset_response import DatasetResponse
-import typing
if typing.TYPE_CHECKING:
- from .prompt_response import PromptResponse
- from .tool_response import ToolResponse
+ from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
- from .agent_response import AgentResponse
+ from .prompt_response import PromptResponse
+ from .tool_response import ToolResponse
AgentLinkedFileResponseFile = typing.Union[
"PromptResponse", "ToolResponse", DatasetResponse, "EvaluatorResponse", "FlowResponse", "AgentResponse"
]
diff --git a/src/humanloop/types/agent_log_response.py b/src/humanloop/types/agent_log_response.py
index f5b5e8e8..634ad4d0 100644
--- a/src/humanloop/types/agent_log_response.py
+++ b/src/humanloop/types/agent_log_response.py
@@ -1,24 +1,16 @@
# This file was auto-generated by Fern from our API Definition.
from __future__ import annotations
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .agent_linked_file_response import AgentLinkedFileResponse
-from .agent_response import AgentResponse
-from .evaluator_response import EvaluatorResponse
-from .flow_response import FlowResponse
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse
-from .prompt_response import PromptResponse
-from .tool_response import ToolResponse
-from .version_deployment_response import VersionDeploymentResponse
-from .version_id_response import VersionIdResponse
+
+import datetime as dt
import typing
-from .chat_message import ChatMessage
+
import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
+from ..core.unchecked_base_model import UncheckedBaseModel
from .agent_log_response_tool_choice import AgentLogResponseToolChoice
-import datetime as dt
+from .chat_message import ChatMessage
from .log_status import LogStatus
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-from ..core.pydantic_utilities import update_forward_refs
class AgentLogResponse(UncheckedBaseModel):
@@ -75,7 +67,7 @@ class AgentLogResponse(UncheckedBaseModel):
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
"""
- agent: AgentResponse = pydantic.Field()
+ agent: "AgentResponse" = pydantic.Field()
"""
Agent that generated the Log.
"""
@@ -215,10 +207,19 @@ class Config:
extra = pydantic.Extra.allow
-from .evaluator_log_response import EvaluatorLogResponse # noqa: E402
-from .flow_log_response import FlowLogResponse # noqa: E402
-from .prompt_log_response import PromptLogResponse # noqa: E402
-from .tool_log_response import ToolLogResponse # noqa: E402
-from .log_response import LogResponse # noqa: E402
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001
+from .agent_response import AgentResponse # noqa: E402, F401, I001
+from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001
+from .flow_response import FlowResponse # noqa: E402, F401, I001
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001
+from .prompt_response import PromptResponse # noqa: E402, F401, I001
+from .tool_response import ToolResponse # noqa: E402, F401, I001
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001
+from .version_id_response import VersionIdResponse # noqa: E402, F401, I001
+from .evaluator_log_response import EvaluatorLogResponse # noqa: E402, F401, I001
+from .flow_log_response import FlowLogResponse # noqa: E402, F401, I001
+from .prompt_log_response import PromptLogResponse # noqa: E402, F401, I001
+from .tool_log_response import ToolLogResponse # noqa: E402, F401, I001
+from .log_response import LogResponse # noqa: E402, F401, I001
update_forward_refs(AgentLogResponse)
diff --git a/src/humanloop/types/agent_log_response_tool_choice.py b/src/humanloop/types/agent_log_response_tool_choice.py
index 5cb07628..bf642cf5 100644
--- a/src/humanloop/types/agent_log_response_tool_choice.py
+++ b/src/humanloop/types/agent_log_response_tool_choice.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from .tool_choice import ToolChoice
AgentLogResponseToolChoice = typing.Union[
diff --git a/src/humanloop/types/agent_log_stream_response.py b/src/humanloop/types/agent_log_stream_response.py
index 91547189..fb577067 100644
--- a/src/humanloop/types/agent_log_stream_response.py
+++ b/src/humanloop/types/agent_log_stream_response.py
@@ -1,11 +1,12 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
+import datetime as dt
import typing
+
import pydantic
-import datetime as dt
-from .chat_message import ChatMessage
from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .chat_message import ChatMessage
class AgentLogStreamResponse(UncheckedBaseModel):
diff --git a/src/humanloop/types/agent_response.py b/src/humanloop/types/agent_response.py
index e58aaeba..cdc54812 100644
--- a/src/humanloop/types/agent_response.py
+++ b/src/humanloop/types/agent_response.py
@@ -1,26 +1,27 @@
# This file was auto-generated by Fern from our API Definition.
from __future__ import annotations
-from ..core.unchecked_base_model import UncheckedBaseModel
-import pydantic
+
+import datetime as dt
import typing
-from .model_endpoints import ModelEndpoints
-from .agent_response_template import AgentResponseTemplate
-from .template_language import TemplateLanguage
-from .model_providers import ModelProviders
-from .agent_response_stop import AgentResponseStop
-from .response_format import ResponseFormat
-from .agent_response_reasoning_effort import AgentResponseReasoningEffort
+
+import pydantic
import typing_extensions
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
from ..core.serialization import FieldMetadata
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .agent_response_reasoning_effort import AgentResponseReasoningEffort
+from .agent_response_stop import AgentResponseStop
+from .agent_response_template import AgentResponseTemplate
from .environment_response import EnvironmentResponse
-import datetime as dt
+from .evaluator_aggregate import EvaluatorAggregate
+from .input_response import InputResponse
+from .model_endpoints import ModelEndpoints
+from .model_providers import ModelProviders
+from .response_format import ResponseFormat
+from .template_language import TemplateLanguage
from .user_response import UserResponse
from .version_status import VersionStatus
-from .input_response import InputResponse
-from .evaluator_aggregate import EvaluatorAggregate
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-from ..core.pydantic_utilities import update_forward_refs
class AgentResponse(UncheckedBaseModel):
@@ -237,6 +238,11 @@ class AgentResponse(UncheckedBaseModel):
Aggregation of Evaluator results for the Agent Version.
"""
+ raw_file_content: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The raw content of the Agent. Corresponds to the .agent file.
+ """
+
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
else:
@@ -247,14 +253,14 @@ class Config:
extra = pydantic.Extra.allow
-from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402
-from .evaluator_response import EvaluatorResponse # noqa: E402
-from .flow_response import FlowResponse # noqa: E402
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402
-from .prompt_response import PromptResponse # noqa: E402
-from .tool_response import ToolResponse # noqa: E402
-from .version_deployment_response import VersionDeploymentResponse # noqa: E402
-from .version_id_response import VersionIdResponse # noqa: E402
-from .agent_response_tools_item import AgentResponseToolsItem # noqa: E402
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001
+from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001
+from .flow_response import FlowResponse # noqa: E402, F401, I001
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001
+from .prompt_response import PromptResponse # noqa: E402, F401, I001
+from .tool_response import ToolResponse # noqa: E402, F401, I001
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001
+from .version_id_response import VersionIdResponse # noqa: E402, F401, I001
+from .agent_response_tools_item import AgentResponseToolsItem # noqa: E402, F401, I001
update_forward_refs(AgentResponse)
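
The only field-level change to `AgentResponse` in this hunk is the new optional `raw_file_content`, carrying the serialized `.agent` file. A small hypothetical usage sketch (it assumes the sync client exposes an `agents.get` returning an `AgentResponse`; the id and filename are made up):

    from humanloop import Humanloop

    client = Humanloop(api_key="hl-example-key")  # hypothetical key

    agent = client.agents.get(id="ag_1234567890")  # hypothetical id
    if agent.raw_file_content is not None:
        # Persist the serialized .agent file next to local configuration.
        with open("agent-snapshot.agent", "w") as f:
            f.write(agent.raw_file_content)
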
diff --git a/src/humanloop/types/agent_response_reasoning_effort.py b/src/humanloop/types/agent_response_reasoning_effort.py
index 59254f38..b6fa28cd 100644
--- a/src/humanloop/types/agent_response_reasoning_effort.py
+++ b/src/humanloop/types/agent_response_reasoning_effort.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from .open_ai_reasoning_effort import OpenAiReasoningEffort
AgentResponseReasoningEffort = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/types/agent_response_template.py b/src/humanloop/types/agent_response_template.py
index 4c084dc8..f5064815 100644
--- a/src/humanloop/types/agent_response_template.py
+++ b/src/humanloop/types/agent_response_template.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from .chat_message import ChatMessage
AgentResponseTemplate = typing.Union[str, typing.List[ChatMessage]]
diff --git a/src/humanloop/types/agent_response_tools_item.py b/src/humanloop/types/agent_response_tools_item.py
index 8095608f..da6970e2 100644
--- a/src/humanloop/types/agent_response_tools_item.py
+++ b/src/humanloop/types/agent_response_tools_item.py
@@ -1,9 +1,10 @@
# This file was auto-generated by Fern from our API Definition.
from __future__ import annotations
+
import typing
+
from .agent_inline_tool import AgentInlineTool
-import typing
if typing.TYPE_CHECKING:
from .agent_linked_file_response import AgentLinkedFileResponse
diff --git a/src/humanloop/types/anthropic_redacted_thinking_content.py b/src/humanloop/types/anthropic_redacted_thinking_content.py
index ebac897b..3e8e782e 100644
--- a/src/humanloop/types/anthropic_redacted_thinking_content.py
+++ b/src/humanloop/types/anthropic_redacted_thinking_content.py
@@ -1,9 +1,10 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
import typing
+
import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
class AnthropicRedactedThinkingContent(UncheckedBaseModel):
diff --git a/src/humanloop/types/anthropic_thinking_content.py b/src/humanloop/types/anthropic_thinking_content.py
index bf7fc808..f61501bd 100644
--- a/src/humanloop/types/anthropic_thinking_content.py
+++ b/src/humanloop/types/anthropic_thinking_content.py
@@ -1,9 +1,10 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
import typing
+
import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
class AnthropicThinkingContent(UncheckedBaseModel):
diff --git a/src/humanloop/types/boolean_evaluator_stats_response.py b/src/humanloop/types/boolean_evaluator_stats_response.py
index 3deca81b..9452d923 100644
--- a/src/humanloop/types/boolean_evaluator_stats_response.py
+++ b/src/humanloop/types/boolean_evaluator_stats_response.py
@@ -1,9 +1,10 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+
import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2
-import typing
+from ..core.unchecked_base_model import UncheckedBaseModel
class BooleanEvaluatorStatsResponse(UncheckedBaseModel):
diff --git a/src/humanloop/types/chat_message.py b/src/humanloop/types/chat_message.py
index c72bc90d..348752b5 100644
--- a/src/humanloop/types/chat_message.py
+++ b/src/humanloop/types/chat_message.py
@@ -1,13 +1,14 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
import typing
-from .chat_message_content import ChatMessageContent
+
import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .chat_message_content import ChatMessageContent
+from .chat_message_thinking_item import ChatMessageThinkingItem
from .chat_role import ChatRole
from .tool_call import ToolCall
-from .chat_message_thinking_item import ChatMessageThinkingItem
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
class ChatMessage(UncheckedBaseModel):
diff --git a/src/humanloop/types/chat_message_content.py b/src/humanloop/types/chat_message_content.py
index 3b318dc2..fd31fa21 100644
--- a/src/humanloop/types/chat_message_content.py
+++ b/src/humanloop/types/chat_message_content.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from .chat_message_content_item import ChatMessageContentItem
ChatMessageContent = typing.Union[str, typing.List[ChatMessageContentItem]]
diff --git a/src/humanloop/types/chat_message_content_item.py b/src/humanloop/types/chat_message_content_item.py
index e640e362..1d27b28d 100644
--- a/src/humanloop/types/chat_message_content_item.py
+++ b/src/humanloop/types/chat_message_content_item.py
@@ -1,7 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from .text_chat_content import TextChatContent
+
from .image_chat_content import ImageChatContent
+from .text_chat_content import TextChatContent
ChatMessageContentItem = typing.Union[TextChatContent, ImageChatContent]
diff --git a/src/humanloop/types/chat_message_thinking_item.py b/src/humanloop/types/chat_message_thinking_item.py
index 0a507724..2885c825 100644
--- a/src/humanloop/types/chat_message_thinking_item.py
+++ b/src/humanloop/types/chat_message_thinking_item.py
@@ -1,7 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from .anthropic_thinking_content import AnthropicThinkingContent
+
from .anthropic_redacted_thinking_content import AnthropicRedactedThinkingContent
+from .anthropic_thinking_content import AnthropicThinkingContent
ChatMessageThinkingItem = typing.Union[AnthropicThinkingContent, AnthropicRedactedThinkingContent]
diff --git a/src/humanloop/types/code_evaluator_request.py b/src/humanloop/types/code_evaluator_request.py
index 9a07ca37..e8c574f9 100644
--- a/src/humanloop/types/code_evaluator_request.py
+++ b/src/humanloop/types/code_evaluator_request.py
@@ -1,14 +1,15 @@
# This file was auto-generated by Fern from our API Definition.
+import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
from ..core.unchecked_base_model import UncheckedBaseModel
from .evaluator_arguments_type import EvaluatorArgumentsType
-import pydantic
-from .evaluator_return_type_enum import EvaluatorReturnTypeEnum
-import typing
-from .evaluator_judgment_option_response import EvaluatorJudgmentOptionResponse
from .evaluator_judgment_number_limit import EvaluatorJudgmentNumberLimit
+from .evaluator_judgment_option_response import EvaluatorJudgmentOptionResponse
+from .evaluator_return_type_enum import EvaluatorReturnTypeEnum
from .valence import Valence
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
class CodeEvaluatorRequest(UncheckedBaseModel):
diff --git a/src/humanloop/types/create_agent_log_response.py b/src/humanloop/types/create_agent_log_response.py
index 9dc66629..2fe74aa4 100644
--- a/src/humanloop/types/create_agent_log_response.py
+++ b/src/humanloop/types/create_agent_log_response.py
@@ -1,10 +1,11 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-import pydantic
import typing
-from .log_status import LogStatus
+
+import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .log_status import LogStatus
class CreateAgentLogResponse(UncheckedBaseModel):
diff --git a/src/humanloop/types/create_datapoint_request.py b/src/humanloop/types/create_datapoint_request.py
index d72c703e..31f3e4f7 100644
--- a/src/humanloop/types/create_datapoint_request.py
+++ b/src/humanloop/types/create_datapoint_request.py
@@ -1,11 +1,12 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
import typing
+
import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
from .chat_message import ChatMessage
from .create_datapoint_request_target_value import CreateDatapointRequestTargetValue
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
class CreateDatapointRequest(UncheckedBaseModel):
diff --git a/src/humanloop/types/create_evaluator_log_response.py b/src/humanloop/types/create_evaluator_log_response.py
index 3f97bc88..9f917d3d 100644
--- a/src/humanloop/types/create_evaluator_log_response.py
+++ b/src/humanloop/types/create_evaluator_log_response.py
@@ -1,9 +1,10 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-import pydantic
import typing
+
+import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
class CreateEvaluatorLogResponse(UncheckedBaseModel):
diff --git a/src/humanloop/types/create_flow_log_response.py b/src/humanloop/types/create_flow_log_response.py
index df5846bd..ae296a6f 100644
--- a/src/humanloop/types/create_flow_log_response.py
+++ b/src/humanloop/types/create_flow_log_response.py
@@ -1,10 +1,11 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-import pydantic
import typing
-from .log_status import LogStatus
+
+import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .log_status import LogStatus
class CreateFlowLogResponse(UncheckedBaseModel):
diff --git a/src/humanloop/types/create_prompt_log_response.py b/src/humanloop/types/create_prompt_log_response.py
index d1df9ad0..cd80d43b 100644
--- a/src/humanloop/types/create_prompt_log_response.py
+++ b/src/humanloop/types/create_prompt_log_response.py
@@ -1,9 +1,10 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-import pydantic
import typing
+
+import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
class CreatePromptLogResponse(UncheckedBaseModel):
diff --git a/src/humanloop/types/create_tool_log_response.py b/src/humanloop/types/create_tool_log_response.py
index ba4303c7..6ba171fa 100644
--- a/src/humanloop/types/create_tool_log_response.py
+++ b/src/humanloop/types/create_tool_log_response.py
@@ -1,9 +1,10 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-import pydantic
import typing
+
+import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
class CreateToolLogResponse(UncheckedBaseModel):
diff --git a/src/humanloop/types/dashboard_configuration.py b/src/humanloop/types/dashboard_configuration.py
index a75f961b..f5d752d8 100644
--- a/src/humanloop/types/dashboard_configuration.py
+++ b/src/humanloop/types/dashboard_configuration.py
@@ -1,10 +1,11 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .time_unit import TimeUnit
import typing
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .time_unit import TimeUnit
class DashboardConfiguration(UncheckedBaseModel):
diff --git a/src/humanloop/types/datapoint_response.py b/src/humanloop/types/datapoint_response.py
index 56680a0a..2eb4de68 100644
--- a/src/humanloop/types/datapoint_response.py
+++ b/src/humanloop/types/datapoint_response.py
@@ -1,11 +1,12 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
import typing
+
import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
from .chat_message import ChatMessage
from .datapoint_response_target_value import DatapointResponseTargetValue
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
class DatapointResponse(UncheckedBaseModel):
diff --git a/src/humanloop/types/dataset_response.py b/src/humanloop/types/dataset_response.py
index 2c614521..9153374a 100644
--- a/src/humanloop/types/dataset_response.py
+++ b/src/humanloop/types/dataset_response.py
@@ -1,15 +1,16 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-import pydantic
+import datetime as dt
import typing
+
+import pydantic
import typing_extensions
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
from ..core.serialization import FieldMetadata
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .datapoint_response import DatapointResponse
from .environment_response import EnvironmentResponse
-import datetime as dt
from .user_response import UserResponse
-from .datapoint_response import DatapointResponse
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
class DatasetResponse(UncheckedBaseModel):
diff --git a/src/humanloop/types/directory_response.py b/src/humanloop/types/directory_response.py
index 54d9e89c..a56f0732 100644
--- a/src/humanloop/types/directory_response.py
+++ b/src/humanloop/types/directory_response.py
@@ -1,10 +1,11 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-import pydantic
-import typing
import datetime as dt
+import typing
+
+import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
class DirectoryResponse(UncheckedBaseModel):
diff --git a/src/humanloop/types/directory_with_parents_and_children_response.py b/src/humanloop/types/directory_with_parents_and_children_response.py
index 51f879b8..a04de500 100644
--- a/src/humanloop/types/directory_with_parents_and_children_response.py
+++ b/src/humanloop/types/directory_with_parents_and_children_response.py
@@ -1,21 +1,15 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .agent_linked_file_response import AgentLinkedFileResponse
-from .agent_response import AgentResponse
-from .evaluator_response import EvaluatorResponse
-from .flow_response import FlowResponse
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse
-from .prompt_response import PromptResponse
-from .tool_response import ToolResponse
-from .version_deployment_response import VersionDeploymentResponse
-from .version_id_response import VersionIdResponse
-import pydantic
-import typing
+from __future__ import annotations
+
import datetime as dt
+import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
+from ..core.unchecked_base_model import UncheckedBaseModel
from .directory_response import DirectoryResponse
from .directory_with_parents_and_children_response_files_item import DirectoryWithParentsAndChildrenResponseFilesItem
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
class DirectoryWithParentsAndChildrenResponse(UncheckedBaseModel):
@@ -79,3 +73,16 @@ class Config:
frozen = True
smart_union = True
extra = pydantic.Extra.allow
+
+
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001
+from .agent_response import AgentResponse # noqa: E402, F401, I001
+from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001
+from .flow_response import FlowResponse # noqa: E402, F401, I001
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001
+from .prompt_response import PromptResponse # noqa: E402, F401, I001
+from .tool_response import ToolResponse # noqa: E402, F401, I001
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001
+from .version_id_response import VersionIdResponse # noqa: E402, F401, I001
+
+update_forward_refs(DirectoryWithParentsAndChildrenResponse)
diff --git a/src/humanloop/types/directory_with_parents_and_children_response_files_item.py b/src/humanloop/types/directory_with_parents_and_children_response_files_item.py
index 9d0d5fc4..2c418d75 100644
--- a/src/humanloop/types/directory_with_parents_and_children_response_files_item.py
+++ b/src/humanloop/types/directory_with_parents_and_children_response_files_item.py
@@ -1,12 +1,13 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from .prompt_response import PromptResponse
-from .tool_response import ToolResponse
-from .evaluator_response import EvaluatorResponse
+
+from .agent_response import AgentResponse
from .dataset_response import DatasetResponse
+from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
-from .agent_response import AgentResponse
+from .prompt_response import PromptResponse
+from .tool_response import ToolResponse
DirectoryWithParentsAndChildrenResponseFilesItem = typing.Union[
PromptResponse, ToolResponse, EvaluatorResponse, DatasetResponse, FlowResponse, AgentResponse
diff --git a/src/humanloop/types/environment_response.py b/src/humanloop/types/environment_response.py
index c39a1aa5..23c0ab8f 100644
--- a/src/humanloop/types/environment_response.py
+++ b/src/humanloop/types/environment_response.py
@@ -1,11 +1,12 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
import datetime as dt
-from .environment_tag import EnvironmentTag
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
import typing
+
import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .environment_tag import EnvironmentTag
class EnvironmentResponse(UncheckedBaseModel):
diff --git a/src/humanloop/types/evaluatee_request.py b/src/humanloop/types/evaluatee_request.py
index 5ada44d2..a51c07aa 100644
--- a/src/humanloop/types/evaluatee_request.py
+++ b/src/humanloop/types/evaluatee_request.py
@@ -1,9 +1,10 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
import typing
+
import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
class EvaluateeRequest(UncheckedBaseModel):
diff --git a/src/humanloop/types/evaluatee_response.py b/src/humanloop/types/evaluatee_response.py
index 4332aa12..0a2169e0 100644
--- a/src/humanloop/types/evaluatee_response.py
+++ b/src/humanloop/types/evaluatee_response.py
@@ -1,20 +1,14 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .agent_linked_file_response import AgentLinkedFileResponse
-from .agent_response import AgentResponse
-from .evaluator_response import EvaluatorResponse
-from .flow_response import FlowResponse
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse
-from .prompt_response import PromptResponse
-from .tool_response import ToolResponse
-from .version_deployment_response import VersionDeploymentResponse
-from .version_id_response import VersionIdResponse
+from __future__ import annotations
+
+import datetime as dt
import typing
-from .run_version_response import RunVersionResponse
+
import pydantic
-import datetime as dt
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .run_version_response import RunVersionResponse
class EvaluateeResponse(UncheckedBaseModel):
@@ -51,3 +45,16 @@ class Config:
frozen = True
smart_union = True
extra = pydantic.Extra.allow
+
+
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001
+from .agent_response import AgentResponse # noqa: E402, F401, I001
+from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001
+from .flow_response import FlowResponse # noqa: E402, F401, I001
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001
+from .prompt_response import PromptResponse # noqa: E402, F401, I001
+from .tool_response import ToolResponse # noqa: E402, F401, I001
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001
+from .version_id_response import VersionIdResponse # noqa: E402, F401, I001
+
+update_forward_refs(EvaluateeResponse)
diff --git a/src/humanloop/types/evaluation_evaluator_response.py b/src/humanloop/types/evaluation_evaluator_response.py
index 0c7de27e..c63ebb8d 100644
--- a/src/humanloop/types/evaluation_evaluator_response.py
+++ b/src/humanloop/types/evaluation_evaluator_response.py
@@ -1,23 +1,17 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .agent_linked_file_response import AgentLinkedFileResponse
-from .agent_response import AgentResponse
-from .evaluator_response import EvaluatorResponse
-from .flow_response import FlowResponse
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse
-from .prompt_response import PromptResponse
-from .tool_response import ToolResponse
-from .version_deployment_response import VersionDeploymentResponse
-from .version_id_response import VersionIdResponse
-import pydantic
+from __future__ import annotations
+
import datetime as dt
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
import typing
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
+from ..core.unchecked_base_model import UncheckedBaseModel
+
class EvaluationEvaluatorResponse(UncheckedBaseModel):
- version: EvaluatorResponse
+ version: "EvaluatorResponse"
orchestrated: bool = pydantic.Field()
"""
Whether the Evaluator is orchestrated by Humanloop. Default is `True`. If `False`, a log for the Evaluator should be submitted by the user via the API.
@@ -36,3 +30,16 @@ class Config:
frozen = True
smart_union = True
extra = pydantic.Extra.allow
+
+
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001
+from .agent_response import AgentResponse # noqa: E402, F401, I001
+from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001
+from .flow_response import FlowResponse # noqa: E402, F401, I001
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001
+from .prompt_response import PromptResponse # noqa: E402, F401, I001
+from .tool_response import ToolResponse # noqa: E402, F401, I001
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001
+from .version_id_response import VersionIdResponse # noqa: E402, F401, I001
+
+update_forward_refs(EvaluationEvaluatorResponse)
diff --git a/src/humanloop/types/evaluation_log_response.py b/src/humanloop/types/evaluation_log_response.py
index 84d117e2..bd2864f2 100644
--- a/src/humanloop/types/evaluation_log_response.py
+++ b/src/humanloop/types/evaluation_log_response.py
@@ -1,25 +1,13 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .agent_linked_file_response import AgentLinkedFileResponse
-from .agent_log_response import AgentLogResponse
-from .agent_response import AgentResponse
-from .evaluator_log_response import EvaluatorLogResponse
-from .evaluator_response import EvaluatorResponse
-from .flow_log_response import FlowLogResponse
-from .flow_response import FlowResponse
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse
-from .prompt_log_response import PromptLogResponse
-from .prompt_response import PromptResponse
-from .tool_log_response import ToolLogResponse
-from .tool_response import ToolResponse
-from .version_deployment_response import VersionDeploymentResponse
-from .version_id_response import VersionIdResponse
-import pydantic
+from __future__ import annotations
+
import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
+from ..core.unchecked_base_model import UncheckedBaseModel
from .datapoint_response import DatapointResponse
-from .log_response import LogResponse
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
class EvaluationLogResponse(UncheckedBaseModel):
@@ -33,12 +21,12 @@ class EvaluationLogResponse(UncheckedBaseModel):
The Datapoint used to generate the Log
"""
- log: LogResponse = pydantic.Field()
+ log: "LogResponse" = pydantic.Field()
"""
The Log that was evaluated by the Evaluator.
"""
- evaluator_logs: typing.List[LogResponse] = pydantic.Field()
+ evaluator_logs: typing.List["LogResponse"] = pydantic.Field()
"""
The Evaluator Logs containing the judgments for the Log.
"""
@@ -51,3 +39,22 @@ class Config:
frozen = True
smart_union = True
extra = pydantic.Extra.allow
+
+
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001
+from .agent_log_response import AgentLogResponse # noqa: E402, F401, I001
+from .agent_response import AgentResponse # noqa: E402, F401, I001
+from .evaluator_log_response import EvaluatorLogResponse # noqa: E402, F401, I001
+from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001
+from .flow_log_response import FlowLogResponse # noqa: E402, F401, I001
+from .flow_response import FlowResponse # noqa: E402, F401, I001
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001
+from .prompt_log_response import PromptLogResponse # noqa: E402, F401, I001
+from .prompt_response import PromptResponse # noqa: E402, F401, I001
+from .tool_log_response import ToolLogResponse # noqa: E402, F401, I001
+from .tool_response import ToolResponse # noqa: E402, F401, I001
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001
+from .version_id_response import VersionIdResponse # noqa: E402, F401, I001
+from .log_response import LogResponse # noqa: E402, F401, I001
+
+update_forward_refs(EvaluationLogResponse)
diff --git a/src/humanloop/types/evaluation_response.py b/src/humanloop/types/evaluation_response.py
index bcda94a4..dc32e6dc 100644
--- a/src/humanloop/types/evaluation_response.py
+++ b/src/humanloop/types/evaluation_response.py
@@ -1,21 +1,15 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .agent_linked_file_response import AgentLinkedFileResponse
-from .agent_response import AgentResponse
-from .evaluator_response import EvaluatorResponse
-from .flow_response import FlowResponse
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse
-from .prompt_response import PromptResponse
-from .tool_response import ToolResponse
-from .version_deployment_response import VersionDeploymentResponse
-from .version_id_response import VersionIdResponse
-import pydantic
+from __future__ import annotations
+
+import datetime as dt
import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
+from ..core.unchecked_base_model import UncheckedBaseModel
from .evaluation_evaluator_response import EvaluationEvaluatorResponse
-import datetime as dt
from .user_response import UserResponse
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
class EvaluationResponse(UncheckedBaseModel):
@@ -60,3 +54,16 @@ class Config:
frozen = True
smart_union = True
extra = pydantic.Extra.allow
+
+
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001
+from .agent_response import AgentResponse # noqa: E402, F401, I001
+from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001
+from .flow_response import FlowResponse # noqa: E402, F401, I001
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001
+from .prompt_response import PromptResponse # noqa: E402, F401, I001
+from .tool_response import ToolResponse # noqa: E402, F401, I001
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001
+from .version_id_response import VersionIdResponse # noqa: E402, F401, I001
+
+update_forward_refs(EvaluationResponse)
diff --git a/src/humanloop/types/evaluation_run_response.py b/src/humanloop/types/evaluation_run_response.py
index 74d59e4c..d2977f63 100644
--- a/src/humanloop/types/evaluation_run_response.py
+++ b/src/humanloop/types/evaluation_run_response.py
@@ -1,23 +1,17 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .agent_linked_file_response import AgentLinkedFileResponse
-from .agent_response import AgentResponse
-from .evaluator_response import EvaluatorResponse
-from .flow_response import FlowResponse
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse
-from .prompt_response import PromptResponse
-from .tool_response import ToolResponse
-from .version_deployment_response import VersionDeploymentResponse
-from .version_id_response import VersionIdResponse
-import pydantic
+from __future__ import annotations
+
+import datetime as dt
import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
+from ..core.unchecked_base_model import UncheckedBaseModel
from .dataset_response import DatasetResponse
+from .evaluation_status import EvaluationStatus
from .run_version_response import RunVersionResponse
-import datetime as dt
from .user_response import UserResponse
-from .evaluation_status import EvaluationStatus
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
class EvaluationRunResponse(UncheckedBaseModel):
@@ -74,3 +68,16 @@ class Config:
frozen = True
smart_union = True
extra = pydantic.Extra.allow
+
+
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001
+from .agent_response import AgentResponse # noqa: E402, F401, I001
+from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001
+from .flow_response import FlowResponse # noqa: E402, F401, I001
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001
+from .prompt_response import PromptResponse # noqa: E402, F401, I001
+from .tool_response import ToolResponse # noqa: E402, F401, I001
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001
+from .version_id_response import VersionIdResponse # noqa: E402, F401, I001
+
+update_forward_refs(EvaluationRunResponse)
diff --git a/src/humanloop/types/evaluation_runs_response.py b/src/humanloop/types/evaluation_runs_response.py
index e09b2a73..e815d1e7 100644
--- a/src/humanloop/types/evaluation_runs_response.py
+++ b/src/humanloop/types/evaluation_runs_response.py
@@ -1,19 +1,13 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .agent_linked_file_response import AgentLinkedFileResponse
-from .agent_response import AgentResponse
-from .evaluator_response import EvaluatorResponse
-from .flow_response import FlowResponse
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse
-from .prompt_response import PromptResponse
-from .tool_response import ToolResponse
-from .version_deployment_response import VersionDeploymentResponse
-from .version_id_response import VersionIdResponse
+from __future__ import annotations
+
import typing
-from .evaluation_run_response import EvaluationRunResponse
+
import pydantic
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .evaluation_run_response import EvaluationRunResponse
class EvaluationRunsResponse(UncheckedBaseModel):
@@ -30,3 +24,16 @@ class Config:
frozen = True
smart_union = True
extra = pydantic.Extra.allow
+
+
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001
+from .agent_response import AgentResponse # noqa: E402, F401, I001
+from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001
+from .flow_response import FlowResponse # noqa: E402, F401, I001
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001
+from .prompt_response import PromptResponse # noqa: E402, F401, I001
+from .tool_response import ToolResponse # noqa: E402, F401, I001
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001
+from .version_id_response import VersionIdResponse # noqa: E402, F401, I001
+
+update_forward_refs(EvaluationRunsResponse)
diff --git a/src/humanloop/types/evaluation_stats.py b/src/humanloop/types/evaluation_stats.py
index 9a6a07a7..656d45d0 100644
--- a/src/humanloop/types/evaluation_stats.py
+++ b/src/humanloop/types/evaluation_stats.py
@@ -1,11 +1,12 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
import typing
-from .run_stats_response import RunStatsResponse
+
import pydantic
-from .evaluation_status import EvaluationStatus
from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .evaluation_status import EvaluationStatus
+from .run_stats_response import RunStatsResponse
class EvaluationStats(UncheckedBaseModel):
diff --git a/src/humanloop/types/evaluator_activation_deactivation_request.py b/src/humanloop/types/evaluator_activation_deactivation_request.py
index a3cc9e9f..f9c6023e 100644
--- a/src/humanloop/types/evaluator_activation_deactivation_request.py
+++ b/src/humanloop/types/evaluator_activation_deactivation_request.py
@@ -1,13 +1,14 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
import typing
-from .evaluator_activation_deactivation_request_activate_item import EvaluatorActivationDeactivationRequestActivateItem
+
import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .evaluator_activation_deactivation_request_activate_item import EvaluatorActivationDeactivationRequestActivateItem
from .evaluator_activation_deactivation_request_deactivate_item import (
EvaluatorActivationDeactivationRequestDeactivateItem,
)
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
class EvaluatorActivationDeactivationRequest(UncheckedBaseModel):
diff --git a/src/humanloop/types/evaluator_activation_deactivation_request_activate_item.py b/src/humanloop/types/evaluator_activation_deactivation_request_activate_item.py
index 9d13b402..6d2039b9 100644
--- a/src/humanloop/types/evaluator_activation_deactivation_request_activate_item.py
+++ b/src/humanloop/types/evaluator_activation_deactivation_request_activate_item.py
@@ -1,8 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from .monitoring_evaluator_version_request import MonitoringEvaluatorVersionRequest
+
from .monitoring_evaluator_environment_request import MonitoringEvaluatorEnvironmentRequest
+from .monitoring_evaluator_version_request import MonitoringEvaluatorVersionRequest
EvaluatorActivationDeactivationRequestActivateItem = typing.Union[
MonitoringEvaluatorVersionRequest, MonitoringEvaluatorEnvironmentRequest
diff --git a/src/humanloop/types/evaluator_activation_deactivation_request_deactivate_item.py b/src/humanloop/types/evaluator_activation_deactivation_request_deactivate_item.py
index d44dd59f..6eb65d03 100644
--- a/src/humanloop/types/evaluator_activation_deactivation_request_deactivate_item.py
+++ b/src/humanloop/types/evaluator_activation_deactivation_request_deactivate_item.py
@@ -1,8 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from .monitoring_evaluator_version_request import MonitoringEvaluatorVersionRequest
+
from .monitoring_evaluator_environment_request import MonitoringEvaluatorEnvironmentRequest
+from .monitoring_evaluator_version_request import MonitoringEvaluatorVersionRequest
EvaluatorActivationDeactivationRequestDeactivateItem = typing.Union[
MonitoringEvaluatorVersionRequest, MonitoringEvaluatorEnvironmentRequest
diff --git a/src/humanloop/types/evaluator_aggregate.py b/src/humanloop/types/evaluator_aggregate.py
index 149bd04e..5c24915a 100644
--- a/src/humanloop/types/evaluator_aggregate.py
+++ b/src/humanloop/types/evaluator_aggregate.py
@@ -1,11 +1,12 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-import pydantic
import datetime as dt
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
import typing
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
+
class EvaluatorAggregate(UncheckedBaseModel):
value: float = pydantic.Field()
diff --git a/src/humanloop/types/evaluator_config_response.py b/src/humanloop/types/evaluator_config_response.py
index 6dace186..00bd5cd8 100644
--- a/src/humanloop/types/evaluator_config_response.py
+++ b/src/humanloop/types/evaluator_config_response.py
@@ -1,9 +1,10 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
import typing
+
import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
class EvaluatorConfigResponse(UncheckedBaseModel):
diff --git a/src/humanloop/types/evaluator_file_id.py b/src/humanloop/types/evaluator_file_id.py
index cbe7a085..6c3b3141 100644
--- a/src/humanloop/types/evaluator_file_id.py
+++ b/src/humanloop/types/evaluator_file_id.py
@@ -1,9 +1,10 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
import typing
+
import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
class EvaluatorFileId(UncheckedBaseModel):
diff --git a/src/humanloop/types/evaluator_file_path.py b/src/humanloop/types/evaluator_file_path.py
index c8ce2a0c..cd967935 100644
--- a/src/humanloop/types/evaluator_file_path.py
+++ b/src/humanloop/types/evaluator_file_path.py
@@ -1,9 +1,10 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
import typing
+
import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
class EvaluatorFilePath(UncheckedBaseModel):
diff --git a/src/humanloop/types/evaluator_judgment_number_limit.py b/src/humanloop/types/evaluator_judgment_number_limit.py
index cd0ebae3..289afd4c 100644
--- a/src/humanloop/types/evaluator_judgment_number_limit.py
+++ b/src/humanloop/types/evaluator_judgment_number_limit.py
@@ -1,9 +1,10 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
import typing
+
import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
class EvaluatorJudgmentNumberLimit(UncheckedBaseModel):
diff --git a/src/humanloop/types/evaluator_judgment_option_response.py b/src/humanloop/types/evaluator_judgment_option_response.py
index 577c7a49..3b3a78e3 100644
--- a/src/humanloop/types/evaluator_judgment_option_response.py
+++ b/src/humanloop/types/evaluator_judgment_option_response.py
@@ -1,10 +1,11 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-import pydantic
import typing
-from .valence import Valence
+
+import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .valence import Valence
class EvaluatorJudgmentOptionResponse(UncheckedBaseModel):
diff --git a/src/humanloop/types/evaluator_log_response.py b/src/humanloop/types/evaluator_log_response.py
index 71ca76c0..e006e7a2 100644
--- a/src/humanloop/types/evaluator_log_response.py
+++ b/src/humanloop/types/evaluator_log_response.py
@@ -1,24 +1,16 @@
# This file was auto-generated by Fern from our API Definition.
from __future__ import annotations
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .agent_linked_file_response import AgentLinkedFileResponse
-from .agent_response import AgentResponse
-from .evaluator_response import EvaluatorResponse
-from .flow_response import FlowResponse
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse
-from .prompt_response import PromptResponse
-from .tool_response import ToolResponse
-from .version_deployment_response import VersionDeploymentResponse
-from .version_id_response import VersionIdResponse
-import typing
+
import datetime as dt
+import typing
+
import pydantic
-from .log_status import LogStatus
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
+from ..core.unchecked_base_model import UncheckedBaseModel
from .chat_message import ChatMessage
from .evaluator_log_response_judgment import EvaluatorLogResponseJudgment
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-from ..core.pydantic_utilities import update_forward_refs
+from .log_status import LogStatus
class EvaluatorLogResponse(UncheckedBaseModel):
@@ -171,7 +163,7 @@ class EvaluatorLogResponse(UncheckedBaseModel):
Logs nested under this Log in the Trace.
"""
- evaluator: EvaluatorResponse = pydantic.Field()
+ evaluator: "EvaluatorResponse" = pydantic.Field()
"""
Evaluator used to generate the judgment.
"""
@@ -191,10 +183,19 @@ class Config:
extra = pydantic.Extra.allow
-from .agent_log_response import AgentLogResponse # noqa: E402
-from .flow_log_response import FlowLogResponse # noqa: E402
-from .prompt_log_response import PromptLogResponse # noqa: E402
-from .tool_log_response import ToolLogResponse # noqa: E402
-from .log_response import LogResponse # noqa: E402
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001
+from .agent_log_response import AgentLogResponse # noqa: E402, F401, I001
+from .agent_response import AgentResponse # noqa: E402, F401, I001
+from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001
+from .flow_log_response import FlowLogResponse # noqa: E402, F401, I001
+from .flow_response import FlowResponse # noqa: E402, F401, I001
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001
+from .prompt_log_response import PromptLogResponse # noqa: E402, F401, I001
+from .prompt_response import PromptResponse # noqa: E402, F401, I001
+from .tool_log_response import ToolLogResponse # noqa: E402, F401, I001
+from .tool_response import ToolResponse # noqa: E402, F401, I001
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001
+from .version_id_response import VersionIdResponse # noqa: E402, F401, I001
+from .log_response import LogResponse # noqa: E402, F401, I001
update_forward_refs(EvaluatorLogResponse)
diff --git a/src/humanloop/types/evaluator_response.py b/src/humanloop/types/evaluator_response.py
index 712ca698..0af149d3 100644
--- a/src/humanloop/types/evaluator_response.py
+++ b/src/humanloop/types/evaluator_response.py
@@ -1,19 +1,20 @@
# This file was auto-generated by Fern from our API Definition.
from __future__ import annotations
-from ..core.unchecked_base_model import UncheckedBaseModel
-import pydantic
+
+import datetime as dt
import typing
-from .evaluator_response_spec import EvaluatorResponseSpec
+
+import pydantic
import typing_extensions
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
from ..core.serialization import FieldMetadata
+from ..core.unchecked_base_model import UncheckedBaseModel
from .environment_response import EnvironmentResponse
-import datetime as dt
-from .user_response import UserResponse
-from .input_response import InputResponse
from .evaluator_aggregate import EvaluatorAggregate
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-from ..core.pydantic_utilities import update_forward_refs
+from .evaluator_response_spec import EvaluatorResponseSpec
+from .input_response import InputResponse
+from .user_response import UserResponse
class EvaluatorResponse(UncheckedBaseModel):
@@ -133,13 +134,13 @@ class Config:
extra = pydantic.Extra.allow
-from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402
-from .agent_response import AgentResponse # noqa: E402
-from .flow_response import FlowResponse # noqa: E402
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402
-from .prompt_response import PromptResponse # noqa: E402
-from .tool_response import ToolResponse # noqa: E402
-from .version_deployment_response import VersionDeploymentResponse # noqa: E402
-from .version_id_response import VersionIdResponse # noqa: E402
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001
+from .agent_response import AgentResponse # noqa: E402, F401, I001
+from .flow_response import FlowResponse # noqa: E402, F401, I001
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001
+from .prompt_response import PromptResponse # noqa: E402, F401, I001
+from .tool_response import ToolResponse # noqa: E402, F401, I001
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001
+from .version_id_response import VersionIdResponse # noqa: E402, F401, I001
update_forward_refs(EvaluatorResponse)
diff --git a/src/humanloop/types/evaluator_response_spec.py b/src/humanloop/types/evaluator_response_spec.py
index b14bca0a..45eb1790 100644
--- a/src/humanloop/types/evaluator_response_spec.py
+++ b/src/humanloop/types/evaluator_response_spec.py
@@ -1,10 +1,11 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from .llm_evaluator_request import LlmEvaluatorRequest
+
from .code_evaluator_request import CodeEvaluatorRequest
-from .human_evaluator_request import HumanEvaluatorRequest
from .external_evaluator_request import ExternalEvaluatorRequest
+from .human_evaluator_request import HumanEvaluatorRequest
+from .llm_evaluator_request import LlmEvaluatorRequest
EvaluatorResponseSpec = typing.Union[
LlmEvaluatorRequest, CodeEvaluatorRequest, HumanEvaluatorRequest, ExternalEvaluatorRequest
diff --git a/src/humanloop/types/evaluator_version_id.py b/src/humanloop/types/evaluator_version_id.py
index 9d961bc1..688acf9a 100644
--- a/src/humanloop/types/evaluator_version_id.py
+++ b/src/humanloop/types/evaluator_version_id.py
@@ -1,9 +1,10 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-import pydantic
import typing
+
+import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
class EvaluatorVersionId(UncheckedBaseModel):
diff --git a/src/humanloop/types/external_evaluator_request.py b/src/humanloop/types/external_evaluator_request.py
index 6c1b4561..9f528f67 100644
--- a/src/humanloop/types/external_evaluator_request.py
+++ b/src/humanloop/types/external_evaluator_request.py
@@ -1,14 +1,15 @@
# This file was auto-generated by Fern from our API Definition.
+import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
from ..core.unchecked_base_model import UncheckedBaseModel
from .evaluator_arguments_type import EvaluatorArgumentsType
-import pydantic
-from .evaluator_return_type_enum import EvaluatorReturnTypeEnum
-import typing
-from .evaluator_judgment_option_response import EvaluatorJudgmentOptionResponse
from .evaluator_judgment_number_limit import EvaluatorJudgmentNumberLimit
+from .evaluator_judgment_option_response import EvaluatorJudgmentOptionResponse
+from .evaluator_return_type_enum import EvaluatorReturnTypeEnum
from .valence import Valence
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
class ExternalEvaluatorRequest(UncheckedBaseModel):
diff --git a/src/humanloop/types/file_environment_response.py b/src/humanloop/types/file_environment_response.py
index 7f34b7b3..f3a26ef0 100644
--- a/src/humanloop/types/file_environment_response.py
+++ b/src/humanloop/types/file_environment_response.py
@@ -1,21 +1,15 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .agent_linked_file_response import AgentLinkedFileResponse
-from .agent_response import AgentResponse
-from .evaluator_response import EvaluatorResponse
-from .flow_response import FlowResponse
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse
-from .prompt_response import PromptResponse
-from .tool_response import ToolResponse
-from .version_deployment_response import VersionDeploymentResponse
-from .version_id_response import VersionIdResponse
+from __future__ import annotations
+
import datetime as dt
-from .environment_tag import EnvironmentTag
import typing
-from .file_environment_response_file import FileEnvironmentResponseFile
+
import pydantic
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .environment_tag import EnvironmentTag
+from .file_environment_response_file import FileEnvironmentResponseFile
class FileEnvironmentResponse(UncheckedBaseModel):
@@ -42,3 +36,16 @@ class Config:
frozen = True
smart_union = True
extra = pydantic.Extra.allow
+
+
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001
+from .agent_response import AgentResponse # noqa: E402, F401, I001
+from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001
+from .flow_response import FlowResponse # noqa: E402, F401, I001
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001
+from .prompt_response import PromptResponse # noqa: E402, F401, I001
+from .tool_response import ToolResponse # noqa: E402, F401, I001
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001
+from .version_id_response import VersionIdResponse # noqa: E402, F401, I001
+
+update_forward_refs(FileEnvironmentResponse)
diff --git a/src/humanloop/types/file_environment_response_file.py b/src/humanloop/types/file_environment_response_file.py
index 0254c2b8..2725177a 100644
--- a/src/humanloop/types/file_environment_response_file.py
+++ b/src/humanloop/types/file_environment_response_file.py
@@ -1,12 +1,13 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from .prompt_response import PromptResponse
-from .tool_response import ToolResponse
+
+from .agent_response import AgentResponse
from .dataset_response import DatasetResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
-from .agent_response import AgentResponse
+from .prompt_response import PromptResponse
+from .tool_response import ToolResponse
FileEnvironmentResponseFile = typing.Union[
PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse, AgentResponse
diff --git a/src/humanloop/types/file_environment_variable_request.py b/src/humanloop/types/file_environment_variable_request.py
index 8108245b..112e9602 100644
--- a/src/humanloop/types/file_environment_variable_request.py
+++ b/src/humanloop/types/file_environment_variable_request.py
@@ -1,9 +1,10 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+
import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2
-import typing
+from ..core.unchecked_base_model import UncheckedBaseModel
class FileEnvironmentVariableRequest(UncheckedBaseModel):
diff --git a/src/humanloop/types/file_id.py b/src/humanloop/types/file_id.py
index c0d6243b..fe049681 100644
--- a/src/humanloop/types/file_id.py
+++ b/src/humanloop/types/file_id.py
@@ -1,9 +1,10 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
import typing
+
import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
class FileId(UncheckedBaseModel):
diff --git a/src/humanloop/types/file_path.py b/src/humanloop/types/file_path.py
index 5187d11f..3f4f7591 100644
--- a/src/humanloop/types/file_path.py
+++ b/src/humanloop/types/file_path.py
@@ -1,9 +1,10 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
import typing
+
import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
class FilePath(UncheckedBaseModel):
diff --git a/src/humanloop/types/file_request.py b/src/humanloop/types/file_request.py
index 9b2e36f7..ba9518e2 100644
--- a/src/humanloop/types/file_request.py
+++ b/src/humanloop/types/file_request.py
@@ -1,9 +1,10 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
import typing
+
import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
class FileRequest(UncheckedBaseModel):
diff --git a/src/humanloop/types/file_sort_by.py b/src/humanloop/types/file_sort_by.py
new file mode 100644
index 00000000..b3135c3b
--- /dev/null
+++ b/src/humanloop/types/file_sort_by.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+FileSortBy = typing.Union[typing.Literal["created_at", "updated_at", "name"], typing.Any]
diff --git a/src/humanloop/types/flow_kernel_request.py b/src/humanloop/types/flow_kernel_request.py
index 393c8375..9b9adec9 100644
--- a/src/humanloop/types/flow_kernel_request.py
+++ b/src/humanloop/types/flow_kernel_request.py
@@ -1,9 +1,10 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
import typing
+
import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
class FlowKernelRequest(UncheckedBaseModel):
diff --git a/src/humanloop/types/flow_log_response.py b/src/humanloop/types/flow_log_response.py
index 58a87fac..188c1fdf 100644
--- a/src/humanloop/types/flow_log_response.py
+++ b/src/humanloop/types/flow_log_response.py
@@ -1,23 +1,15 @@
# This file was auto-generated by Fern from our API Definition.
from __future__ import annotations
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .agent_linked_file_response import AgentLinkedFileResponse
-from .agent_response import AgentResponse
-from .evaluator_response import EvaluatorResponse
-from .flow_response import FlowResponse
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse
-from .prompt_response import PromptResponse
-from .tool_response import ToolResponse
-from .version_deployment_response import VersionDeploymentResponse
-from .version_id_response import VersionIdResponse
+
+import datetime as dt
import typing
-from .chat_message import ChatMessage
+
import pydantic
-import datetime as dt
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .chat_message import ChatMessage
from .log_status import LogStatus
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-from ..core.pydantic_utilities import update_forward_refs
class FlowLogResponse(UncheckedBaseModel):
@@ -160,7 +152,7 @@ class FlowLogResponse(UncheckedBaseModel):
Logs nested under this Log in the Trace.
"""
- flow: FlowResponse = pydantic.Field()
+ flow: "FlowResponse" = pydantic.Field()
"""
Flow used to generate the Log.
"""
@@ -175,10 +167,19 @@ class Config:
extra = pydantic.Extra.allow
-from .agent_log_response import AgentLogResponse # noqa: E402
-from .evaluator_log_response import EvaluatorLogResponse # noqa: E402
-from .prompt_log_response import PromptLogResponse # noqa: E402
-from .tool_log_response import ToolLogResponse # noqa: E402
-from .log_response import LogResponse # noqa: E402
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001
+from .agent_log_response import AgentLogResponse # noqa: E402, F401, I001
+from .agent_response import AgentResponse # noqa: E402, F401, I001
+from .evaluator_log_response import EvaluatorLogResponse # noqa: E402, F401, I001
+from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001
+from .flow_response import FlowResponse # noqa: E402, F401, I001
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001
+from .prompt_log_response import PromptLogResponse # noqa: E402, F401, I001
+from .prompt_response import PromptResponse # noqa: E402, F401, I001
+from .tool_log_response import ToolLogResponse # noqa: E402, F401, I001
+from .tool_response import ToolResponse # noqa: E402, F401, I001
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001
+from .version_id_response import VersionIdResponse # noqa: E402, F401, I001
+from .log_response import LogResponse # noqa: E402, F401, I001
update_forward_refs(FlowLogResponse)
diff --git a/src/humanloop/types/flow_response.py b/src/humanloop/types/flow_response.py
index 7768778e..826b9238 100644
--- a/src/humanloop/types/flow_response.py
+++ b/src/humanloop/types/flow_response.py
@@ -1,17 +1,18 @@
# This file was auto-generated by Fern from our API Definition.
from __future__ import annotations
-from ..core.unchecked_base_model import UncheckedBaseModel
-import pydantic
+
+import datetime as dt
import typing
+
+import pydantic
import typing_extensions
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
from ..core.serialization import FieldMetadata
+from ..core.unchecked_base_model import UncheckedBaseModel
from .environment_response import EnvironmentResponse
-import datetime as dt
-from .user_response import UserResponse
from .evaluator_aggregate import EvaluatorAggregate
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-from ..core.pydantic_utilities import update_forward_refs
+from .user_response import UserResponse
class FlowResponse(UncheckedBaseModel):
@@ -120,13 +121,13 @@ class Config:
extra = pydantic.Extra.allow
-from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402
-from .agent_response import AgentResponse # noqa: E402
-from .evaluator_response import EvaluatorResponse # noqa: E402
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402
-from .prompt_response import PromptResponse # noqa: E402
-from .tool_response import ToolResponse # noqa: E402
-from .version_deployment_response import VersionDeploymentResponse # noqa: E402
-from .version_id_response import VersionIdResponse # noqa: E402
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001
+from .agent_response import AgentResponse # noqa: E402, F401, I001
+from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001
+from .prompt_response import PromptResponse # noqa: E402, F401, I001
+from .tool_response import ToolResponse # noqa: E402, F401, I001
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001
+from .version_id_response import VersionIdResponse # noqa: E402, F401, I001
update_forward_refs(FlowResponse)
diff --git a/src/humanloop/types/function_tool.py b/src/humanloop/types/function_tool.py
index 0045c8cb..faef2899 100644
--- a/src/humanloop/types/function_tool.py
+++ b/src/humanloop/types/function_tool.py
@@ -1,9 +1,10 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
import typing
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
class FunctionTool(UncheckedBaseModel):
diff --git a/src/humanloop/types/function_tool_choice.py b/src/humanloop/types/function_tool_choice.py
index 72b11a8b..43d0eeb6 100644
--- a/src/humanloop/types/function_tool_choice.py
+++ b/src/humanloop/types/function_tool_choice.py
@@ -1,9 +1,10 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
import typing
+
import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
class FunctionToolChoice(UncheckedBaseModel):
diff --git a/src/humanloop/types/http_validation_error.py b/src/humanloop/types/http_validation_error.py
index 5449e986..188935a0 100644
--- a/src/humanloop/types/http_validation_error.py
+++ b/src/humanloop/types/http_validation_error.py
@@ -1,10 +1,11 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
import typing
-from .validation_error import ValidationError
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .validation_error import ValidationError
class HttpValidationError(UncheckedBaseModel):
diff --git a/src/humanloop/types/human_evaluator_request.py b/src/humanloop/types/human_evaluator_request.py
index 8d69f1cc..ef604880 100644
--- a/src/humanloop/types/human_evaluator_request.py
+++ b/src/humanloop/types/human_evaluator_request.py
@@ -1,14 +1,15 @@
# This file was auto-generated by Fern from our API Definition.
+import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
from ..core.unchecked_base_model import UncheckedBaseModel
from .evaluator_arguments_type import EvaluatorArgumentsType
-import pydantic
-from .human_evaluator_request_return_type import HumanEvaluatorRequestReturnType
-import typing
-from .evaluator_judgment_option_response import EvaluatorJudgmentOptionResponse
from .evaluator_judgment_number_limit import EvaluatorJudgmentNumberLimit
+from .evaluator_judgment_option_response import EvaluatorJudgmentOptionResponse
+from .human_evaluator_request_return_type import HumanEvaluatorRequestReturnType
from .valence import Valence
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
class HumanEvaluatorRequest(UncheckedBaseModel):
diff --git a/src/humanloop/types/image_chat_content.py b/src/humanloop/types/image_chat_content.py
index acbc193b..9e12716d 100644
--- a/src/humanloop/types/image_chat_content.py
+++ b/src/humanloop/types/image_chat_content.py
@@ -1,10 +1,11 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
import typing
-from .image_url import ImageUrl
+
import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .image_url import ImageUrl
class ImageChatContent(UncheckedBaseModel):
diff --git a/src/humanloop/types/image_url.py b/src/humanloop/types/image_url.py
index 033c1a2c..ed170dea 100644
--- a/src/humanloop/types/image_url.py
+++ b/src/humanloop/types/image_url.py
@@ -1,10 +1,11 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-import pydantic
import typing
-from .image_url_detail import ImageUrlDetail
+
+import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .image_url_detail import ImageUrlDetail
class ImageUrl(UncheckedBaseModel):
diff --git a/src/humanloop/types/input_response.py b/src/humanloop/types/input_response.py
index 6bec2dab..36cfa6ed 100644
--- a/src/humanloop/types/input_response.py
+++ b/src/humanloop/types/input_response.py
@@ -1,9 +1,10 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+
import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2
-import typing
+from ..core.unchecked_base_model import UncheckedBaseModel
class InputResponse(UncheckedBaseModel):
diff --git a/src/humanloop/types/linked_file_request.py b/src/humanloop/types/linked_file_request.py
index ee45ffdf..7ce2bc95 100644
--- a/src/humanloop/types/linked_file_request.py
+++ b/src/humanloop/types/linked_file_request.py
@@ -1,9 +1,10 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
import typing
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
class LinkedFileRequest(UncheckedBaseModel):
diff --git a/src/humanloop/types/linked_tool_response.py b/src/humanloop/types/linked_tool_response.py
index 2b41496e..95bc2492 100644
--- a/src/humanloop/types/linked_tool_response.py
+++ b/src/humanloop/types/linked_tool_response.py
@@ -1,9 +1,10 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-import pydantic
import typing
+
+import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
class LinkedToolResponse(UncheckedBaseModel):
diff --git a/src/humanloop/types/list_agents.py b/src/humanloop/types/list_agents.py
index 36481f41..526d9b9b 100644
--- a/src/humanloop/types/list_agents.py
+++ b/src/humanloop/types/list_agents.py
@@ -1,22 +1,16 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .agent_linked_file_response import AgentLinkedFileResponse
-from .agent_response import AgentResponse
-from .evaluator_response import EvaluatorResponse
-from .flow_response import FlowResponse
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse
-from .prompt_response import PromptResponse
-from .tool_response import ToolResponse
-from .version_deployment_response import VersionDeploymentResponse
-from .version_id_response import VersionIdResponse
+from __future__ import annotations
+
import typing
+
import pydantic
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
+from ..core.unchecked_base_model import UncheckedBaseModel
class ListAgents(UncheckedBaseModel):
- records: typing.List[AgentResponse] = pydantic.Field()
+ records: typing.List["AgentResponse"] = pydantic.Field()
"""
The list of Agents.
"""
@@ -29,3 +23,16 @@ class Config:
frozen = True
smart_union = True
extra = pydantic.Extra.allow
+
+
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001
+from .agent_response import AgentResponse # noqa: E402, F401, I001
+from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001
+from .flow_response import FlowResponse # noqa: E402, F401, I001
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001
+from .prompt_response import PromptResponse # noqa: E402, F401, I001
+from .tool_response import ToolResponse # noqa: E402, F401, I001
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001
+from .version_id_response import VersionIdResponse # noqa: E402, F401, I001
+
+update_forward_refs(ListAgents)
diff --git a/src/humanloop/types/list_datasets.py b/src/humanloop/types/list_datasets.py
index cd1ee35e..3e4de370 100644
--- a/src/humanloop/types/list_datasets.py
+++ b/src/humanloop/types/list_datasets.py
@@ -1,10 +1,11 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
import typing
-from .dataset_response import DatasetResponse
+
import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .dataset_response import DatasetResponse
class ListDatasets(UncheckedBaseModel):
diff --git a/src/humanloop/types/list_evaluators.py b/src/humanloop/types/list_evaluators.py
index 7b736e14..4ec412cb 100644
--- a/src/humanloop/types/list_evaluators.py
+++ b/src/humanloop/types/list_evaluators.py
@@ -1,22 +1,16 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .agent_linked_file_response import AgentLinkedFileResponse
-from .agent_response import AgentResponse
-from .evaluator_response import EvaluatorResponse
-from .flow_response import FlowResponse
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse
-from .prompt_response import PromptResponse
-from .tool_response import ToolResponse
-from .version_deployment_response import VersionDeploymentResponse
-from .version_id_response import VersionIdResponse
+from __future__ import annotations
+
import typing
+
import pydantic
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
+from ..core.unchecked_base_model import UncheckedBaseModel
class ListEvaluators(UncheckedBaseModel):
- records: typing.List[EvaluatorResponse] = pydantic.Field()
+ records: typing.List["EvaluatorResponse"] = pydantic.Field()
"""
The list of Evaluators.
"""
@@ -29,3 +23,16 @@ class Config:
frozen = True
smart_union = True
extra = pydantic.Extra.allow
+
+
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001
+from .agent_response import AgentResponse # noqa: E402, F401, I001
+from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001
+from .flow_response import FlowResponse # noqa: E402, F401, I001
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001
+from .prompt_response import PromptResponse # noqa: E402, F401, I001
+from .tool_response import ToolResponse # noqa: E402, F401, I001
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001
+from .version_id_response import VersionIdResponse # noqa: E402, F401, I001
+
+update_forward_refs(ListEvaluators)
diff --git a/src/humanloop/types/list_flows.py b/src/humanloop/types/list_flows.py
index 41ec4008..ce407328 100644
--- a/src/humanloop/types/list_flows.py
+++ b/src/humanloop/types/list_flows.py
@@ -1,22 +1,16 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .agent_linked_file_response import AgentLinkedFileResponse
-from .agent_response import AgentResponse
-from .evaluator_response import EvaluatorResponse
-from .flow_response import FlowResponse
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse
-from .prompt_response import PromptResponse
-from .tool_response import ToolResponse
-from .version_deployment_response import VersionDeploymentResponse
-from .version_id_response import VersionIdResponse
+from __future__ import annotations
+
import typing
+
import pydantic
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
+from ..core.unchecked_base_model import UncheckedBaseModel
class ListFlows(UncheckedBaseModel):
- records: typing.List[FlowResponse] = pydantic.Field()
+ records: typing.List["FlowResponse"] = pydantic.Field()
"""
The list of Flows.
"""
@@ -29,3 +23,16 @@ class Config:
frozen = True
smart_union = True
extra = pydantic.Extra.allow
+
+
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001
+from .agent_response import AgentResponse # noqa: E402, F401, I001
+from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001
+from .flow_response import FlowResponse # noqa: E402, F401, I001
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001
+from .prompt_response import PromptResponse # noqa: E402, F401, I001
+from .tool_response import ToolResponse # noqa: E402, F401, I001
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001
+from .version_id_response import VersionIdResponse # noqa: E402, F401, I001
+
+update_forward_refs(ListFlows)
diff --git a/src/humanloop/types/list_prompts.py b/src/humanloop/types/list_prompts.py
index f773d3f9..42d01cf0 100644
--- a/src/humanloop/types/list_prompts.py
+++ b/src/humanloop/types/list_prompts.py
@@ -1,22 +1,16 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .agent_linked_file_response import AgentLinkedFileResponse
-from .agent_response import AgentResponse
-from .evaluator_response import EvaluatorResponse
-from .flow_response import FlowResponse
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse
-from .prompt_response import PromptResponse
-from .tool_response import ToolResponse
-from .version_deployment_response import VersionDeploymentResponse
-from .version_id_response import VersionIdResponse
+from __future__ import annotations
+
import typing
+
import pydantic
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
+from ..core.unchecked_base_model import UncheckedBaseModel
class ListPrompts(UncheckedBaseModel):
- records: typing.List[PromptResponse] = pydantic.Field()
+ records: typing.List["PromptResponse"] = pydantic.Field()
"""
The list of Prompts.
"""
@@ -29,3 +23,16 @@ class Config:
frozen = True
smart_union = True
extra = pydantic.Extra.allow
+
+
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001
+from .agent_response import AgentResponse # noqa: E402, F401, I001
+from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001
+from .flow_response import FlowResponse # noqa: E402, F401, I001
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001
+from .prompt_response import PromptResponse # noqa: E402, F401, I001
+from .tool_response import ToolResponse # noqa: E402, F401, I001
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001
+from .version_id_response import VersionIdResponse # noqa: E402, F401, I001
+
+update_forward_refs(ListPrompts)
diff --git a/src/humanloop/types/list_tools.py b/src/humanloop/types/list_tools.py
index 84ddc89c..7b2e7c70 100644
--- a/src/humanloop/types/list_tools.py
+++ b/src/humanloop/types/list_tools.py
@@ -1,22 +1,16 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .agent_linked_file_response import AgentLinkedFileResponse
-from .agent_response import AgentResponse
-from .evaluator_response import EvaluatorResponse
-from .flow_response import FlowResponse
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse
-from .prompt_response import PromptResponse
-from .tool_response import ToolResponse
-from .version_deployment_response import VersionDeploymentResponse
-from .version_id_response import VersionIdResponse
+from __future__ import annotations
+
import typing
+
import pydantic
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
+from ..core.unchecked_base_model import UncheckedBaseModel
class ListTools(UncheckedBaseModel):
- records: typing.List[ToolResponse] = pydantic.Field()
+ records: typing.List["ToolResponse"] = pydantic.Field()
"""
The list of Tools.
"""
@@ -29,3 +23,16 @@ class Config:
frozen = True
smart_union = True
extra = pydantic.Extra.allow
+
+
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001
+from .agent_response import AgentResponse # noqa: E402, F401, I001
+from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001
+from .flow_response import FlowResponse # noqa: E402, F401, I001
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001
+from .prompt_response import PromptResponse # noqa: E402, F401, I001
+from .tool_response import ToolResponse # noqa: E402, F401, I001
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001
+from .version_id_response import VersionIdResponse # noqa: E402, F401, I001
+
+update_forward_refs(ListTools)
diff --git a/src/humanloop/types/llm_evaluator_request.py b/src/humanloop/types/llm_evaluator_request.py
index 8f623730..c2061bfa 100644
--- a/src/humanloop/types/llm_evaluator_request.py
+++ b/src/humanloop/types/llm_evaluator_request.py
@@ -1,15 +1,16 @@
# This file was auto-generated by Fern from our API Definition.
+import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
from ..core.unchecked_base_model import UncheckedBaseModel
from .evaluator_arguments_type import EvaluatorArgumentsType
-import pydantic
-from .evaluator_return_type_enum import EvaluatorReturnTypeEnum
-import typing
-from .evaluator_judgment_option_response import EvaluatorJudgmentOptionResponse
from .evaluator_judgment_number_limit import EvaluatorJudgmentNumberLimit
-from .valence import Valence
+from .evaluator_judgment_option_response import EvaluatorJudgmentOptionResponse
+from .evaluator_return_type_enum import EvaluatorReturnTypeEnum
from .prompt_kernel_request import PromptKernelRequest
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from .valence import Valence
class LlmEvaluatorRequest(UncheckedBaseModel):
diff --git a/src/humanloop/types/log_response.py b/src/humanloop/types/log_response.py
index cd7a0a26..e6f60fcb 100644
--- a/src/humanloop/types/log_response.py
+++ b/src/humanloop/types/log_response.py
@@ -1,15 +1,15 @@
# This file was auto-generated by Fern from our API Definition.
from __future__ import annotations
-import typing
+
import typing
if typing.TYPE_CHECKING:
- from .prompt_log_response import PromptLogResponse
- from .tool_log_response import ToolLogResponse
+ from .agent_log_response import AgentLogResponse
from .evaluator_log_response import EvaluatorLogResponse
from .flow_log_response import FlowLogResponse
- from .agent_log_response import AgentLogResponse
+ from .prompt_log_response import PromptLogResponse
+ from .tool_log_response import ToolLogResponse
LogResponse = typing.Union[
"PromptLogResponse", "ToolLogResponse", "EvaluatorLogResponse", "FlowLogResponse", "AgentLogResponse"
]
diff --git a/src/humanloop/types/log_stream_response.py b/src/humanloop/types/log_stream_response.py
index 69ffacf4..2687e2ea 100644
--- a/src/humanloop/types/log_stream_response.py
+++ b/src/humanloop/types/log_stream_response.py
@@ -1,7 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from .prompt_call_stream_response import PromptCallStreamResponse
+
from .agent_log_stream_response import AgentLogStreamResponse
+from .prompt_call_stream_response import PromptCallStreamResponse
LogStreamResponse = typing.Union[PromptCallStreamResponse, AgentLogStreamResponse]
diff --git a/src/humanloop/types/monitoring_evaluator_environment_request.py b/src/humanloop/types/monitoring_evaluator_environment_request.py
index 254dcc98..cd3b8491 100644
--- a/src/humanloop/types/monitoring_evaluator_environment_request.py
+++ b/src/humanloop/types/monitoring_evaluator_environment_request.py
@@ -1,9 +1,10 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+
import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2
-import typing
+from ..core.unchecked_base_model import UncheckedBaseModel
class MonitoringEvaluatorEnvironmentRequest(UncheckedBaseModel):
diff --git a/src/humanloop/types/monitoring_evaluator_response.py b/src/humanloop/types/monitoring_evaluator_response.py
index 1809af57..1c08f955 100644
--- a/src/humanloop/types/monitoring_evaluator_response.py
+++ b/src/humanloop/types/monitoring_evaluator_response.py
@@ -1,13 +1,14 @@
# This file was auto-generated by Fern from our API Definition.
from __future__ import annotations
-from ..core.unchecked_base_model import UncheckedBaseModel
-import pydantic
+
+import datetime as dt
import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
+from ..core.unchecked_base_model import UncheckedBaseModel
from .monitoring_evaluator_state import MonitoringEvaluatorState
-import datetime as dt
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-from ..core.pydantic_utilities import update_forward_refs
class MonitoringEvaluatorResponse(UncheckedBaseModel):
@@ -39,14 +40,14 @@ class Config:
extra = pydantic.Extra.allow
-from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402
-from .agent_response import AgentResponse # noqa: E402
-from .evaluator_response import EvaluatorResponse # noqa: E402
-from .flow_response import FlowResponse # noqa: E402
-from .prompt_response import PromptResponse # noqa: E402
-from .tool_response import ToolResponse # noqa: E402
-from .version_deployment_response import VersionDeploymentResponse # noqa: E402
-from .version_id_response import VersionIdResponse # noqa: E402
-from .version_reference_response import VersionReferenceResponse # noqa: E402
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001
+from .agent_response import AgentResponse # noqa: E402, F401, I001
+from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001
+from .flow_response import FlowResponse # noqa: E402, F401, I001
+from .prompt_response import PromptResponse # noqa: E402, F401, I001
+from .tool_response import ToolResponse # noqa: E402, F401, I001
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001
+from .version_id_response import VersionIdResponse # noqa: E402, F401, I001
+from .version_reference_response import VersionReferenceResponse # noqa: E402, F401, I001
update_forward_refs(MonitoringEvaluatorResponse)
diff --git a/src/humanloop/types/monitoring_evaluator_version_request.py b/src/humanloop/types/monitoring_evaluator_version_request.py
index d97672d8..8adfb290 100644
--- a/src/humanloop/types/monitoring_evaluator_version_request.py
+++ b/src/humanloop/types/monitoring_evaluator_version_request.py
@@ -1,9 +1,10 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+
import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2
-import typing
+from ..core.unchecked_base_model import UncheckedBaseModel
class MonitoringEvaluatorVersionRequest(UncheckedBaseModel):
diff --git a/src/humanloop/types/numeric_evaluator_stats_response.py b/src/humanloop/types/numeric_evaluator_stats_response.py
index 1517d9cf..6bed0547 100644
--- a/src/humanloop/types/numeric_evaluator_stats_response.py
+++ b/src/humanloop/types/numeric_evaluator_stats_response.py
@@ -1,9 +1,10 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-import pydantic
import typing
+
+import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
class NumericEvaluatorStatsResponse(UncheckedBaseModel):
diff --git a/src/humanloop/types/overall_stats.py b/src/humanloop/types/overall_stats.py
index c3753321..d00145b9 100644
--- a/src/humanloop/types/overall_stats.py
+++ b/src/humanloop/types/overall_stats.py
@@ -1,9 +1,10 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+
import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2
-import typing
+from ..core.unchecked_base_model import UncheckedBaseModel
class OverallStats(UncheckedBaseModel):
diff --git a/src/humanloop/types/paginated_data_agent_response.py b/src/humanloop/types/paginated_data_agent_response.py
index 0febbadd..ecc67072 100644
--- a/src/humanloop/types/paginated_data_agent_response.py
+++ b/src/humanloop/types/paginated_data_agent_response.py
@@ -1,22 +1,16 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .agent_linked_file_response import AgentLinkedFileResponse
-from .agent_response import AgentResponse
-from .evaluator_response import EvaluatorResponse
-from .flow_response import FlowResponse
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse
-from .prompt_response import PromptResponse
-from .tool_response import ToolResponse
-from .version_deployment_response import VersionDeploymentResponse
-from .version_id_response import VersionIdResponse
+from __future__ import annotations
+
import typing
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
+from ..core.unchecked_base_model import UncheckedBaseModel
class PaginatedDataAgentResponse(UncheckedBaseModel):
- records: typing.List[AgentResponse]
+ records: typing.List["AgentResponse"]
page: int
size: int
total: int
@@ -29,3 +23,16 @@ class Config:
frozen = True
smart_union = True
extra = pydantic.Extra.allow
+
+
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001
+from .agent_response import AgentResponse # noqa: E402, F401, I001
+from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001
+from .flow_response import FlowResponse # noqa: E402, F401, I001
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001
+from .prompt_response import PromptResponse # noqa: E402, F401, I001
+from .tool_response import ToolResponse # noqa: E402, F401, I001
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001
+from .version_id_response import VersionIdResponse # noqa: E402, F401, I001
+
+update_forward_refs(PaginatedDataAgentResponse)
diff --git a/src/humanloop/types/paginated_data_evaluation_log_response.py b/src/humanloop/types/paginated_data_evaluation_log_response.py
index c508f8a6..17baff0c 100644
--- a/src/humanloop/types/paginated_data_evaluation_log_response.py
+++ b/src/humanloop/types/paginated_data_evaluation_log_response.py
@@ -1,24 +1,13 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .agent_linked_file_response import AgentLinkedFileResponse
-from .agent_log_response import AgentLogResponse
-from .agent_response import AgentResponse
-from .evaluator_log_response import EvaluatorLogResponse
-from .evaluator_response import EvaluatorResponse
-from .flow_log_response import FlowLogResponse
-from .flow_response import FlowResponse
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse
-from .prompt_log_response import PromptLogResponse
-from .prompt_response import PromptResponse
-from .tool_log_response import ToolLogResponse
-from .tool_response import ToolResponse
-from .version_deployment_response import VersionDeploymentResponse
-from .version_id_response import VersionIdResponse
+from __future__ import annotations
+
import typing
-from .evaluation_log_response import EvaluationLogResponse
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .evaluation_log_response import EvaluationLogResponse
class PaginatedDataEvaluationLogResponse(UncheckedBaseModel):
@@ -35,3 +24,21 @@ class Config:
frozen = True
smart_union = True
extra = pydantic.Extra.allow
+
+
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001
+from .agent_log_response import AgentLogResponse # noqa: E402, F401, I001
+from .agent_response import AgentResponse # noqa: E402, F401, I001
+from .evaluator_log_response import EvaluatorLogResponse # noqa: E402, F401, I001
+from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001
+from .flow_log_response import FlowLogResponse # noqa: E402, F401, I001
+from .flow_response import FlowResponse # noqa: E402, F401, I001
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001
+from .prompt_log_response import PromptLogResponse # noqa: E402, F401, I001
+from .prompt_response import PromptResponse # noqa: E402, F401, I001
+from .tool_log_response import ToolLogResponse # noqa: E402, F401, I001
+from .tool_response import ToolResponse # noqa: E402, F401, I001
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001
+from .version_id_response import VersionIdResponse # noqa: E402, F401, I001
+
+update_forward_refs(PaginatedDataEvaluationLogResponse)
diff --git a/src/humanloop/types/paginated_data_evaluator_response.py b/src/humanloop/types/paginated_data_evaluator_response.py
index 2e82c736..47a835e6 100644
--- a/src/humanloop/types/paginated_data_evaluator_response.py
+++ b/src/humanloop/types/paginated_data_evaluator_response.py
@@ -1,22 +1,16 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .agent_linked_file_response import AgentLinkedFileResponse
-from .agent_response import AgentResponse
-from .evaluator_response import EvaluatorResponse
-from .flow_response import FlowResponse
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse
-from .prompt_response import PromptResponse
-from .tool_response import ToolResponse
-from .version_deployment_response import VersionDeploymentResponse
-from .version_id_response import VersionIdResponse
+from __future__ import annotations
+
import typing
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
+from ..core.unchecked_base_model import UncheckedBaseModel
class PaginatedDataEvaluatorResponse(UncheckedBaseModel):
- records: typing.List[EvaluatorResponse]
+ records: typing.List["EvaluatorResponse"]
page: int
size: int
total: int
@@ -29,3 +23,16 @@ class Config:
frozen = True
smart_union = True
extra = pydantic.Extra.allow
+
+
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001
+from .agent_response import AgentResponse # noqa: E402, F401, I001
+from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001
+from .flow_response import FlowResponse # noqa: E402, F401, I001
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001
+from .prompt_response import PromptResponse # noqa: E402, F401, I001
+from .tool_response import ToolResponse # noqa: E402, F401, I001
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001
+from .version_id_response import VersionIdResponse # noqa: E402, F401, I001
+
+update_forward_refs(PaginatedDataEvaluatorResponse)
diff --git a/src/humanloop/types/paginated_data_flow_response.py b/src/humanloop/types/paginated_data_flow_response.py
index 6cfcf9ae..2775ec74 100644
--- a/src/humanloop/types/paginated_data_flow_response.py
+++ b/src/humanloop/types/paginated_data_flow_response.py
@@ -1,22 +1,16 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .agent_linked_file_response import AgentLinkedFileResponse
-from .agent_response import AgentResponse
-from .evaluator_response import EvaluatorResponse
-from .flow_response import FlowResponse
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse
-from .prompt_response import PromptResponse
-from .tool_response import ToolResponse
-from .version_deployment_response import VersionDeploymentResponse
-from .version_id_response import VersionIdResponse
+from __future__ import annotations
+
import typing
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
+from ..core.unchecked_base_model import UncheckedBaseModel
class PaginatedDataFlowResponse(UncheckedBaseModel):
- records: typing.List[FlowResponse]
+ records: typing.List["FlowResponse"]
page: int
size: int
total: int
@@ -29,3 +23,16 @@ class Config:
frozen = True
smart_union = True
extra = pydantic.Extra.allow
+
+
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001
+from .agent_response import AgentResponse # noqa: E402, F401, I001
+from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001
+from .flow_response import FlowResponse # noqa: E402, F401, I001
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001
+from .prompt_response import PromptResponse # noqa: E402, F401, I001
+from .tool_response import ToolResponse # noqa: E402, F401, I001
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001
+from .version_id_response import VersionIdResponse # noqa: E402, F401, I001
+
+update_forward_refs(PaginatedDataFlowResponse)
diff --git a/src/humanloop/types/paginated_data_log_response.py b/src/humanloop/types/paginated_data_log_response.py
index f41ca9ba..1354146d 100644
--- a/src/humanloop/types/paginated_data_log_response.py
+++ b/src/humanloop/types/paginated_data_log_response.py
@@ -1,28 +1,16 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .agent_linked_file_response import AgentLinkedFileResponse
-from .agent_log_response import AgentLogResponse
-from .agent_response import AgentResponse
-from .evaluator_log_response import EvaluatorLogResponse
-from .evaluator_response import EvaluatorResponse
-from .flow_log_response import FlowLogResponse
-from .flow_response import FlowResponse
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse
-from .prompt_log_response import PromptLogResponse
-from .prompt_response import PromptResponse
-from .tool_log_response import ToolLogResponse
-from .tool_response import ToolResponse
-from .version_deployment_response import VersionDeploymentResponse
-from .version_id_response import VersionIdResponse
+from __future__ import annotations
+
import typing
-from .log_response import LogResponse
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
+from ..core.unchecked_base_model import UncheckedBaseModel
class PaginatedDataLogResponse(UncheckedBaseModel):
- records: typing.List[LogResponse]
+ records: typing.List["LogResponse"]
page: int
size: int
total: int
@@ -35,3 +23,22 @@ class Config:
frozen = True
smart_union = True
extra = pydantic.Extra.allow
+
+
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001
+from .agent_log_response import AgentLogResponse # noqa: E402, F401, I001
+from .agent_response import AgentResponse # noqa: E402, F401, I001
+from .evaluator_log_response import EvaluatorLogResponse # noqa: E402, F401, I001
+from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001
+from .flow_log_response import FlowLogResponse # noqa: E402, F401, I001
+from .flow_response import FlowResponse # noqa: E402, F401, I001
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001
+from .prompt_log_response import PromptLogResponse # noqa: E402, F401, I001
+from .prompt_response import PromptResponse # noqa: E402, F401, I001
+from .tool_log_response import ToolLogResponse # noqa: E402, F401, I001
+from .tool_response import ToolResponse # noqa: E402, F401, I001
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001
+from .version_id_response import VersionIdResponse # noqa: E402, F401, I001
+from .log_response import LogResponse # noqa: E402, F401, I001
+
+update_forward_refs(PaginatedDataLogResponse)
diff --git a/src/humanloop/types/paginated_data_prompt_response.py b/src/humanloop/types/paginated_data_prompt_response.py
index d9e1d914..4487f88f 100644
--- a/src/humanloop/types/paginated_data_prompt_response.py
+++ b/src/humanloop/types/paginated_data_prompt_response.py
@@ -1,22 +1,16 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .agent_linked_file_response import AgentLinkedFileResponse
-from .agent_response import AgentResponse
-from .evaluator_response import EvaluatorResponse
-from .flow_response import FlowResponse
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse
-from .prompt_response import PromptResponse
-from .tool_response import ToolResponse
-from .version_deployment_response import VersionDeploymentResponse
-from .version_id_response import VersionIdResponse
+from __future__ import annotations
+
import typing
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
+from ..core.unchecked_base_model import UncheckedBaseModel
class PaginatedDataPromptResponse(UncheckedBaseModel):
- records: typing.List[PromptResponse]
+ records: typing.List["PromptResponse"]
page: int
size: int
total: int
@@ -29,3 +23,16 @@ class Config:
frozen = True
smart_union = True
extra = pydantic.Extra.allow
+
+
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001
+from .agent_response import AgentResponse # noqa: E402, F401, I001
+from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001
+from .flow_response import FlowResponse # noqa: E402, F401, I001
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001
+from .prompt_response import PromptResponse # noqa: E402, F401, I001
+from .tool_response import ToolResponse # noqa: E402, F401, I001
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001
+from .version_id_response import VersionIdResponse # noqa: E402, F401, I001
+
+update_forward_refs(PaginatedDataPromptResponse)
diff --git a/src/humanloop/types/paginated_data_tool_response.py b/src/humanloop/types/paginated_data_tool_response.py
index e2962e87..131ddb69 100644
--- a/src/humanloop/types/paginated_data_tool_response.py
+++ b/src/humanloop/types/paginated_data_tool_response.py
@@ -1,22 +1,16 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .agent_linked_file_response import AgentLinkedFileResponse
-from .agent_response import AgentResponse
-from .evaluator_response import EvaluatorResponse
-from .flow_response import FlowResponse
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse
-from .prompt_response import PromptResponse
-from .tool_response import ToolResponse
-from .version_deployment_response import VersionDeploymentResponse
-from .version_id_response import VersionIdResponse
+from __future__ import annotations
+
import typing
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
+from ..core.unchecked_base_model import UncheckedBaseModel
class PaginatedDataToolResponse(UncheckedBaseModel):
- records: typing.List[ToolResponse]
+ records: typing.List["ToolResponse"]
page: int
size: int
total: int
@@ -29,3 +23,16 @@ class Config:
frozen = True
smart_union = True
extra = pydantic.Extra.allow
+
+
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001
+from .agent_response import AgentResponse # noqa: E402, F401, I001
+from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001
+from .flow_response import FlowResponse # noqa: E402, F401, I001
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001
+from .prompt_response import PromptResponse # noqa: E402, F401, I001
+from .tool_response import ToolResponse # noqa: E402, F401, I001
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001
+from .version_id_response import VersionIdResponse # noqa: E402, F401, I001
+
+update_forward_refs(PaginatedDataToolResponse)
diff --git a/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py b/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py
index 87d5b603..f6ee4be8 100644
--- a/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py
+++ b/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response.py
@@ -1,21 +1,15 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .agent_linked_file_response import AgentLinkedFileResponse
-from .agent_response import AgentResponse
-from .evaluator_response import EvaluatorResponse
-from .flow_response import FlowResponse
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse
-from .prompt_response import PromptResponse
-from .tool_response import ToolResponse
-from .version_deployment_response import VersionDeploymentResponse
-from .version_id_response import VersionIdResponse
+from __future__ import annotations
+
import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
+from ..core.unchecked_base_model import UncheckedBaseModel
from .paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item import (
PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem,
)
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-import pydantic
class PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse(
@@ -36,3 +30,18 @@ class Config:
frozen = True
smart_union = True
extra = pydantic.Extra.allow
+
+
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001
+from .agent_response import AgentResponse # noqa: E402, F401, I001
+from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001
+from .flow_response import FlowResponse # noqa: E402, F401, I001
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001
+from .prompt_response import PromptResponse # noqa: E402, F401, I001
+from .tool_response import ToolResponse # noqa: E402, F401, I001
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001
+from .version_id_response import VersionIdResponse # noqa: E402, F401, I001
+
+update_forward_refs(
+ PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse
+)
diff --git a/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py b/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py
index a1b4f056..ee28a684 100644
--- a/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py
+++ b/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response_records_item.py
@@ -1,12 +1,13 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from .prompt_response import PromptResponse
-from .tool_response import ToolResponse
+
+from .agent_response import AgentResponse
from .dataset_response import DatasetResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
-from .agent_response import AgentResponse
+from .prompt_response import PromptResponse
+from .tool_response import ToolResponse
PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponseRecordsItem = (
typing.Union[PromptResponse, ToolResponse, DatasetResponse, EvaluatorResponse, FlowResponse, AgentResponse]
diff --git a/src/humanloop/types/paginated_datapoint_response.py b/src/humanloop/types/paginated_datapoint_response.py
index 97e8ec4e..c82aa987 100644
--- a/src/humanloop/types/paginated_datapoint_response.py
+++ b/src/humanloop/types/paginated_datapoint_response.py
@@ -1,10 +1,11 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
import typing
-from .datapoint_response import DatapointResponse
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .datapoint_response import DatapointResponse
class PaginatedDatapointResponse(UncheckedBaseModel):
diff --git a/src/humanloop/types/paginated_dataset_response.py b/src/humanloop/types/paginated_dataset_response.py
index f6f2e20e..689c7276 100644
--- a/src/humanloop/types/paginated_dataset_response.py
+++ b/src/humanloop/types/paginated_dataset_response.py
@@ -1,10 +1,11 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
import typing
-from .dataset_response import DatasetResponse
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .dataset_response import DatasetResponse
class PaginatedDatasetResponse(UncheckedBaseModel):
diff --git a/src/humanloop/types/paginated_evaluation_response.py b/src/humanloop/types/paginated_evaluation_response.py
index 16232e0b..b9efe745 100644
--- a/src/humanloop/types/paginated_evaluation_response.py
+++ b/src/humanloop/types/paginated_evaluation_response.py
@@ -1,19 +1,13 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .agent_linked_file_response import AgentLinkedFileResponse
-from .agent_response import AgentResponse
-from .evaluator_response import EvaluatorResponse
-from .flow_response import FlowResponse
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse
-from .prompt_response import PromptResponse
-from .tool_response import ToolResponse
-from .version_deployment_response import VersionDeploymentResponse
-from .version_id_response import VersionIdResponse
+from __future__ import annotations
+
import typing
-from .evaluation_response import EvaluationResponse
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .evaluation_response import EvaluationResponse
class PaginatedEvaluationResponse(UncheckedBaseModel):
@@ -30,3 +24,16 @@ class Config:
frozen = True
smart_union = True
extra = pydantic.Extra.allow
+
+
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001
+from .agent_response import AgentResponse # noqa: E402, F401, I001
+from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001
+from .flow_response import FlowResponse # noqa: E402, F401, I001
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001
+from .prompt_response import PromptResponse # noqa: E402, F401, I001
+from .tool_response import ToolResponse # noqa: E402, F401, I001
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001
+from .version_id_response import VersionIdResponse # noqa: E402, F401, I001
+
+update_forward_refs(PaginatedEvaluationResponse)
diff --git a/src/humanloop/types/populate_template_response.py b/src/humanloop/types/populate_template_response.py
index efcd1d0c..b27a3a90 100644
--- a/src/humanloop/types/populate_template_response.py
+++ b/src/humanloop/types/populate_template_response.py
@@ -1,35 +1,29 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .agent_linked_file_response import AgentLinkedFileResponse
-from .agent_response import AgentResponse
-from .evaluator_response import EvaluatorResponse
-from .flow_response import FlowResponse
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse
-from .prompt_response import PromptResponse
-from .tool_response import ToolResponse
-from .version_deployment_response import VersionDeploymentResponse
-from .version_id_response import VersionIdResponse
-import pydantic
+from __future__ import annotations
+
+import datetime as dt
import typing
+
+import pydantic
+import typing_extensions
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
+from ..core.serialization import FieldMetadata
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .environment_response import EnvironmentResponse
+from .evaluator_aggregate import EvaluatorAggregate
+from .input_response import InputResponse
+from .linked_tool_response import LinkedToolResponse
from .model_endpoints import ModelEndpoints
-from .populate_template_response_template import PopulateTemplateResponseTemplate
-from .template_language import TemplateLanguage
from .model_providers import ModelProviders
+from .populate_template_response_populated_template import PopulateTemplateResponsePopulatedTemplate
+from .populate_template_response_reasoning_effort import PopulateTemplateResponseReasoningEffort
from .populate_template_response_stop import PopulateTemplateResponseStop
+from .populate_template_response_template import PopulateTemplateResponseTemplate
from .response_format import ResponseFormat
-from .populate_template_response_reasoning_effort import PopulateTemplateResponseReasoningEffort
+from .template_language import TemplateLanguage
from .tool_function import ToolFunction
-from .linked_tool_response import LinkedToolResponse
-import typing_extensions
-from ..core.serialization import FieldMetadata
-from .environment_response import EnvironmentResponse
-import datetime as dt
from .user_response import UserResponse
-from .input_response import InputResponse
-from .evaluator_aggregate import EvaluatorAggregate
-from .populate_template_response_populated_template import PopulateTemplateResponsePopulatedTemplate
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
class PopulateTemplateResponse(UncheckedBaseModel):
@@ -221,7 +215,7 @@ class PopulateTemplateResponse(UncheckedBaseModel):
Inputs associated to the Prompt. Inputs correspond to any of the variables used within the Prompt template.
"""
- evaluators: typing.Optional[typing.List[MonitoringEvaluatorResponse]] = pydantic.Field(default=None)
+ evaluators: typing.Optional[typing.List["MonitoringEvaluatorResponse"]] = pydantic.Field(default=None)
"""
Evaluators that have been attached to this Prompt that are used for monitoring logs.
"""
@@ -231,6 +225,11 @@ class PopulateTemplateResponse(UncheckedBaseModel):
Aggregation of Evaluator results for the Prompt Version.
"""
+ raw_file_content: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The raw content of the Prompt. Corresponds to the .prompt file.
+ """
+
populated_template: typing.Optional[PopulateTemplateResponsePopulatedTemplate] = pydantic.Field(default=None)
"""
The template populated with the input values you provided in the request. Returns None if no template exists.
@@ -244,3 +243,16 @@ class Config:
frozen = True
smart_union = True
extra = pydantic.Extra.allow
+
+
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001
+from .agent_response import AgentResponse # noqa: E402, F401, I001
+from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001
+from .flow_response import FlowResponse # noqa: E402, F401, I001
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001
+from .prompt_response import PromptResponse # noqa: E402, F401, I001
+from .tool_response import ToolResponse # noqa: E402, F401, I001
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001
+from .version_id_response import VersionIdResponse # noqa: E402, F401, I001
+
+update_forward_refs(PopulateTemplateResponse)
diff --git a/src/humanloop/types/populate_template_response_populated_template.py b/src/humanloop/types/populate_template_response_populated_template.py
index cafdf1a1..21a714b9 100644
--- a/src/humanloop/types/populate_template_response_populated_template.py
+++ b/src/humanloop/types/populate_template_response_populated_template.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from .chat_message import ChatMessage
PopulateTemplateResponsePopulatedTemplate = typing.Union[str, typing.List[ChatMessage]]
diff --git a/src/humanloop/types/populate_template_response_reasoning_effort.py b/src/humanloop/types/populate_template_response_reasoning_effort.py
index 8dd9f7f6..af02db55 100644
--- a/src/humanloop/types/populate_template_response_reasoning_effort.py
+++ b/src/humanloop/types/populate_template_response_reasoning_effort.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from .open_ai_reasoning_effort import OpenAiReasoningEffort
PopulateTemplateResponseReasoningEffort = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/types/populate_template_response_template.py b/src/humanloop/types/populate_template_response_template.py
index 646b5876..d3b10e2e 100644
--- a/src/humanloop/types/populate_template_response_template.py
+++ b/src/humanloop/types/populate_template_response_template.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from .chat_message import ChatMessage
PopulateTemplateResponseTemplate = typing.Union[str, typing.List[ChatMessage]]
diff --git a/src/humanloop/types/project_sort_by.py b/src/humanloop/types/project_sort_by.py
deleted file mode 100644
index b8265b56..00000000
--- a/src/humanloop/types/project_sort_by.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-ProjectSortBy = typing.Union[typing.Literal["created_at", "updated_at", "name"], typing.Any]
diff --git a/src/humanloop/types/prompt_call_log_response.py b/src/humanloop/types/prompt_call_log_response.py
index 68b6ae1a..2ec71bf1 100644
--- a/src/humanloop/types/prompt_call_log_response.py
+++ b/src/humanloop/types/prompt_call_log_response.py
@@ -1,11 +1,12 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
+import datetime as dt
import typing
+
import pydantic
-import datetime as dt
-from .chat_message import ChatMessage
from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .chat_message import ChatMessage
class PromptCallLogResponse(UncheckedBaseModel):
diff --git a/src/humanloop/types/prompt_call_response.py b/src/humanloop/types/prompt_call_response.py
index ec74437f..f20ce5f6 100644
--- a/src/humanloop/types/prompt_call_response.py
+++ b/src/humanloop/types/prompt_call_response.py
@@ -1,23 +1,17 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .agent_linked_file_response import AgentLinkedFileResponse
-from .agent_response import AgentResponse
-from .evaluator_response import EvaluatorResponse
-from .flow_response import FlowResponse
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse
-from .prompt_response import PromptResponse
-from .tool_response import ToolResponse
-from .version_deployment_response import VersionDeploymentResponse
-from .version_id_response import VersionIdResponse
-import typing
+from __future__ import annotations
+
import datetime as dt
+import typing
+
import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
+from ..core.unchecked_base_model import UncheckedBaseModel
from .chat_message import ChatMessage
-from .prompt_call_response_tool_choice import PromptCallResponseToolChoice
from .log_status import LogStatus
from .prompt_call_log_response import PromptCallLogResponse
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from .prompt_call_response_tool_choice import PromptCallResponseToolChoice
class PromptCallResponse(UncheckedBaseModel):
@@ -49,7 +43,7 @@ class PromptCallResponse(UncheckedBaseModel):
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
"""
- prompt: PromptResponse = pydantic.Field()
+ prompt: "PromptResponse" = pydantic.Field()
"""
Prompt used to generate the Log.
"""
@@ -127,3 +121,16 @@ class Config:
frozen = True
smart_union = True
extra = pydantic.Extra.allow
+
+
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001
+from .agent_response import AgentResponse # noqa: E402, F401, I001
+from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001
+from .flow_response import FlowResponse # noqa: E402, F401, I001
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001
+from .prompt_response import PromptResponse # noqa: E402, F401, I001
+from .tool_response import ToolResponse # noqa: E402, F401, I001
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001
+from .version_id_response import VersionIdResponse # noqa: E402, F401, I001
+
+update_forward_refs(PromptCallResponse)
diff --git a/src/humanloop/types/prompt_call_response_tool_choice.py b/src/humanloop/types/prompt_call_response_tool_choice.py
index d0367656..7cb07ccc 100644
--- a/src/humanloop/types/prompt_call_response_tool_choice.py
+++ b/src/humanloop/types/prompt_call_response_tool_choice.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from .tool_choice import ToolChoice
PromptCallResponseToolChoice = typing.Union[
diff --git a/src/humanloop/types/prompt_call_stream_response.py b/src/humanloop/types/prompt_call_stream_response.py
index 4ffb09bf..48fffdee 100644
--- a/src/humanloop/types/prompt_call_stream_response.py
+++ b/src/humanloop/types/prompt_call_stream_response.py
@@ -1,11 +1,12 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
+import datetime as dt
import typing
+
import pydantic
-import datetime as dt
-from .chat_message import ChatMessage
from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .chat_message import ChatMessage
class PromptCallStreamResponse(UncheckedBaseModel):
diff --git a/src/humanloop/types/prompt_kernel_request.py b/src/humanloop/types/prompt_kernel_request.py
index 80ba5ed5..03e5c624 100644
--- a/src/humanloop/types/prompt_kernel_request.py
+++ b/src/humanloop/types/prompt_kernel_request.py
@@ -1,17 +1,18 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-import pydantic
import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
from .model_endpoints import ModelEndpoints
-from .prompt_kernel_request_template import PromptKernelRequestTemplate
-from .template_language import TemplateLanguage
from .model_providers import ModelProviders
+from .prompt_kernel_request_reasoning_effort import PromptKernelRequestReasoningEffort
from .prompt_kernel_request_stop import PromptKernelRequestStop
+from .prompt_kernel_request_template import PromptKernelRequestTemplate
from .response_format import ResponseFormat
-from .prompt_kernel_request_reasoning_effort import PromptKernelRequestReasoningEffort
+from .template_language import TemplateLanguage
from .tool_function import ToolFunction
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
class PromptKernelRequest(UncheckedBaseModel):
diff --git a/src/humanloop/types/prompt_kernel_request_reasoning_effort.py b/src/humanloop/types/prompt_kernel_request_reasoning_effort.py
index dda61bb4..b5fb8879 100644
--- a/src/humanloop/types/prompt_kernel_request_reasoning_effort.py
+++ b/src/humanloop/types/prompt_kernel_request_reasoning_effort.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from .open_ai_reasoning_effort import OpenAiReasoningEffort
PromptKernelRequestReasoningEffort = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/types/prompt_kernel_request_template.py b/src/humanloop/types/prompt_kernel_request_template.py
index 9e6918b7..59cf99d3 100644
--- a/src/humanloop/types/prompt_kernel_request_template.py
+++ b/src/humanloop/types/prompt_kernel_request_template.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from .chat_message import ChatMessage
PromptKernelRequestTemplate = typing.Union[str, typing.List[ChatMessage]]
diff --git a/src/humanloop/types/prompt_log_response.py b/src/humanloop/types/prompt_log_response.py
index a9e26318..8bea9781 100644
--- a/src/humanloop/types/prompt_log_response.py
+++ b/src/humanloop/types/prompt_log_response.py
@@ -1,24 +1,16 @@
# This file was auto-generated by Fern from our API Definition.
from __future__ import annotations
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .agent_linked_file_response import AgentLinkedFileResponse
-from .agent_response import AgentResponse
-from .evaluator_response import EvaluatorResponse
-from .flow_response import FlowResponse
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse
-from .prompt_response import PromptResponse
-from .tool_response import ToolResponse
-from .version_deployment_response import VersionDeploymentResponse
-from .version_id_response import VersionIdResponse
+
+import datetime as dt
import typing
-from .chat_message import ChatMessage
+
import pydantic
-from .prompt_log_response_tool_choice import PromptLogResponseToolChoice
-import datetime as dt
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .chat_message import ChatMessage
from .log_status import LogStatus
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-from ..core.pydantic_utilities import update_forward_refs
+from .prompt_log_response_tool_choice import PromptLogResponseToolChoice
class PromptLogResponse(UncheckedBaseModel):
@@ -75,7 +67,7 @@ class PromptLogResponse(UncheckedBaseModel):
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
"""
- prompt: PromptResponse = pydantic.Field()
+ prompt: "PromptResponse" = pydantic.Field()
"""
Prompt used to generate the Log.
"""
@@ -215,10 +207,19 @@ class Config:
extra = pydantic.Extra.allow
-from .agent_log_response import AgentLogResponse # noqa: E402
-from .evaluator_log_response import EvaluatorLogResponse # noqa: E402
-from .flow_log_response import FlowLogResponse # noqa: E402
-from .tool_log_response import ToolLogResponse # noqa: E402
-from .log_response import LogResponse # noqa: E402
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001
+from .agent_response import AgentResponse # noqa: E402, F401, I001
+from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001
+from .flow_response import FlowResponse # noqa: E402, F401, I001
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001
+from .prompt_response import PromptResponse # noqa: E402, F401, I001
+from .tool_response import ToolResponse # noqa: E402, F401, I001
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001
+from .version_id_response import VersionIdResponse # noqa: E402, F401, I001
+from .agent_log_response import AgentLogResponse # noqa: E402, F401, I001
+from .evaluator_log_response import EvaluatorLogResponse # noqa: E402, F401, I001
+from .flow_log_response import FlowLogResponse # noqa: E402, F401, I001
+from .tool_log_response import ToolLogResponse # noqa: E402, F401, I001
+from .log_response import LogResponse # noqa: E402, F401, I001
update_forward_refs(PromptLogResponse)
diff --git a/src/humanloop/types/prompt_log_response_tool_choice.py b/src/humanloop/types/prompt_log_response_tool_choice.py
index 07005081..e7acf4bb 100644
--- a/src/humanloop/types/prompt_log_response_tool_choice.py
+++ b/src/humanloop/types/prompt_log_response_tool_choice.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from .tool_choice import ToolChoice
PromptLogResponseToolChoice = typing.Union[
diff --git a/src/humanloop/types/prompt_response.py b/src/humanloop/types/prompt_response.py
index 5d6ff870..1a2b1490 100644
--- a/src/humanloop/types/prompt_response.py
+++ b/src/humanloop/types/prompt_response.py
@@ -1,27 +1,28 @@
# This file was auto-generated by Fern from our API Definition.
from __future__ import annotations
-from ..core.unchecked_base_model import UncheckedBaseModel
-import pydantic
+
+import datetime as dt
import typing
+
+import pydantic
+import typing_extensions
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
+from ..core.serialization import FieldMetadata
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .environment_response import EnvironmentResponse
+from .evaluator_aggregate import EvaluatorAggregate
+from .input_response import InputResponse
+from .linked_tool_response import LinkedToolResponse
from .model_endpoints import ModelEndpoints
-from .prompt_response_template import PromptResponseTemplate
-from .template_language import TemplateLanguage
from .model_providers import ModelProviders
+from .prompt_response_reasoning_effort import PromptResponseReasoningEffort
from .prompt_response_stop import PromptResponseStop
+from .prompt_response_template import PromptResponseTemplate
from .response_format import ResponseFormat
-from .prompt_response_reasoning_effort import PromptResponseReasoningEffort
+from .template_language import TemplateLanguage
from .tool_function import ToolFunction
-from .linked_tool_response import LinkedToolResponse
-import typing_extensions
-from ..core.serialization import FieldMetadata
-from .environment_response import EnvironmentResponse
-import datetime as dt
from .user_response import UserResponse
-from .input_response import InputResponse
-from .evaluator_aggregate import EvaluatorAggregate
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-from ..core.pydantic_utilities import update_forward_refs
class PromptResponse(UncheckedBaseModel):
@@ -223,6 +224,11 @@ class PromptResponse(UncheckedBaseModel):
Aggregation of Evaluator results for the Prompt Version.
"""
+ raw_file_content: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The raw content of the Prompt. Corresponds to the .prompt file.
+ """
+
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
else:
@@ -233,13 +239,13 @@ class Config:
extra = pydantic.Extra.allow
-from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402
-from .agent_response import AgentResponse # noqa: E402
-from .evaluator_response import EvaluatorResponse # noqa: E402
-from .flow_response import FlowResponse # noqa: E402
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402
-from .tool_response import ToolResponse # noqa: E402
-from .version_deployment_response import VersionDeploymentResponse # noqa: E402
-from .version_id_response import VersionIdResponse # noqa: E402
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001
+from .agent_response import AgentResponse # noqa: E402, F401, I001
+from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001
+from .flow_response import FlowResponse # noqa: E402, F401, I001
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001
+from .tool_response import ToolResponse # noqa: E402, F401, I001
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001
+from .version_id_response import VersionIdResponse # noqa: E402, F401, I001
update_forward_refs(PromptResponse)
diff --git a/src/humanloop/types/prompt_response_reasoning_effort.py b/src/humanloop/types/prompt_response_reasoning_effort.py
index e136637f..86e9e7ad 100644
--- a/src/humanloop/types/prompt_response_reasoning_effort.py
+++ b/src/humanloop/types/prompt_response_reasoning_effort.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from .open_ai_reasoning_effort import OpenAiReasoningEffort
PromptResponseReasoningEffort = typing.Union[OpenAiReasoningEffort, int]
diff --git a/src/humanloop/types/prompt_response_template.py b/src/humanloop/types/prompt_response_template.py
index 2c474650..8a89bc04 100644
--- a/src/humanloop/types/prompt_response_template.py
+++ b/src/humanloop/types/prompt_response_template.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
import typing
+
from .chat_message import ChatMessage
PromptResponseTemplate = typing.Union[str, typing.List[ChatMessage]]
diff --git a/src/humanloop/types/provider_api_keys.py b/src/humanloop/types/provider_api_keys.py
index 548e7e20..49bf8731 100644
--- a/src/humanloop/types/provider_api_keys.py
+++ b/src/humanloop/types/provider_api_keys.py
@@ -1,11 +1,12 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
import typing
+
+import pydantic
import typing_extensions
-from ..core.serialization import FieldMetadata
from ..core.pydantic_utilities import IS_PYDANTIC_V2
-import pydantic
+from ..core.serialization import FieldMetadata
+from ..core.unchecked_base_model import UncheckedBaseModel
class ProviderApiKeys(UncheckedBaseModel):
diff --git a/src/humanloop/types/response_format.py b/src/humanloop/types/response_format.py
index ba5187f7..287019c4 100644
--- a/src/humanloop/types/response_format.py
+++ b/src/humanloop/types/response_format.py
@@ -1,10 +1,11 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .response_format_type import ResponseFormatType
import typing
+
import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .response_format_type import ResponseFormatType
class ResponseFormat(UncheckedBaseModel):
diff --git a/src/humanloop/types/run_stats_response.py b/src/humanloop/types/run_stats_response.py
index dbc1be73..3e385b26 100644
--- a/src/humanloop/types/run_stats_response.py
+++ b/src/humanloop/types/run_stats_response.py
@@ -1,11 +1,12 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-import pydantic
import typing
-from .run_stats_response_evaluator_stats_item import RunStatsResponseEvaluatorStatsItem
-from .evaluation_status import EvaluationStatus
+
+import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .evaluation_status import EvaluationStatus
+from .run_stats_response_evaluator_stats_item import RunStatsResponseEvaluatorStatsItem
class RunStatsResponse(UncheckedBaseModel):
diff --git a/src/humanloop/types/run_stats_response_evaluator_stats_item.py b/src/humanloop/types/run_stats_response_evaluator_stats_item.py
index c7fe6056..697efb12 100644
--- a/src/humanloop/types/run_stats_response_evaluator_stats_item.py
+++ b/src/humanloop/types/run_stats_response_evaluator_stats_item.py
@@ -1,8 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from .numeric_evaluator_stats_response import NumericEvaluatorStatsResponse
+
from .boolean_evaluator_stats_response import BooleanEvaluatorStatsResponse
+from .numeric_evaluator_stats_response import NumericEvaluatorStatsResponse
from .select_evaluator_stats_response import SelectEvaluatorStatsResponse
from .text_evaluator_stats_response import TextEvaluatorStatsResponse
diff --git a/src/humanloop/types/run_version_response.py b/src/humanloop/types/run_version_response.py
index 770dc487..703bea5f 100644
--- a/src/humanloop/types/run_version_response.py
+++ b/src/humanloop/types/run_version_response.py
@@ -1,10 +1,11 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from .prompt_response import PromptResponse
-from .tool_response import ToolResponse
+
+from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
-from .agent_response import AgentResponse
+from .prompt_response import PromptResponse
+from .tool_response import ToolResponse
RunVersionResponse = typing.Union[PromptResponse, ToolResponse, EvaluatorResponse, FlowResponse, AgentResponse]
diff --git a/src/humanloop/types/select_evaluator_stats_response.py b/src/humanloop/types/select_evaluator_stats_response.py
index bd98ead6..14068fef 100644
--- a/src/humanloop/types/select_evaluator_stats_response.py
+++ b/src/humanloop/types/select_evaluator_stats_response.py
@@ -1,9 +1,10 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-import pydantic
import typing
+
+import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
class SelectEvaluatorStatsResponse(UncheckedBaseModel):
diff --git a/src/humanloop/types/text_chat_content.py b/src/humanloop/types/text_chat_content.py
index 81d00eaa..0db5d057 100644
--- a/src/humanloop/types/text_chat_content.py
+++ b/src/humanloop/types/text_chat_content.py
@@ -1,9 +1,10 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
import typing
+
import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
class TextChatContent(UncheckedBaseModel):
diff --git a/src/humanloop/types/text_evaluator_stats_response.py b/src/humanloop/types/text_evaluator_stats_response.py
index 652c7aa6..bb38f996 100644
--- a/src/humanloop/types/text_evaluator_stats_response.py
+++ b/src/humanloop/types/text_evaluator_stats_response.py
@@ -1,9 +1,10 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+
import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2
-import typing
+from ..core.unchecked_base_model import UncheckedBaseModel
class TextEvaluatorStatsResponse(UncheckedBaseModel):
diff --git a/src/humanloop/types/tool_call.py b/src/humanloop/types/tool_call.py
index 373a36b0..11c9d7e9 100644
--- a/src/humanloop/types/tool_call.py
+++ b/src/humanloop/types/tool_call.py
@@ -1,11 +1,12 @@
# This file was auto-generated by Fern from our API Definition.
+import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
from ..core.unchecked_base_model import UncheckedBaseModel
from .chat_tool_type import ChatToolType
from .function_tool import FunctionTool
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-import typing
-import pydantic
class ToolCall(UncheckedBaseModel):
diff --git a/src/humanloop/types/tool_call_response.py b/src/humanloop/types/tool_call_response.py
index 55bf2712..d3b660e1 100644
--- a/src/humanloop/types/tool_call_response.py
+++ b/src/humanloop/types/tool_call_response.py
@@ -1,26 +1,14 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .agent_linked_file_response import AgentLinkedFileResponse
-from .agent_response import AgentResponse
-from .evaluator_response import EvaluatorResponse
-from .flow_response import FlowResponse
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse
-from .prompt_response import PromptResponse
-from .tool_response import ToolResponse
-from .version_deployment_response import VersionDeploymentResponse
-from .version_id_response import VersionIdResponse
-from .agent_log_response import AgentLogResponse
-from .evaluator_log_response import EvaluatorLogResponse
-from .flow_log_response import FlowLogResponse
-from .prompt_log_response import PromptLogResponse
-from .tool_log_response import ToolLogResponse
-import typing
+from __future__ import annotations
+
import datetime as dt
+import typing
+
import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
+from ..core.unchecked_base_model import UncheckedBaseModel
from .log_status import LogStatus
-from .log_response import LogResponse
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
class ToolCallResponse(UncheckedBaseModel):
@@ -38,7 +26,7 @@ class ToolCallResponse(UncheckedBaseModel):
When the logged event ended.
"""
- tool: ToolResponse = pydantic.Field()
+ tool: "ToolResponse" = pydantic.Field()
"""
Tool used to generate the Log.
"""
@@ -138,7 +126,7 @@ class ToolCallResponse(UncheckedBaseModel):
ID of the log.
"""
- evaluator_logs: typing.List[EvaluatorLogResponse] = pydantic.Field()
+ evaluator_logs: typing.List["EvaluatorLogResponse"] = pydantic.Field()
"""
List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
"""
@@ -153,7 +141,7 @@ class ToolCallResponse(UncheckedBaseModel):
ID of the Trace containing the Tool Call Log.
"""
- trace_children: typing.Optional[typing.List[LogResponse]] = pydantic.Field(default=None)
+ trace_children: typing.Optional[typing.List["LogResponse"]] = pydantic.Field(default=None)
"""
Logs nested under this Log in the Trace.
"""
@@ -166,3 +154,22 @@ class Config:
frozen = True
smart_union = True
extra = pydantic.Extra.allow
+
+
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001
+from .agent_response import AgentResponse # noqa: E402, F401, I001
+from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001
+from .flow_response import FlowResponse # noqa: E402, F401, I001
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001
+from .prompt_response import PromptResponse # noqa: E402, F401, I001
+from .tool_response import ToolResponse # noqa: E402, F401, I001
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001
+from .version_id_response import VersionIdResponse # noqa: E402, F401, I001
+from .agent_log_response import AgentLogResponse # noqa: E402, F401, I001
+from .evaluator_log_response import EvaluatorLogResponse # noqa: E402, F401, I001
+from .flow_log_response import FlowLogResponse # noqa: E402, F401, I001
+from .prompt_log_response import PromptLogResponse # noqa: E402, F401, I001
+from .tool_log_response import ToolLogResponse # noqa: E402, F401, I001
+from .log_response import LogResponse # noqa: E402, F401, I001
+
+update_forward_refs(ToolCallResponse)
diff --git a/src/humanloop/types/tool_choice.py b/src/humanloop/types/tool_choice.py
index a4dc5721..fad59550 100644
--- a/src/humanloop/types/tool_choice.py
+++ b/src/humanloop/types/tool_choice.py
@@ -1,11 +1,12 @@
# This file was auto-generated by Fern from our API Definition.
+import typing
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
from ..core.unchecked_base_model import UncheckedBaseModel
from .chat_tool_type import ChatToolType
from .function_tool_choice import FunctionToolChoice
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-import typing
-import pydantic
class ToolChoice(UncheckedBaseModel):
diff --git a/src/humanloop/types/tool_function.py b/src/humanloop/types/tool_function.py
index dc42d02f..e0a29165 100644
--- a/src/humanloop/types/tool_function.py
+++ b/src/humanloop/types/tool_function.py
@@ -1,9 +1,10 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-import pydantic
import typing
+
+import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
class ToolFunction(UncheckedBaseModel):
diff --git a/src/humanloop/types/tool_kernel_request.py b/src/humanloop/types/tool_kernel_request.py
index ed372dd2..3e2d4afe 100644
--- a/src/humanloop/types/tool_kernel_request.py
+++ b/src/humanloop/types/tool_kernel_request.py
@@ -1,10 +1,11 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
import typing
-from .tool_function import ToolFunction
+
import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .tool_function import ToolFunction
class ToolKernelRequest(UncheckedBaseModel):
diff --git a/src/humanloop/types/tool_log_response.py b/src/humanloop/types/tool_log_response.py
index 251223af..2524eb5b 100644
--- a/src/humanloop/types/tool_log_response.py
+++ b/src/humanloop/types/tool_log_response.py
@@ -1,23 +1,15 @@
# This file was auto-generated by Fern from our API Definition.
from __future__ import annotations
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .agent_linked_file_response import AgentLinkedFileResponse
-from .agent_response import AgentResponse
-from .evaluator_response import EvaluatorResponse
-from .flow_response import FlowResponse
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse
-from .prompt_response import PromptResponse
-from .tool_response import ToolResponse
-from .version_deployment_response import VersionDeploymentResponse
-from .version_id_response import VersionIdResponse
-import typing
+
import datetime as dt
+import typing
+
import pydantic
-from .log_status import LogStatus
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
+from ..core.unchecked_base_model import UncheckedBaseModel
from .chat_message import ChatMessage
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-from ..core.pydantic_utilities import update_forward_refs
+from .log_status import LogStatus
class ToolLogResponse(UncheckedBaseModel):
@@ -150,7 +142,7 @@ class ToolLogResponse(UncheckedBaseModel):
Logs nested under this Log in the Trace.
"""
- tool: ToolResponse = pydantic.Field()
+ tool: "ToolResponse" = pydantic.Field()
"""
Tool used to generate the Log.
"""
@@ -170,10 +162,19 @@ class Config:
extra = pydantic.Extra.allow
-from .agent_log_response import AgentLogResponse # noqa: E402
-from .evaluator_log_response import EvaluatorLogResponse # noqa: E402
-from .flow_log_response import FlowLogResponse # noqa: E402
-from .prompt_log_response import PromptLogResponse # noqa: E402
-from .log_response import LogResponse # noqa: E402
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001
+from .agent_log_response import AgentLogResponse # noqa: E402, F401, I001
+from .agent_response import AgentResponse # noqa: E402, F401, I001
+from .evaluator_log_response import EvaluatorLogResponse # noqa: E402, F401, I001
+from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001
+from .flow_log_response import FlowLogResponse # noqa: E402, F401, I001
+from .flow_response import FlowResponse # noqa: E402, F401, I001
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001
+from .prompt_log_response import PromptLogResponse # noqa: E402, F401, I001
+from .prompt_response import PromptResponse # noqa: E402, F401, I001
+from .tool_response import ToolResponse # noqa: E402, F401, I001
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001
+from .version_id_response import VersionIdResponse # noqa: E402, F401, I001
+from .log_response import LogResponse # noqa: E402, F401, I001
update_forward_refs(ToolLogResponse)
diff --git a/src/humanloop/types/tool_response.py b/src/humanloop/types/tool_response.py
index 70537215..b2bca04b 100644
--- a/src/humanloop/types/tool_response.py
+++ b/src/humanloop/types/tool_response.py
@@ -1,18 +1,19 @@
# This file was auto-generated by Fern from our API Definition.
from __future__ import annotations
-from ..core.unchecked_base_model import UncheckedBaseModel
-import pydantic
+
+import datetime as dt
import typing
-from .tool_function import ToolFunction
-from .files_tool_type import FilesToolType
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
+from ..core.unchecked_base_model import UncheckedBaseModel
from .environment_response import EnvironmentResponse
-import datetime as dt
-from .user_response import UserResponse
-from .input_response import InputResponse
from .evaluator_aggregate import EvaluatorAggregate
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-from ..core.pydantic_utilities import update_forward_refs
+from .files_tool_type import FilesToolType
+from .input_response import InputResponse
+from .tool_function import ToolFunction
+from .user_response import UserResponse
class ToolResponse(UncheckedBaseModel):
@@ -152,13 +153,13 @@ class Config:
extra = pydantic.Extra.allow
-from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402
-from .agent_response import AgentResponse # noqa: E402
-from .evaluator_response import EvaluatorResponse # noqa: E402
-from .flow_response import FlowResponse # noqa: E402
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402
-from .prompt_response import PromptResponse # noqa: E402
-from .version_deployment_response import VersionDeploymentResponse # noqa: E402
-from .version_id_response import VersionIdResponse # noqa: E402
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001
+from .agent_response import AgentResponse # noqa: E402, F401, I001
+from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001
+from .flow_response import FlowResponse # noqa: E402, F401, I001
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001
+from .prompt_response import PromptResponse # noqa: E402, F401, I001
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001
+from .version_id_response import VersionIdResponse # noqa: E402, F401, I001
update_forward_refs(ToolResponse)
diff --git a/src/humanloop/types/update_version_request.py b/src/humanloop/types/update_version_request.py
index 90f4f488..0587c889 100644
--- a/src/humanloop/types/update_version_request.py
+++ b/src/humanloop/types/update_version_request.py
@@ -1,9 +1,10 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
import typing
+
import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
class UpdateVersionRequest(UncheckedBaseModel):
diff --git a/src/humanloop/types/validation_error.py b/src/humanloop/types/validation_error.py
index 72b616e6..0438bc05 100644
--- a/src/humanloop/types/validation_error.py
+++ b/src/humanloop/types/validation_error.py
@@ -1,10 +1,11 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
import typing
-from .validation_error_loc_item import ValidationErrorLocItem
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .validation_error_loc_item import ValidationErrorLocItem
class ValidationError(UncheckedBaseModel):
diff --git a/src/humanloop/types/version_deployment_response.py b/src/humanloop/types/version_deployment_response.py
index 0db57d69..fdee59de 100644
--- a/src/humanloop/types/version_deployment_response.py
+++ b/src/humanloop/types/version_deployment_response.py
@@ -1,12 +1,13 @@
# This file was auto-generated by Fern from our API Definition.
from __future__ import annotations
-from ..core.unchecked_base_model import UncheckedBaseModel
+
+import typing
+
import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
+from ..core.unchecked_base_model import UncheckedBaseModel
from .environment_response import EnvironmentResponse
-import typing
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-from ..core.pydantic_utilities import update_forward_refs
class VersionDeploymentResponse(UncheckedBaseModel):
@@ -36,14 +37,14 @@ class Config:
extra = pydantic.Extra.allow
-from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402
-from .agent_response import AgentResponse # noqa: E402
-from .evaluator_response import EvaluatorResponse # noqa: E402
-from .flow_response import FlowResponse # noqa: E402
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402
-from .prompt_response import PromptResponse # noqa: E402
-from .tool_response import ToolResponse # noqa: E402
-from .version_id_response import VersionIdResponse # noqa: E402
-from .version_deployment_response_file import VersionDeploymentResponseFile # noqa: E402
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001
+from .agent_response import AgentResponse # noqa: E402, F401, I001
+from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001
+from .flow_response import FlowResponse # noqa: E402, F401, I001
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001
+from .prompt_response import PromptResponse # noqa: E402, F401, I001
+from .tool_response import ToolResponse # noqa: E402, F401, I001
+from .version_id_response import VersionIdResponse # noqa: E402, F401, I001
+from .version_deployment_response_file import VersionDeploymentResponseFile # noqa: E402, F401, I001
update_forward_refs(VersionDeploymentResponse)
diff --git a/src/humanloop/types/version_deployment_response_file.py b/src/humanloop/types/version_deployment_response_file.py
index 4fadcff0..130f2c1c 100644
--- a/src/humanloop/types/version_deployment_response_file.py
+++ b/src/humanloop/types/version_deployment_response_file.py
@@ -1,16 +1,17 @@
# This file was auto-generated by Fern from our API Definition.
from __future__ import annotations
+
import typing
+
from .dataset_response import DatasetResponse
-import typing
if typing.TYPE_CHECKING:
- from .prompt_response import PromptResponse
- from .tool_response import ToolResponse
+ from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
- from .agent_response import AgentResponse
+ from .prompt_response import PromptResponse
+ from .tool_response import ToolResponse
VersionDeploymentResponseFile = typing.Union[
"PromptResponse", "ToolResponse", DatasetResponse, "EvaluatorResponse", "FlowResponse", "AgentResponse"
]
diff --git a/src/humanloop/types/version_id.py b/src/humanloop/types/version_id.py
index 635aee49..51de3db1 100644
--- a/src/humanloop/types/version_id.py
+++ b/src/humanloop/types/version_id.py
@@ -1,9 +1,10 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+
import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2
-import typing
+from ..core.unchecked_base_model import UncheckedBaseModel
class VersionId(UncheckedBaseModel):
diff --git a/src/humanloop/types/version_id_response.py b/src/humanloop/types/version_id_response.py
index e3f5dc27..47aa53db 100644
--- a/src/humanloop/types/version_id_response.py
+++ b/src/humanloop/types/version_id_response.py
@@ -1,11 +1,12 @@
# This file was auto-generated by Fern from our API Definition.
from __future__ import annotations
-from ..core.unchecked_base_model import UncheckedBaseModel
-import pydantic
+
import typing
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-from ..core.pydantic_utilities import update_forward_refs
+
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
+from ..core.unchecked_base_model import UncheckedBaseModel
class VersionIdResponse(UncheckedBaseModel):
@@ -30,14 +31,14 @@ class Config:
extra = pydantic.Extra.allow
-from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402
-from .agent_response import AgentResponse # noqa: E402
-from .evaluator_response import EvaluatorResponse # noqa: E402
-from .flow_response import FlowResponse # noqa: E402
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402
-from .prompt_response import PromptResponse # noqa: E402
-from .tool_response import ToolResponse # noqa: E402
-from .version_deployment_response import VersionDeploymentResponse # noqa: E402
-from .version_id_response_version import VersionIdResponseVersion # noqa: E402
+from .agent_linked_file_response import AgentLinkedFileResponse # noqa: E402, F401, I001
+from .agent_response import AgentResponse # noqa: E402, F401, I001
+from .evaluator_response import EvaluatorResponse # noqa: E402, F401, I001
+from .flow_response import FlowResponse # noqa: E402, F401, I001
+from .monitoring_evaluator_response import MonitoringEvaluatorResponse # noqa: E402, F401, I001
+from .prompt_response import PromptResponse # noqa: E402, F401, I001
+from .tool_response import ToolResponse # noqa: E402, F401, I001
+from .version_deployment_response import VersionDeploymentResponse # noqa: E402, F401, I001
+from .version_id_response_version import VersionIdResponseVersion # noqa: E402, F401, I001
update_forward_refs(VersionIdResponse)
diff --git a/src/humanloop/types/version_id_response_version.py b/src/humanloop/types/version_id_response_version.py
index 1b74199f..eff8378c 100644
--- a/src/humanloop/types/version_id_response_version.py
+++ b/src/humanloop/types/version_id_response_version.py
@@ -1,20 +1,17 @@
# This file was auto-generated by Fern from our API Definition.
from __future__ import annotations
+
import typing
+
from .dataset_response import DatasetResponse
if typing.TYPE_CHECKING:
- from .prompt_response import PromptResponse
- from .tool_response import ToolResponse
+ from .agent_response import AgentResponse
from .evaluator_response import EvaluatorResponse
from .flow_response import FlowResponse
- from .agent_response import AgentResponse
+ from .prompt_response import PromptResponse
+ from .tool_response import ToolResponse
VersionIdResponseVersion = typing.Union[
- "PromptResponse",
- "ToolResponse",
- DatasetResponse,
- "EvaluatorResponse",
- "FlowResponse",
- "AgentResponse",
+ "PromptResponse", "ToolResponse", DatasetResponse, "EvaluatorResponse", "FlowResponse", "AgentResponse"
]
diff --git a/src/humanloop/types/version_reference_response.py b/src/humanloop/types/version_reference_response.py
index a6a7783c..7785a8f1 100644
--- a/src/humanloop/types/version_reference_response.py
+++ b/src/humanloop/types/version_reference_response.py
@@ -1,6 +1,7 @@
# This file was auto-generated by Fern from our API Definition.
from __future__ import annotations
+
import typing
if typing.TYPE_CHECKING:
diff --git a/src/humanloop/types/version_stats_response.py b/src/humanloop/types/version_stats_response.py
index 6439fca4..be1c6286 100644
--- a/src/humanloop/types/version_stats_response.py
+++ b/src/humanloop/types/version_stats_response.py
@@ -1,10 +1,11 @@
# This file was auto-generated by Fern from our API Definition.
-from ..core.unchecked_base_model import UncheckedBaseModel
-import pydantic
import typing
-from .version_stats_response_evaluator_version_stats_item import VersionStatsResponseEvaluatorVersionStatsItem
+
+import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.unchecked_base_model import UncheckedBaseModel
+from .version_stats_response_evaluator_version_stats_item import VersionStatsResponseEvaluatorVersionStatsItem
class VersionStatsResponse(UncheckedBaseModel):
diff --git a/src/humanloop/types/version_stats_response_evaluator_version_stats_item.py b/src/humanloop/types/version_stats_response_evaluator_version_stats_item.py
index 2ff56505..a7b9fb21 100644
--- a/src/humanloop/types/version_stats_response_evaluator_version_stats_item.py
+++ b/src/humanloop/types/version_stats_response_evaluator_version_stats_item.py
@@ -1,8 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from .numeric_evaluator_stats_response import NumericEvaluatorStatsResponse
+
from .boolean_evaluator_stats_response import BooleanEvaluatorStatsResponse
+from .numeric_evaluator_stats_response import NumericEvaluatorStatsResponse
from .select_evaluator_stats_response import SelectEvaluatorStatsResponse
from .text_evaluator_stats_response import TextEvaluatorStatsResponse
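
Note on the pattern above (not part of the patch): the regenerated response models reference each other with string ("forward") annotations such as `tool: "ToolResponse"`, move the cross-module imports to the bottom of the file with `# noqa: E402, F401, I001`, and then call the SDK's `update_forward_refs(...)` helper so the deferred names resolve. The snippet below is a minimal, self-contained sketch of that idea under the assumption of Pydantic v2; the class names `LogNode` and `ChildLog` are hypothetical, and both live in one module here instead of sibling files, so a plain `model_rebuild()` stands in for the SDK helper.

```python
from __future__ import annotations

import typing

import pydantic


class LogNode(pydantic.BaseModel):
    # "ChildLog" is only a string at this point; the class is defined further
    # down, mirroring how the generated files defer their circular imports.
    id: str
    children: typing.Optional[typing.List["ChildLog"]] = None


class ChildLog(pydantic.BaseModel):
    # Back-reference to LogNode, which already exists, so this model is
    # complete as soon as it is created.
    parent_id: typing.Optional[str] = None


# Resolve the forward references now that every referenced class exists.
# (Pydantic v1 would use LogNode.update_forward_refs() instead.)
LogNode.model_rebuild()

node = LogNode(id="log_1", children=[ChildLog(parent_id="log_1")])
print(node)
```

Deferring the imports keeps module load order acyclic; the rebuild step is what makes the string annotations usable for validation at runtime.
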
From 0134c193fb82af172f0857d302d6272b81411221 Mon Sep 17 00:00:00 2001
From: James Baskerville
Date: Fri, 9 May 2025 16:28:14 +0100
Subject: [PATCH 2/2] Add type ignore
---
src/humanloop/core/pydantic_utilities.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/humanloop/core/pydantic_utilities.py b/src/humanloop/core/pydantic_utilities.py
index 60a2c713..0360ef49 100644
--- a/src/humanloop/core/pydantic_utilities.py
+++ b/src/humanloop/core/pydantic_utilities.py
@@ -181,7 +181,7 @@ def deep_union_pydantic_dicts(source: Dict[str, Any], destination: Dict[str, Any
if IS_PYDANTIC_V2:
- class V2RootModel(UniversalBaseModel, pydantic.RootModel): # type: ignore[name-defined, type-arg]
+ class V2RootModel(UniversalBaseModel, pydantic.RootModel): # type: ignore[misc, name-defined, type-arg]
pass
UniversalRootModel: TypeAlias = V2RootModel # type: ignore[misc]
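
For context on the one-line change in this second patch (illustrative only, not the SDK's `pydantic_utilities` module): the `V2RootModel` shim branches on the installed Pydantic major version, and mypy needs extra `# type: ignore` codes because the same name is defined differently per branch. A minimal sketch of that version-branching pattern is below; `IS_V2` and `CompatRootModel` are hypothetical names.

```python
import typing

import pydantic

# Pydantic exposes its version string in both v1 and v2.
IS_V2 = pydantic.VERSION.startswith("2.")

if IS_V2:

    class CompatRootModel(pydantic.RootModel):  # type: ignore[type-arg]
        """Pydantic v2: wrap a bare value (here, a list of ints) via RootModel."""

        root: typing.List[int]

else:

    class CompatRootModel(pydantic.BaseModel):  # type: ignore[no-redef]
        """Pydantic v1 spelling of the same idea via a custom __root__ type."""

        __root__: typing.List[int]


wrapped = CompatRootModel(root=[1, 2, 3]) if IS_V2 else CompatRootModel(__root__=[1, 2, 3])
print(wrapped)
```

Because only one branch is type-checked as "the" definition, suppressions like `misc`, `name-defined`, and `type-arg` tend to accumulate on these conditional class definitions, which is what the patch adjusts.
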