diff --git a/pyproject.toml b/pyproject.toml
index cf85732..33f3210 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -3,7 +3,7 @@ name = "humanloop"
[tool.poetry]
name = "humanloop"
-version = "0.8.40b3"
+version = "0.8.40b5"
description = ""
readme = "README.md"
authors = []
diff --git a/reference.md b/reference.md
index 242b5db..07525c3 100644
--- a/reference.md
+++ b/reference.md
@@ -282,14 +282,6 @@ A new Prompt version will be created if the provided details do not match any ex
-
-**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
-
-
-
-
--
-
**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
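
For example, a minimal sketch of linking a Prompt Log to a Datapoint (the path and Datapoint ID below are illustrative placeholders):

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Logging against a known Datapoint lets Humanloop associate this Log with
# any Evaluations that need a Log for this Datapoint-Version pair.
client.prompts.log(
    path="my-project/my-prompt",
    output="The answer is 42.",
    source_datapoint_id="dp_1234567890",  # hypothetical Datapoint ID
)
```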
@@ -587,14 +579,6 @@ Controls how the model uses tools. The following options are supported:
-
-**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
-
-
-
-
--
-
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
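
For example, to force the model to call a specific named function using the `tool_choice` options described above (a sketch; the path and function name are placeholders):

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# 'required' would let the model pick any provided tool; the object form
# below pins the call to one named function instead.
client.prompts.call(
    path="my-project/my-prompt",
    tool_choice={"type": "function", "function": {"name": "multiply"}},
)
```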
@@ -772,14 +756,6 @@ A new Prompt version will be created if the provided details do not match any ex
-
-**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
-
-
-
-
--
-
**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -1044,14 +1020,6 @@ A new Prompt version will be created if the provided details do not match any ex
-
-**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
-
-
-
-
--
-
**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -2690,14 +2658,6 @@ client.tools.call()
-
-**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
-
-
-
-
--
-
**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -2952,14 +2912,6 @@ client.tools.log(path='math-tool', tool={'function': {'name': 'multiply', 'descr
-
-**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
-
-
-
-
--
-
**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -3179,14 +3131,6 @@ client.tools.update(id='id', log_id='log_id', )
-
-**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
-
-
-
-
--
-
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -5779,14 +5723,6 @@ client.evaluators.log(parent_id='parent_id', )
-
-**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
-
-
-
-
--
-
**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
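
For example, a sketch of logging an external Evaluator judgment against an existing Log (the IDs and path are placeholders, and `judgment` is assumed to match your Evaluator's return type):

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Attach a judgment produced outside Humanloop to the Log being evaluated.
client.evaluators.log(
    parent_id="log_1234567890",        # the Log this judgment applies to
    path="my-project/tone-evaluator",  # placeholder Evaluator path
    judgment=True,                     # verdict from your external evaluator
)
```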
@@ -6883,9 +6819,6 @@ Log to a Flow.
You can use query parameters `version_id`, or `environment`, to target
an existing version of the Flow. Otherwise, the default deployed version will be chosen.
-
-If you create the Flow Log with a `log_status` of `incomplete`, you should later update it to `complete`
-in order to trigger Evaluators.
@@ -7074,7 +7007,7 @@ client.flows.log(id='fl_6o701g4jmcanPVHxdqD0O', flow={'attributes': {'prompt': {
-
-**log_status:** `typing.Optional[LogStatus]` — Status of the Flow Log. When a Log is updated from `incomplete` to `complete`, it becomes available to Monitoring Evaluators. Flow Logs cannot have an unspecified status: they must start as `incomplete` to allow children to be added. Provide `complete` if you don't intend to add children to the trace.
+**log_status:** `typing.Optional[LogStatus]` — Status of the Flow Log. When a Log is updated from `incomplete` to `complete`, no more Logs can be added to it.
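
For example, a sketch of opening a trace as `incomplete` so that child Logs can still be attached (the paths and inputs are placeholders):

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Start the trace as `incomplete` so children can be added to it.
flow_log = client.flows.log(
    path="qa/medqa-flow",
    inputs={"question": "Patient presents with a persistent cough."},
    log_status="incomplete",
)

# Nest a child Log under the open trace.
client.prompts.log(
    path="qa/answer-prompt",
    output="Consider a chest X-ray.",
    trace_parent_id=flow_log.id,
)
```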
@@ -7249,7 +7182,7 @@ client.flows.update_log(log_id='medqa_experiment_0001', inputs={'question': 'Pat
-
-**log_status:** `typing.Optional[LogStatus]` — Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs.
+**log_status:** `typing.Optional[LogStatus]` — Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. You cannot update a Flow Log's status from `complete` to `incomplete`.
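
Continuing the sketch above, you would close out the trace once every child Log has been added; the transition is one-way, so only mark the trace `complete` when it is final:

```python
# No further Logs can be added once the Flow Log is `complete`.
client.flows.update_log(
    log_id=flow_log.id,
    log_status="complete",
)
```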
@@ -8551,14 +8484,6 @@ A new Agent version will be created if the provided details do not match any exi
-
-**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
-
-
-
-
--
-
**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -8722,7 +8647,7 @@ client.agents.update_log(id='ag_1234567890', log_id='log_1234567890', messages=[
-
-**log_status:** `typing.Optional[LogStatus]` — Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs.
+**log_status:** `typing.Optional[LogStatus]` — Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. You cannot update a Flow Log's status from `complete` to `incomplete`.
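
For example, reusing the placeholder IDs from the example above to close out an Agent trace:

```python
# Once marked `complete`, no further child Logs can be attached,
# and the status cannot be reverted to `incomplete`.
client.agents.update_log(
    id="ag_1234567890",
    log_id="log_1234567890",
    log_status="complete",
)
```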
@@ -8910,14 +8835,6 @@ A new Agent version will be created if the provided details do not match any exi
-
-**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
-
-
-
-
--
-
**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -9168,14 +9085,6 @@ A new Agent version will be created if the provided details do not match any exi
-
-**log_status:** `typing.Optional[LogStatus]` — Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
-
-
-
-
--
-
**source_datapoint_id:** `typing.Optional[str]` — Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
diff --git a/src/humanloop/agents/client.py b/src/humanloop/agents/client.py
index ab7b887..49772da 100644
--- a/src/humanloop/agents/client.py
+++ b/src/humanloop/agents/client.py
@@ -92,7 +92,6 @@ def log(
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -200,9 +199,6 @@ def log(
metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Any additional metadata to record.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -267,7 +263,6 @@ def log(
inputs=inputs,
source=source,
metadata=metadata,
- log_status=log_status,
source_datapoint_id=source_datapoint_id,
trace_parent_id=trace_parent_id,
user=user,
@@ -320,7 +315,7 @@ def update_log(
The error message of the Flow Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`.
log_status : typing.Optional[LogStatus]
- Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs.
+ Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. You cannot update a Flow Log's status from `complete` to `incomplete`.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -364,7 +359,6 @@ def call_stream(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -377,112 +371,113 @@ def call_stream(
request_options: typing.Optional[RequestOptions] = None,
) -> typing.Iterator[AgentCallStreamResponse]:
"""
- Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
+ Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
- If the Agent requires a tool call that cannot be ran by Humanloop, execution will halt. To continue,
- pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
+    If the Agent requires a tool call that cannot be run by Humanloop, execution will halt. To continue,
+ pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
- The agent will run for the maximum number of iterations, or until it encounters a stop condition,
- according to its configuration.
+ The agent will run for the maximum number of iterations, or until it encounters a stop condition,
+ according to its configuration.
- You can use query parameters `version_id`, or `environment`, to target
- an existing version of the Agent. Otherwise the default deployed version will be chosen.
+    You can use query parameters `version_id` or `environment` to target
+    an existing version of the Agent. Otherwise, the default deployed version will be chosen.
- Instead of targeting an existing version explicitly, you can instead pass in
- Agent details in the request body. A new version is created if it does not match
- any existing ones. This is helpful in the case where you are storing or deriving
- your Agent details in code.
+    Instead of targeting an existing version explicitly, you can pass in
+    Agent details in the request body. A new version is created if it does not match
+    any existing ones. This is helpful when you are storing or deriving
+    your Agent details in code.
- Parameters
- ----------
- version_id : typing.Optional[str]
- A specific Version ID of the Agent to log to.
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
- environment : typing.Optional[str]
- Name of the Environment identifying a deployed version to log to.
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
- path : typing.Optional[str]
- Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+ path : typing.Optional[str]
+        Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
- id : typing.Optional[str]
- ID for an existing Agent.
+ id : typing.Optional[str]
+ ID for an existing Agent.
- messages : typing.Optional[typing.Sequence[ChatMessageParams]]
- The messages passed to the to provider chat endpoint.
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+        The messages passed to the provider chat endpoint.
- tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams]
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
+ tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+        - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- agent : typing.Optional[AgentsCallStreamRequestAgentParams]
- The Agent configuration to use. Two formats are supported:
- - An object representing the details of the Agent configuration
- - A string representing the raw contents of a .agent file
- A new Agent version will be created if the provided details do not match any existing version.
+ agent : typing.Optional[AgentsCallStreamRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+        A new Agent version will be created if the provided details do not match any existing version.
-    inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
-        The inputs passed to the prompt template.
- source : typing.Optional[str]
- Identifies where the model was called from.
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
- metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Any additional metadata to record.
+ source : typing.Optional[str]
+ Identifies where the model was called from.
- start_time : typing.Optional[dt.datetime]
- When the logged event started.
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
- end_time : typing.Optional[dt.datetime]
- When the logged event ended.
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
- source_datapoint_id : typing.Optional[str]
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
- trace_parent_id : typing.Optional[str]
- The ID of the parent Log to nest this Log under in a Trace.
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
- user : typing.Optional[str]
- End-user ID related to the Log.
+ user : typing.Optional[str]
+ End-user ID related to the Log.
- agents_call_stream_request_environment : typing.Optional[str]
- The name of the Environment the Log is associated to.
+ agents_call_stream_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
- save : typing.Optional[bool]
- Whether the request/response payloads will be stored on Humanloop.
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
- log_id : typing.Optional[str]
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- provider_api_keys : typing.Optional[ProviderApiKeysParams]
- API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
- return_inputs : typing.Optional[bool]
- Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
- include_trace_children : typing.Optional[bool]
- If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- Yields
- ------
- typing.Iterator[AgentCallStreamResponse]
+ Yields
+ ------
+ typing.Iterator[AgentCallStreamResponse]
- Examples
- --------
- from humanloop import Humanloop
- client = Humanloop(api_key="YOUR_API_KEY", )
- response = client.agents.call_stream()
- for chunk in response:
- yield chunk
+ Examples
+ --------
+ from humanloop import Humanloop
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ response = client.agents.call_stream()
+ for chunk in response:
+        print(chunk)
"""
with self._raw_client.call_stream(
version_id=version_id,
@@ -497,7 +492,6 @@ def call_stream(
metadata=metadata,
start_time=start_time,
end_time=end_time,
- log_status=log_status,
source_datapoint_id=source_datapoint_id,
trace_parent_id=trace_parent_id,
user=user,
@@ -526,7 +520,6 @@ def call(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -539,110 +532,111 @@ def call(
request_options: typing.Optional[RequestOptions] = None,
) -> AgentCallResponse:
"""
- Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
+ Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
- If the Agent requires a tool call that cannot be ran by Humanloop, execution will halt. To continue,
- pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
+    If the Agent requires a tool call that cannot be run by Humanloop, execution will halt. To continue,
+ pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
- The agent will run for the maximum number of iterations, or until it encounters a stop condition,
- according to its configuration.
+ The agent will run for the maximum number of iterations, or until it encounters a stop condition,
+ according to its configuration.
- You can use query parameters `version_id`, or `environment`, to target
- an existing version of the Agent. Otherwise the default deployed version will be chosen.
+    You can use query parameters `version_id` or `environment` to target
+    an existing version of the Agent. Otherwise, the default deployed version will be chosen.
- Instead of targeting an existing version explicitly, you can instead pass in
- Agent details in the request body. A new version is created if it does not match
- any existing ones. This is helpful in the case where you are storing or deriving
- your Agent details in code.
+    Instead of targeting an existing version explicitly, you can pass in
+    Agent details in the request body. A new version is created if it does not match
+    any existing ones. This is helpful when you are storing or deriving
+    your Agent details in code.
- Parameters
- ----------
- version_id : typing.Optional[str]
- A specific Version ID of the Agent to log to.
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
- environment : typing.Optional[str]
- Name of the Environment identifying a deployed version to log to.
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
- path : typing.Optional[str]
- Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+ path : typing.Optional[str]
+        Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
- id : typing.Optional[str]
- ID for an existing Agent.
+ id : typing.Optional[str]
+ ID for an existing Agent.
- messages : typing.Optional[typing.Sequence[ChatMessageParams]]
- The messages passed to the to provider chat endpoint.
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+        The messages passed to the provider chat endpoint.
- tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams]
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
+ tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+        - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- agent : typing.Optional[AgentsCallRequestAgentParams]
- The Agent configuration to use. Two formats are supported:
- - An object representing the details of the Agent configuration
- - A string representing the raw contents of a .agent file
- A new Agent version will be created if the provided details do not match any existing version.
+ agent : typing.Optional[AgentsCallRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+        A new Agent version will be created if the provided details do not match any existing version.
-    inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
-        The inputs passed to the prompt template.
- source : typing.Optional[str]
- Identifies where the model was called from.
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
- metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Any additional metadata to record.
+ source : typing.Optional[str]
+ Identifies where the model was called from.
- start_time : typing.Optional[dt.datetime]
- When the logged event started.
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
- end_time : typing.Optional[dt.datetime]
- When the logged event ended.
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
- source_datapoint_id : typing.Optional[str]
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
- trace_parent_id : typing.Optional[str]
- The ID of the parent Log to nest this Log under in a Trace.
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
- user : typing.Optional[str]
- End-user ID related to the Log.
+ user : typing.Optional[str]
+ End-user ID related to the Log.
- agents_call_request_environment : typing.Optional[str]
- The name of the Environment the Log is associated to.
+ agents_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
- save : typing.Optional[bool]
- Whether the request/response payloads will be stored on Humanloop.
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
- log_id : typing.Optional[str]
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- provider_api_keys : typing.Optional[ProviderApiKeysParams]
- API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
- return_inputs : typing.Optional[bool]
- Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
- include_trace_children : typing.Optional[bool]
- If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- Returns
- -------
- AgentCallResponse
+ Returns
+ -------
+ AgentCallResponse
- Examples
- --------
- from humanloop import Humanloop
- client = Humanloop(api_key="YOUR_API_KEY", )
- client.agents.call(path='Banking/Teller Agent', messages=[{'role': "user", 'content': "I'd like to deposit $1000 to my savings account from my checking account."}], )
+ Examples
+ --------
+ from humanloop import Humanloop
+ client = Humanloop(api_key="YOUR_API_KEY", )
+ client.agents.call(path='Banking/Teller Agent', messages=[{'role': "user", 'content': "I'd like to deposit $1000 to my savings account from my checking account."}], )
"""
_response = self._raw_client.call(
version_id=version_id,
@@ -657,7 +651,6 @@ def call(
metadata=metadata,
start_time=start_time,
end_time=end_time,
- log_status=log_status,
source_datapoint_id=source_datapoint_id,
trace_parent_id=trace_parent_id,
user=user,
@@ -1509,7 +1502,6 @@ async def log(
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -1519,145 +1511,146 @@ async def log(
request_options: typing.Optional[RequestOptions] = None,
) -> CreateAgentLogResponse:
"""
- Create an Agent Log.
+ Create an Agent Log.
- You can use query parameters `version_id`, or `environment`, to target
- an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+    You can use query parameters `version_id` or `environment` to target
+ an existing version of the Agent. Otherwise, the default deployed version will be chosen.
- If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete`
- in order to trigger Evaluators.
+ If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete`
+ in order to trigger Evaluators.
- Parameters
- ----------
- version_id : typing.Optional[str]
- A specific Version ID of the Agent to log to.
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
- environment : typing.Optional[str]
- Name of the Environment identifying a deployed version to log to.
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
- run_id : typing.Optional[str]
- Unique identifier for the Run to associate the Log to.
+ run_id : typing.Optional[str]
+ Unique identifier for the Run to associate the Log to.
- path : typing.Optional[str]
- Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+ path : typing.Optional[str]
+        Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
- id : typing.Optional[str]
- ID for an existing Agent.
+ id : typing.Optional[str]
+ ID for an existing Agent.
- output_message : typing.Optional[ChatMessageParams]
- The message returned by the provider.
+ output_message : typing.Optional[ChatMessageParams]
+ The message returned by the provider.
- prompt_tokens : typing.Optional[int]
- Number of tokens in the prompt used to generate the output.
+ prompt_tokens : typing.Optional[int]
+ Number of tokens in the prompt used to generate the output.
- reasoning_tokens : typing.Optional[int]
- Number of reasoning tokens used to generate the output.
+ reasoning_tokens : typing.Optional[int]
+ Number of reasoning tokens used to generate the output.
- output_tokens : typing.Optional[int]
- Number of tokens in the output generated by the model.
+ output_tokens : typing.Optional[int]
+ Number of tokens in the output generated by the model.
- prompt_cost : typing.Optional[float]
- Cost in dollars associated to the tokens in the prompt.
+ prompt_cost : typing.Optional[float]
+        Cost in dollars associated with the tokens in the prompt.
- output_cost : typing.Optional[float]
- Cost in dollars associated to the tokens in the output.
+ output_cost : typing.Optional[float]
+        Cost in dollars associated with the tokens in the output.
- finish_reason : typing.Optional[str]
- Reason the generation finished.
+ finish_reason : typing.Optional[str]
+ Reason the generation finished.
- messages : typing.Optional[typing.Sequence[ChatMessageParams]]
- The messages passed to the to provider chat endpoint.
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+        The messages passed to the provider chat endpoint.
- tool_choice : typing.Optional[AgentLogRequestToolChoiceParams]
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
+ tool_choice : typing.Optional[AgentLogRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+        - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- agent : typing.Optional[AgentLogRequestAgentParams]
- The Agent configuration to use. Two formats are supported:
- - An object representing the details of the Agent configuration
- - A string representing the raw contents of a .agent file
- A new Agent version will be created if the provided details do not match any existing version.
+ agent : typing.Optional[AgentLogRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+        A new Agent version will be created if the provided details do not match any existing version.
-    start_time : typing.Optional[dt.datetime]
-        When the logged event started.
- end_time : typing.Optional[dt.datetime]
- When the logged event ended.
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
- output : typing.Optional[str]
- Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
- created_at : typing.Optional[dt.datetime]
- User defined timestamp for when the log was created.
+ output : typing.Optional[str]
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
- error : typing.Optional[str]
- Error message if the log is an error.
+ created_at : typing.Optional[dt.datetime]
+ User defined timestamp for when the log was created.
- provider_latency : typing.Optional[float]
- Duration of the logged event in seconds.
+ error : typing.Optional[str]
+ Error message if the log is an error.
- stdout : typing.Optional[str]
- Captured log and debug statements.
+ provider_latency : typing.Optional[float]
+ Duration of the logged event in seconds.
- provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Raw request sent to provider.
+ stdout : typing.Optional[str]
+ Captured log and debug statements.
- provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Raw response received the provider.
+ provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw request sent to provider.
- inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- The inputs passed to the prompt template.
+ provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+        Raw response received from the provider.
- source : typing.Optional[str]
- Identifies where the model was called from.
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
- metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Any additional metadata to record.
+ source : typing.Optional[str]
+ Identifies where the model was called from.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
- source_datapoint_id : typing.Optional[str]
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
- trace_parent_id : typing.Optional[str]
- The ID of the parent Log to nest this Log under in a Trace.
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
- user : typing.Optional[str]
- End-user ID related to the Log.
+ user : typing.Optional[str]
+ End-user ID related to the Log.
- agent_log_request_environment : typing.Optional[str]
- The name of the Environment the Log is associated to.
+ agent_log_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
- save : typing.Optional[bool]
- Whether the request/response payloads will be stored on Humanloop.
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
- log_id : typing.Optional[str]
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- Returns
- -------
- CreateAgentLogResponse
- Successful Response
+ Returns
+ -------
+ CreateAgentLogResponse
+ Successful Response
- Examples
- --------
- from humanloop import AsyncHumanloop
- import asyncio
- client = AsyncHumanloop(api_key="YOUR_API_KEY", )
- async def main() -> None:
- await client.agents.log(path='Banking/Teller Agent', agent={'provider': "anthropic", 'endpoint': "chat", 'model': 'claude-3-7-sonnet-latest', 'reasoning_effort': 1024, 'template': [{'role': "system", 'content': 'You are a helpful digital assistant, helping users navigate our digital banking platform.'}], 'max_iterations': 3, 'tools': [{'type': 'file', 'link': {'file_id': 'pr_1234567890', 'version_id': 'prv_1234567890'}, 'on_agent_call': "continue"}, {'type': 'inline', 'json_schema': {'name': 'stop', 'description': 'Call this tool when you have finished your task.', 'parameters': {'type': 'object'
- , 'properties': {'output': {'type': 'string', 'description': 'The final output to return to the user.'}}
- , 'additionalProperties': False
- , 'required': ['output']
- }, 'strict': True}, 'on_agent_call': "stop"}]}, )
- asyncio.run(main())
+ Examples
+ --------
+ from humanloop import AsyncHumanloop
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY", )
+ async def main() -> None:
+        await client.agents.log(path='Banking/Teller Agent', agent={'provider': "anthropic", 'endpoint': "chat", 'model': 'claude-3-7-sonnet-latest', 'reasoning_effort': 1024, 'template': [{'role': "system", 'content': 'You are a helpful digital assistant, helping users navigate our digital banking platform.'}], 'max_iterations': 3, 'tools': [{'type': 'file', 'link': {'file_id': 'pr_1234567890', 'version_id': 'prv_1234567890'}, 'on_agent_call': "continue"}, {'type': 'inline', 'json_schema': {'name': 'stop', 'description': 'Call this tool when you have finished your task.', 'parameters': {'type': 'object', 'properties': {'output': {'type': 'string', 'description': 'The final output to return to the user.'}}, 'additionalProperties': False, 'required': ['output']}, 'strict': True}, 'on_agent_call': "stop"}]}, )
+ asyncio.run(main())
"""
_response = await self._raw_client.log(
version_id=version_id,
@@ -1687,7 +1680,6 @@ async def main() -> None:
inputs=inputs,
source=source,
metadata=metadata,
- log_status=log_status,
source_datapoint_id=source_datapoint_id,
trace_parent_id=trace_parent_id,
user=user,
@@ -1740,7 +1732,7 @@ async def update_log(
The error message of the Flow Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`.
log_status : typing.Optional[LogStatus]
- Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs.
+ Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. You cannot update a Flow Log's status from `complete` to `incomplete`.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -1787,7 +1779,6 @@ async def call_stream(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -1800,115 +1791,116 @@ async def call_stream(
request_options: typing.Optional[RequestOptions] = None,
) -> typing.AsyncIterator[AgentCallStreamResponse]:
"""
- Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
+ Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
- If the Agent requires a tool call that cannot be ran by Humanloop, execution will halt. To continue,
- pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
+    If the Agent requires a tool call that cannot be run by Humanloop, execution will halt. To continue,
+ pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
- The agent will run for the maximum number of iterations, or until it encounters a stop condition,
- according to its configuration.
+ The agent will run for the maximum number of iterations, or until it encounters a stop condition,
+ according to its configuration.
- You can use query parameters `version_id`, or `environment`, to target
- an existing version of the Agent. Otherwise the default deployed version will be chosen.
+    You can use query parameters `version_id` or `environment` to target
+    an existing version of the Agent. Otherwise, the default deployed version will be chosen.
- Instead of targeting an existing version explicitly, you can instead pass in
- Agent details in the request body. A new version is created if it does not match
- any existing ones. This is helpful in the case where you are storing or deriving
- your Agent details in code.
+    Instead of targeting an existing version explicitly, you can pass in
+    Agent details in the request body. A new version is created if it does not match
+    any existing ones. This is helpful when you are storing or deriving
+    your Agent details in code.
- Parameters
- ----------
- version_id : typing.Optional[str]
- A specific Version ID of the Agent to log to.
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
- environment : typing.Optional[str]
- Name of the Environment identifying a deployed version to log to.
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
- path : typing.Optional[str]
- Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+ path : typing.Optional[str]
+        Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
- id : typing.Optional[str]
- ID for an existing Agent.
+ id : typing.Optional[str]
+ ID for an existing Agent.
- messages : typing.Optional[typing.Sequence[ChatMessageParams]]
- The messages passed to the to provider chat endpoint.
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+        The messages passed to the provider chat endpoint.
- tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams]
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
+ tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+        - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- agent : typing.Optional[AgentsCallStreamRequestAgentParams]
- The Agent configuration to use. Two formats are supported:
- - An object representing the details of the Agent configuration
- - A string representing the raw contents of a .agent file
- A new Agent version will be created if the provided details do not match any existing version.
+ agent : typing.Optional[AgentsCallStreamRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+        A new Agent version will be created if the provided details do not match any existing version.
-    inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
-        The inputs passed to the prompt template.
- source : typing.Optional[str]
- Identifies where the model was called from.
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
- metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Any additional metadata to record.
+ source : typing.Optional[str]
+ Identifies where the model was called from.
- start_time : typing.Optional[dt.datetime]
- When the logged event started.
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
- end_time : typing.Optional[dt.datetime]
- When the logged event ended.
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
- source_datapoint_id : typing.Optional[str]
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
- trace_parent_id : typing.Optional[str]
- The ID of the parent Log to nest this Log under in a Trace.
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
- user : typing.Optional[str]
- End-user ID related to the Log.
+ user : typing.Optional[str]
+ End-user ID related to the Log.
- agents_call_stream_request_environment : typing.Optional[str]
- The name of the Environment the Log is associated to.
+ agents_call_stream_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
- save : typing.Optional[bool]
- Whether the request/response payloads will be stored on Humanloop.
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
- log_id : typing.Optional[str]
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- provider_api_keys : typing.Optional[ProviderApiKeysParams]
- API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
- return_inputs : typing.Optional[bool]
- Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
- include_trace_children : typing.Optional[bool]
- If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- Yields
- ------
- typing.AsyncIterator[AgentCallStreamResponse]
+ Yields
+ ------
+ typing.AsyncIterator[AgentCallStreamResponse]
- Examples
- --------
- from humanloop import AsyncHumanloop
- import asyncio
- client = AsyncHumanloop(api_key="YOUR_API_KEY", )
- async def main() -> None:
- response = await client.agents.call_stream()
- async for chunk in response:
- yield chunk
- asyncio.run(main())
+ Examples
+ --------
+ from humanloop import AsyncHumanloop
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY")
+ async def main() -> None:
+ response = await client.agents.call_stream()
+ async for chunk in response:
+ print(chunk)
+ asyncio.run(main())
"""
async with self._raw_client.call_stream(
version_id=version_id,
@@ -1923,7 +1915,6 @@ async def main() -> None:
metadata=metadata,
start_time=start_time,
end_time=end_time,
- log_status=log_status,
source_datapoint_id=source_datapoint_id,
trace_parent_id=trace_parent_id,
user=user,
@@ -1953,7 +1944,6 @@ async def call(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -1966,113 +1956,111 @@ async def call(
request_options: typing.Optional[RequestOptions] = None,
) -> AgentCallResponse:
"""
- Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
+ Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
- If the Agent requires a tool call that cannot be ran by Humanloop, execution will halt. To continue,
- pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
+ If the Agent requires a tool call that cannot be run by Humanloop, execution will halt. To continue,
+ pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
- The agent will run for the maximum number of iterations, or until it encounters a stop condition,
- according to its configuration.
+ The Agent will run for the maximum number of iterations, or until it encounters a stop condition,
+ according to its configuration.
- You can use query parameters `version_id`, or `environment`, to target
- an existing version of the Agent. Otherwise the default deployed version will be chosen.
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Agent. Otherwise, the default deployed version will be chosen.
- Instead of targeting an existing version explicitly, you can instead pass in
- Agent details in the request body. A new version is created if it does not match
- any existing ones. This is helpful in the case where you are storing or deriving
- your Agent details in code.
+ Instead of targeting an existing version explicitly, you can pass in
+ Agent details in the request body. A new version is created if it does not match
+ any existing ones. This is helpful when you are storing or deriving
+ your Agent details in code.
- Parameters
- ----------
- version_id : typing.Optional[str]
- A specific Version ID of the Agent to log to.
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
- environment : typing.Optional[str]
- Name of the Environment identifying a deployed version to log to.
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
- path : typing.Optional[str]
- Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
- id : typing.Optional[str]
- ID for an existing Agent.
+ id : typing.Optional[str]
+ ID for an existing Agent.
- messages : typing.Optional[typing.Sequence[ChatMessageParams]]
- The messages passed to the to provider chat endpoint.
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
- tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams]
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
+ tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- agent : typing.Optional[AgentsCallRequestAgentParams]
- The Agent configuration to use. Two formats are supported:
- - An object representing the details of the Agent configuration
- - A string representing the raw contents of a .agent file
- A new Agent version will be created if the provided details do not match any existing version.
+ agent : typing.Optional[AgentsCallRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
- inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- The inputs passed to the prompt template.
+ A new Agent version will be created if the provided details do not match any existing version.
- source : typing.Optional[str]
- Identifies where the model was called from.
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
- metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Any additional metadata to record.
+ source : typing.Optional[str]
+ Identifies where the model was called from.
- start_time : typing.Optional[dt.datetime]
- When the logged event started.
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
- end_time : typing.Optional[dt.datetime]
- When the logged event ended.
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
- source_datapoint_id : typing.Optional[str]
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs with Evaluations. If provided, Humanloop will automatically associate this Log with Evaluations that require a Log for this Datapoint-Version pair.
- trace_parent_id : typing.Optional[str]
- The ID of the parent Log to nest this Log under in a Trace.
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
- user : typing.Optional[str]
- End-user ID related to the Log.
+ user : typing.Optional[str]
+ End-user ID related to the Log.
- agents_call_request_environment : typing.Optional[str]
- The name of the Environment the Log is associated to.
+ agents_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated with.
- save : typing.Optional[bool]
- Whether the request/response payloads will be stored on Humanloop.
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
- log_id : typing.Optional[str]
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- provider_api_keys : typing.Optional[ProviderApiKeysParams]
- API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
- return_inputs : typing.Optional[bool]
- Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
- include_trace_children : typing.Optional[bool]
- If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- Returns
- -------
- AgentCallResponse
+ Returns
+ -------
+ AgentCallResponse
- Examples
- --------
- from humanloop import AsyncHumanloop
- import asyncio
- client = AsyncHumanloop(api_key="YOUR_API_KEY", )
- async def main() -> None:
- await client.agents.call(path='Banking/Teller Agent', messages=[{'role': "user", 'content': "I'd like to deposit $1000 to my savings account from my checking account."}], )
- asyncio.run(main())
+ Examples
+ --------
+ from humanloop import AsyncHumanloop
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY")
+ async def main() -> None:
+ await client.agents.call(path="Banking/Teller Agent", messages=[{"role": "user", "content": "I'd like to deposit $1000 to my savings account from my checking account."}])
+ asyncio.run(main())
"""
_response = await self._raw_client.call(
version_id=version_id,
@@ -2087,7 +2078,6 @@ async def main() -> None:
metadata=metadata,
start_time=start_time,
end_time=end_time,
- log_status=log_status,
source_datapoint_id=source_datapoint_id,
trace_parent_id=trace_parent_id,
user=user,
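With the hunks above applied, `agents.call` and `agents.call_stream` no longer accept `log_status`, and the docstring examples read as fixed above. A consolidated sketch of the corrected async usage, mirroring the two documented examples (the message content is illustrative):

```python
import asyncio

from humanloop import AsyncHumanloop

client = AsyncHumanloop(api_key="YOUR_API_KEY")

async def main() -> None:
    # Blocking call: the runtime returns a completed Agent Log.
    await client.agents.call(
        path="Banking/Teller Agent",
        messages=[{"role": "user", "content": "What's my balance?"}],
    )
    # Streaming call: print or collect each chunk here rather than yielding
    # from main(), which would turn main() into an async generator that
    # asyncio.run() cannot execute.
    response = await client.agents.call_stream()
    async for chunk in response:
        print(chunk)

asyncio.run(main())
```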
diff --git a/src/humanloop/agents/raw_client.py b/src/humanloop/agents/raw_client.py
index e577f8c..5b91f95 100644
--- a/src/humanloop/agents/raw_client.py
+++ b/src/humanloop/agents/raw_client.py
@@ -92,7 +92,6 @@ def log(
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -200,9 +199,6 @@ def log(
metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Any additional metadata to record.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -270,7 +266,6 @@ def log(
"inputs": inputs,
"source": source,
"metadata": metadata,
- "log_status": log_status,
"source_datapoint_id": source_datapoint_id,
"trace_parent_id": trace_parent_id,
"user": user,
@@ -352,7 +347,7 @@ def update_log(
The error message of the Flow Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`.
log_status : typing.Optional[LogStatus]
- Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs.
+ Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. You cannot update a Flow Log's status from `complete` to `incomplete`.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -425,7 +420,6 @@ def call_stream(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -438,103 +432,101 @@ def call_stream(
request_options: typing.Optional[RequestOptions] = None,
) -> typing.Iterator[HttpResponse[typing.Iterator[AgentCallStreamResponse]]]:
"""
- Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
+ Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
- If the Agent requires a tool call that cannot be ran by Humanloop, execution will halt. To continue,
- pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
+ If the Agent requires a tool call that cannot be run by Humanloop, execution will halt. To continue,
+ pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
- The agent will run for the maximum number of iterations, or until it encounters a stop condition,
- according to its configuration.
+ The Agent will run for the maximum number of iterations, or until it encounters a stop condition,
+ according to its configuration.
- You can use query parameters `version_id`, or `environment`, to target
- an existing version of the Agent. Otherwise the default deployed version will be chosen.
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Agent. Otherwise, the default deployed version will be chosen.
- Instead of targeting an existing version explicitly, you can instead pass in
- Agent details in the request body. A new version is created if it does not match
- any existing ones. This is helpful in the case where you are storing or deriving
- your Agent details in code.
+ Instead of targeting an existing version explicitly, you can pass in
+ Agent details in the request body. A new version is created if it does not match
+ any existing ones. This is helpful when you are storing or deriving
+ your Agent details in code.
- Parameters
- ----------
- version_id : typing.Optional[str]
- A specific Version ID of the Agent to log to.
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
- environment : typing.Optional[str]
- Name of the Environment identifying a deployed version to log to.
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
- path : typing.Optional[str]
- Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
- id : typing.Optional[str]
- ID for an existing Agent.
+ id : typing.Optional[str]
+ ID for an existing Agent.
- messages : typing.Optional[typing.Sequence[ChatMessageParams]]
- The messages passed to the to provider chat endpoint.
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
- tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams]
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
+ tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- agent : typing.Optional[AgentsCallStreamRequestAgentParams]
- The Agent configuration to use. Two formats are supported:
- - An object representing the details of the Agent configuration
- - A string representing the raw contents of a .agent file
- A new Agent version will be created if the provided details do not match any existing version.
+ agent : typing.Optional[AgentsCallStreamRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
- inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- The inputs passed to the prompt template.
+ A new Agent version will be created if the provided details do not match any existing version.
- source : typing.Optional[str]
- Identifies where the model was called from.
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
- metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Any additional metadata to record.
+ source : typing.Optional[str]
+ Identifies where the model was called from.
- start_time : typing.Optional[dt.datetime]
- When the logged event started.
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
- end_time : typing.Optional[dt.datetime]
- When the logged event ended.
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
- source_datapoint_id : typing.Optional[str]
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs with Evaluations. If provided, Humanloop will automatically associate this Log with Evaluations that require a Log for this Datapoint-Version pair.
- trace_parent_id : typing.Optional[str]
- The ID of the parent Log to nest this Log under in a Trace.
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
- user : typing.Optional[str]
- End-user ID related to the Log.
+ user : typing.Optional[str]
+ End-user ID related to the Log.
- agents_call_stream_request_environment : typing.Optional[str]
- The name of the Environment the Log is associated to.
+ agents_call_stream_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated with.
- save : typing.Optional[bool]
- Whether the request/response payloads will be stored on Humanloop.
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
- log_id : typing.Optional[str]
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- provider_api_keys : typing.Optional[ProviderApiKeysParams]
- API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
- return_inputs : typing.Optional[bool]
- Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
- include_trace_children : typing.Optional[bool]
- If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- Yields
- ------
- typing.Iterator[HttpResponse[typing.Iterator[AgentCallStreamResponse]]]
+ Yields
+ ------
+ typing.Iterator[HttpResponse[typing.Iterator[AgentCallStreamResponse]]]
"""
with self._client_wrapper.httpx_client.stream(
@@ -561,7 +556,6 @@ def call_stream(
"metadata": metadata,
"start_time": start_time,
"end_time": end_time,
- "log_status": log_status,
"source_datapoint_id": source_datapoint_id,
"trace_parent_id": trace_parent_id,
"user": user,
@@ -640,7 +634,6 @@ def call(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -653,103 +646,101 @@ def call(
request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[AgentCallResponse]:
"""
- Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
+ Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
- If the Agent requires a tool call that cannot be ran by Humanloop, execution will halt. To continue,
- pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
+ If the Agent requires a tool call that cannot be run by Humanloop, execution will halt. To continue,
+ pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
- The agent will run for the maximum number of iterations, or until it encounters a stop condition,
- according to its configuration.
+ The Agent will run for the maximum number of iterations, or until it encounters a stop condition,
+ according to its configuration.
- You can use query parameters `version_id`, or `environment`, to target
- an existing version of the Agent. Otherwise the default deployed version will be chosen.
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Agent. Otherwise, the default deployed version will be chosen.
- Instead of targeting an existing version explicitly, you can instead pass in
- Agent details in the request body. A new version is created if it does not match
- any existing ones. This is helpful in the case where you are storing or deriving
- your Agent details in code.
+ Instead of targeting an existing version explicitly, you can pass in
+ Agent details in the request body. A new version is created if it does not match
+ any existing ones. This is helpful when you are storing or deriving
+ your Agent details in code.
- Parameters
- ----------
- version_id : typing.Optional[str]
- A specific Version ID of the Agent to log to.
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
- environment : typing.Optional[str]
- Name of the Environment identifying a deployed version to log to.
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
- path : typing.Optional[str]
- Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
- id : typing.Optional[str]
- ID for an existing Agent.
+ id : typing.Optional[str]
+ ID for an existing Agent.
- messages : typing.Optional[typing.Sequence[ChatMessageParams]]
- The messages passed to the to provider chat endpoint.
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
- tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams]
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
+ tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- agent : typing.Optional[AgentsCallRequestAgentParams]
- The Agent configuration to use. Two formats are supported:
- - An object representing the details of the Agent configuration
- - A string representing the raw contents of a .agent file
- A new Agent version will be created if the provided details do not match any existing version.
+ agent : typing.Optional[AgentsCallRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
- inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- The inputs passed to the prompt template.
+ A new Agent version will be created if the provided details do not match any existing version.
- source : typing.Optional[str]
- Identifies where the model was called from.
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
- metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Any additional metadata to record.
+ source : typing.Optional[str]
+ Identifies where the model was called from.
- start_time : typing.Optional[dt.datetime]
- When the logged event started.
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
- end_time : typing.Optional[dt.datetime]
- When the logged event ended.
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
- source_datapoint_id : typing.Optional[str]
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs with Evaluations. If provided, Humanloop will automatically associate this Log with Evaluations that require a Log for this Datapoint-Version pair.
- trace_parent_id : typing.Optional[str]
- The ID of the parent Log to nest this Log under in a Trace.
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
- user : typing.Optional[str]
- End-user ID related to the Log.
+ user : typing.Optional[str]
+ End-user ID related to the Log.
- agents_call_request_environment : typing.Optional[str]
- The name of the Environment the Log is associated to.
+ agents_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated with.
- save : typing.Optional[bool]
- Whether the request/response payloads will be stored on Humanloop.
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
- log_id : typing.Optional[str]
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- provider_api_keys : typing.Optional[ProviderApiKeysParams]
- API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
- return_inputs : typing.Optional[bool]
- Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
- include_trace_children : typing.Optional[bool]
- If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- Returns
- -------
- HttpResponse[AgentCallResponse]
+ Returns
+ -------
+ HttpResponse[AgentCallResponse]
"""
_response = self._client_wrapper.httpx_client.request(
@@ -776,7 +770,6 @@ def call(
"metadata": metadata,
"start_time": start_time,
"end_time": end_time,
- "log_status": log_status,
"source_datapoint_id": source_datapoint_id,
"trace_parent_id": trace_parent_id,
"user": user,
@@ -2071,7 +2064,6 @@ async def log(
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -2081,132 +2073,130 @@ async def log(
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[CreateAgentLogResponse]:
"""
- Create an Agent Log.
+ Create an Agent Log.
- You can use query parameters `version_id`, or `environment`, to target
- an existing version of the Agent. Otherwise, the default deployed version will be chosen.
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Agent. Otherwise, the default deployed version will be chosen.
- If you create the Agent Log with a `log_status` of `incomplete`, you should later update it to `complete`
- in order to trigger Evaluators.
+ If an Agent Log is `incomplete`, you should later update it to `complete`
+ in order to trigger Evaluators.
- Parameters
- ----------
- version_id : typing.Optional[str]
- A specific Version ID of the Agent to log to.
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
- environment : typing.Optional[str]
- Name of the Environment identifying a deployed version to log to.
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
- run_id : typing.Optional[str]
- Unique identifier for the Run to associate the Log to.
+ run_id : typing.Optional[str]
+ Unique identifier for the Run to associate the Log to.
- path : typing.Optional[str]
- Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
- id : typing.Optional[str]
- ID for an existing Agent.
+ id : typing.Optional[str]
+ ID for an existing Agent.
- output_message : typing.Optional[ChatMessageParams]
- The message returned by the provider.
+ output_message : typing.Optional[ChatMessageParams]
+ The message returned by the provider.
- prompt_tokens : typing.Optional[int]
- Number of tokens in the prompt used to generate the output.
+ prompt_tokens : typing.Optional[int]
+ Number of tokens in the prompt used to generate the output.
- reasoning_tokens : typing.Optional[int]
- Number of reasoning tokens used to generate the output.
+ reasoning_tokens : typing.Optional[int]
+ Number of reasoning tokens used to generate the output.
- output_tokens : typing.Optional[int]
- Number of tokens in the output generated by the model.
+ output_tokens : typing.Optional[int]
+ Number of tokens in the output generated by the model.
- prompt_cost : typing.Optional[float]
- Cost in dollars associated to the tokens in the prompt.
+ prompt_cost : typing.Optional[float]
+ Cost in dollars associated with the tokens in the prompt.
- output_cost : typing.Optional[float]
- Cost in dollars associated to the tokens in the output.
+ output_cost : typing.Optional[float]
+ Cost in dollars associated with the tokens in the output.
- finish_reason : typing.Optional[str]
- Reason the generation finished.
+ finish_reason : typing.Optional[str]
+ Reason the generation finished.
- messages : typing.Optional[typing.Sequence[ChatMessageParams]]
- The messages passed to the to provider chat endpoint.
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
- tool_choice : typing.Optional[AgentLogRequestToolChoiceParams]
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
+ tool_choice : typing.Optional[AgentLogRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- agent : typing.Optional[AgentLogRequestAgentParams]
- The Agent configuration to use. Two formats are supported:
- - An object representing the details of the Agent configuration
- - A string representing the raw contents of a .agent file
- A new Agent version will be created if the provided details do not match any existing version.
+ agent : typing.Optional[AgentLogRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
- start_time : typing.Optional[dt.datetime]
- When the logged event started.
+ A new Agent version will be created if the provided details do not match any existing version.
- end_time : typing.Optional[dt.datetime]
- When the logged event ended.
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
- output : typing.Optional[str]
- Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
- created_at : typing.Optional[dt.datetime]
- User defined timestamp for when the log was created.
+ output : typing.Optional[str]
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
- error : typing.Optional[str]
- Error message if the log is an error.
+ created_at : typing.Optional[dt.datetime]
+ User-defined timestamp for when the Log was created.
- provider_latency : typing.Optional[float]
- Duration of the logged event in seconds.
+ error : typing.Optional[str]
+ Error message if the log is an error.
- stdout : typing.Optional[str]
- Captured log and debug statements.
+ provider_latency : typing.Optional[float]
+ Duration of the logged event in seconds.
- provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Raw request sent to provider.
+ stdout : typing.Optional[str]
+ Captured log and debug statements.
- provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Raw response received the provider.
+ provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw request sent to provider.
- inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- The inputs passed to the prompt template.
+ provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw response received from the provider.
- source : typing.Optional[str]
- Identifies where the model was called from.
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
- metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Any additional metadata to record.
+ source : typing.Optional[str]
+ Identifies where the model was called from.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
- source_datapoint_id : typing.Optional[str]
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs with Evaluations. If provided, Humanloop will automatically associate this Log with Evaluations that require a Log for this Datapoint-Version pair.
- trace_parent_id : typing.Optional[str]
- The ID of the parent Log to nest this Log under in a Trace.
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
- user : typing.Optional[str]
- End-user ID related to the Log.
+ user : typing.Optional[str]
+ End-user ID related to the Log.
- agent_log_request_environment : typing.Optional[str]
- The name of the Environment the Log is associated to.
+ agent_log_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated with.
- save : typing.Optional[bool]
- Whether the request/response payloads will be stored on Humanloop.
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
- log_id : typing.Optional[str]
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- Returns
- -------
- AsyncHttpResponse[CreateAgentLogResponse]
- Successful Response
+ Returns
+ -------
+ AsyncHttpResponse[CreateAgentLogResponse]
+ Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
"agents/log",
@@ -2249,7 +2242,6 @@ async def log(
"inputs": inputs,
"source": source,
"metadata": metadata,
- "log_status": log_status,
"source_datapoint_id": source_datapoint_id,
"trace_parent_id": trace_parent_id,
"user": user,
@@ -2331,7 +2323,7 @@ async def update_log(
The error message of the Flow Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`.
log_status : typing.Optional[LogStatus]
- Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs.
+ Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. You cannot update a Flow Log's status from `complete` to `incomplete`.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
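With `log_status` removed from the create paths, `update_log` is now the only way to move a Log to `complete`, and the rewritten description above makes the transition one-way. A sketch of the intended flow; the `id` keyword is an assumption, since only `log_status`, `output_message`, `output`, and `error` appear in the surrounding docstring:

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Close out an incomplete Log so monitoring Evaluators run on it.
client.agents.update_log(
    id="log_...",           # placeholder ID of the incomplete Log (assumed keyword)
    log_status="complete",  # one-way: cannot be reverted to "incomplete"
)
```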
@@ -2404,7 +2396,6 @@ async def call_stream(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -2417,103 +2408,101 @@ async def call_stream(
request_options: typing.Optional[RequestOptions] = None,
) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AgentCallStreamResponse]]]:
"""
- Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
+ Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
- If the Agent requires a tool call that cannot be ran by Humanloop, execution will halt. To continue,
- pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
+ If the Agent requires a tool call that cannot be run by Humanloop, execution will halt. To continue,
+ pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
- The agent will run for the maximum number of iterations, or until it encounters a stop condition,
- according to its configuration.
+ The Agent will run for the maximum number of iterations, or until it encounters a stop condition,
+ according to its configuration.
- You can use query parameters `version_id`, or `environment`, to target
- an existing version of the Agent. Otherwise the default deployed version will be chosen.
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Agent. Otherwise, the default deployed version will be chosen.
- Instead of targeting an existing version explicitly, you can instead pass in
- Agent details in the request body. A new version is created if it does not match
- any existing ones. This is helpful in the case where you are storing or deriving
- your Agent details in code.
+ Instead of targeting an existing version explicitly, you can pass in
+ Agent details in the request body. A new version is created if it does not match
+ any existing ones. This is helpful when you are storing or deriving
+ your Agent details in code.
- Parameters
- ----------
- version_id : typing.Optional[str]
- A specific Version ID of the Agent to log to.
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
- environment : typing.Optional[str]
- Name of the Environment identifying a deployed version to log to.
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
- path : typing.Optional[str]
- Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
- id : typing.Optional[str]
- ID for an existing Agent.
+ id : typing.Optional[str]
+ ID for an existing Agent.
- messages : typing.Optional[typing.Sequence[ChatMessageParams]]
- The messages passed to the to provider chat endpoint.
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
- tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams]
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
+ tool_choice : typing.Optional[AgentsCallStreamRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- agent : typing.Optional[AgentsCallStreamRequestAgentParams]
- The Agent configuration to use. Two formats are supported:
- - An object representing the details of the Agent configuration
- - A string representing the raw contents of a .agent file
- A new Agent version will be created if the provided details do not match any existing version.
+ agent : typing.Optional[AgentsCallStreamRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
- inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- The inputs passed to the prompt template.
+ A new Agent version will be created if the provided details do not match any existing version.
- source : typing.Optional[str]
- Identifies where the model was called from.
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
- metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Any additional metadata to record.
+ source : typing.Optional[str]
+ Identifies where the model was called from.
- start_time : typing.Optional[dt.datetime]
- When the logged event started.
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
- end_time : typing.Optional[dt.datetime]
- When the logged event ended.
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
- source_datapoint_id : typing.Optional[str]
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs with Evaluations. If provided, Humanloop will automatically associate this Log with Evaluations that require a Log for this Datapoint-Version pair.
- trace_parent_id : typing.Optional[str]
- The ID of the parent Log to nest this Log under in a Trace.
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
- user : typing.Optional[str]
- End-user ID related to the Log.
+ user : typing.Optional[str]
+ End-user ID related to the Log.
- agents_call_stream_request_environment : typing.Optional[str]
- The name of the Environment the Log is associated to.
+ agents_call_stream_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated with.
- save : typing.Optional[bool]
- Whether the request/response payloads will be stored on Humanloop.
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
- log_id : typing.Optional[str]
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- provider_api_keys : typing.Optional[ProviderApiKeysParams]
- API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
- return_inputs : typing.Optional[bool]
- Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
- include_trace_children : typing.Optional[bool]
- If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- Yields
- ------
- typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AgentCallStreamResponse]]]
+ Yields
+ ------
+ typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[AgentCallStreamResponse]]]
"""
async with self._client_wrapper.httpx_client.stream(
@@ -2540,7 +2532,6 @@ async def call_stream(
"metadata": metadata,
"start_time": start_time,
"end_time": end_time,
- "log_status": log_status,
"source_datapoint_id": source_datapoint_id,
"trace_parent_id": trace_parent_id,
"user": user,
@@ -2619,7 +2610,6 @@ async def call(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -2632,103 +2622,104 @@ async def call(
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[AgentCallResponse]:
"""
- Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
+ Call an Agent. The Agent will run on the Humanloop runtime and return a completed Agent Log.
- If the Agent requires a tool call that cannot be ran by Humanloop, execution will halt. To continue,
- pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
+ If the Agent requires a tool call that cannot be run by Humanloop, execution will halt. To continue,
+ pass the ID of the incomplete Log and the required tool call to the /agents/continue endpoint.
- The agent will run for the maximum number of iterations, or until it encounters a stop condition,
- according to its configuration.
+ The agent will run for the maximum number of iterations, or until it encounters a stop condition,
+ according to its configuration.
- You can use query parameters `version_id`, or `environment`, to target
- an existing version of the Agent. Otherwise the default deployed version will be chosen.
+ You can use query parameters `version_id`, or `environment`, to target
+ an existing version of the Agent. Otherwise the default deployed version will be chosen.
- Instead of targeting an existing version explicitly, you can instead pass in
- Agent details in the request body. A new version is created if it does not match
- any existing ones. This is helpful in the case where you are storing or deriving
- your Agent details in code.
+ Instead of targeting an existing version explicitly, you can instead pass in
+ Agent details in the request body. A new version is created if it does not match
+ any existing ones. This is helpful in the case where you are storing or deriving
+ your Agent details in code.
- Parameters
- ----------
- version_id : typing.Optional[str]
- A specific Version ID of the Agent to log to.
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Agent to log to.
- environment : typing.Optional[str]
- Name of the Environment identifying a deployed version to log to.
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
- path : typing.Optional[str]
- Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+ path : typing.Optional[str]
+ Path of the Agent, including the name. This locates the Agent in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
- id : typing.Optional[str]
- ID for an existing Agent.
+ id : typing.Optional[str]
+ ID for an existing Agent.
- messages : typing.Optional[typing.Sequence[ChatMessageParams]]
- The messages passed to the to provider chat endpoint.
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
- tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams]
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
+ tool_choice : typing.Optional[AgentsCallRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- agent : typing.Optional[AgentsCallRequestAgentParams]
- The Agent configuration to use. Two formats are supported:
- - An object representing the details of the Agent configuration
- - A string representing the raw contents of a .agent file
- A new Agent version will be created if the provided details do not match any existing version.
+ agent : typing.Optional[AgentsCallRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
- inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- The inputs passed to the prompt template.
+ A new Agent version will be created if the provided details do not match any existing version.
- source : typing.Optional[str]
- Identifies where the model was called from.
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
- metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Any additional metadata to record.
+ source : typing.Optional[str]
+ Identifies where the model was called from.
- start_time : typing.Optional[dt.datetime]
- When the logged event started.
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
- end_time : typing.Optional[dt.datetime]
- When the logged event ended.
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
- source_datapoint_id : typing.Optional[str]
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
- trace_parent_id : typing.Optional[str]
- The ID of the parent Log to nest this Log under in a Trace.
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
- user : typing.Optional[str]
- End-user ID related to the Log.
+ user : typing.Optional[str]
+ End-user ID related to the Log.
- agents_call_request_environment : typing.Optional[str]
- The name of the Environment the Log is associated to.
+ agents_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
- save : typing.Optional[bool]
- Whether the request/response payloads will be stored on Humanloop.
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
- log_id : typing.Optional[str]
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- provider_api_keys : typing.Optional[ProviderApiKeysParams]
- API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
- return_inputs : typing.Optional[bool]
- Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
- include_trace_children : typing.Optional[bool]
- If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
+ include_trace_children : typing.Optional[bool]
+ If true, populate `trace_children` for the returned Agent Log. Only applies when not streaming. Defaults to false.
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- Returns
- -------
- AsyncHttpResponse[AgentCallResponse]
+ Returns
+ -------
+ AsyncHttpResponse[AgentCallResponse]
"""
_response = await self._client_wrapper.httpx_client.request(
@@ -2755,7 +2746,6 @@ async def call(
"metadata": metadata,
"start_time": start_time,
"end_time": end_time,
- "log_status": log_status,
"source_datapoint_id": source_datapoint_id,
"trace_parent_id": trace_parent_id,
"user": user,
diff --git a/src/humanloop/core/client_wrapper.py b/src/humanloop/core/client_wrapper.py
index 7b49ac9..5edb291 100644
--- a/src/humanloop/core/client_wrapper.py
+++ b/src/humanloop/core/client_wrapper.py
@@ -14,10 +14,10 @@ def __init__(self, *, api_key: str, base_url: str, timeout: typing.Optional[floa
def get_headers(self) -> typing.Dict[str, str]:
headers: typing.Dict[str, str] = {
- "User-Agent": "humanloop/0.8.40b3",
+ "User-Agent": "humanloop/0.8.40b5",
"X-Fern-Language": "Python",
"X-Fern-SDK-Name": "humanloop",
- "X-Fern-SDK-Version": "0.8.40b3",
+ "X-Fern-SDK-Version": "0.8.40b5",
}
headers["X-API-KEY"] = self.api_key
return headers
diff --git a/src/humanloop/evaluators/client.py b/src/humanloop/evaluators/client.py
index 69fff10..78d0f82 100644
--- a/src/humanloop/evaluators/client.py
+++ b/src/humanloop/evaluators/client.py
@@ -18,7 +18,6 @@
from ..types.file_environment_response import FileEnvironmentResponse
from ..types.file_sort_by import FileSortBy
from ..types.list_evaluators import ListEvaluators
-from ..types.log_status import LogStatus
from ..types.sort_order import SortOrder
from .raw_client import AsyncRawEvaluatorsClient, RawEvaluatorsClient
from .requests.create_evaluator_log_request_judgment import CreateEvaluatorLogRequestJudgmentParams
@@ -64,7 +63,6 @@ def log(
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -135,9 +133,6 @@ def log(
metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Any additional metadata to record.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -199,7 +194,6 @@ def log(
inputs=inputs,
source=source,
metadata=metadata,
- log_status=log_status,
source_datapoint_id=source_datapoint_id,
trace_parent_id=trace_parent_id,
user=user,
@@ -735,7 +729,6 @@ async def log(
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -806,9 +799,6 @@ async def log(
metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Any additional metadata to record.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -873,7 +863,6 @@ async def main() -> None:
inputs=inputs,
source=source,
metadata=metadata,
- log_status=log_status,
source_datapoint_id=source_datapoint_id,
trace_parent_id=trace_parent_id,
user=user,
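
For the evaluators client, the same removal means every Evaluator Log is picked up by observability immediately; there is no deferred status to manage. A hedged sketch of a post-change `log` call, with the path, `parent_id`, and judgment value all illustrative:

    from humanloop import Humanloop

    client = Humanloop(api_key="YOUR_API_KEY")

    # Record a judgment against an existing Log; IDs here are hypothetical.
    client.evaluators.log(
        path="demo/my-evaluator",
        parent_id="log_abc123",
        judgment=True,
    )
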
diff --git a/src/humanloop/evaluators/raw_client.py b/src/humanloop/evaluators/raw_client.py
index 8aeb32b..5a64aea 100644
--- a/src/humanloop/evaluators/raw_client.py
+++ b/src/humanloop/evaluators/raw_client.py
@@ -26,7 +26,6 @@
from ..types.file_sort_by import FileSortBy
from ..types.http_validation_error import HttpValidationError
from ..types.list_evaluators import ListEvaluators
-from ..types.log_status import LogStatus
from ..types.paginated_data_evaluator_response import PaginatedDataEvaluatorResponse
from ..types.sort_order import SortOrder
from .requests.create_evaluator_log_request_judgment import CreateEvaluatorLogRequestJudgmentParams
@@ -61,7 +60,6 @@ def log(
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -132,9 +130,6 @@ def log(
metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Any additional metadata to record.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -194,7 +189,6 @@ def log(
"inputs": inputs,
"source": source,
"metadata": metadata,
- "log_status": log_status,
"parent_id": parent_id,
"source_datapoint_id": source_datapoint_id,
"trace_parent_id": trace_parent_id,
@@ -1047,7 +1041,6 @@ async def log(
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -1118,9 +1111,6 @@ async def log(
metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Any additional metadata to record.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -1180,7 +1170,6 @@ async def log(
"inputs": inputs,
"source": source,
"metadata": metadata,
- "log_status": log_status,
"parent_id": parent_id,
"source_datapoint_id": source_datapoint_id,
"trace_parent_id": trace_parent_id,
diff --git a/src/humanloop/flows/client.py b/src/humanloop/flows/client.py
index 8fae236..f4c5b23 100644
--- a/src/humanloop/flows/client.py
+++ b/src/humanloop/flows/client.py
@@ -81,9 +81,6 @@ def log(
You can use query parameters `version_id`, or `environment`, to target
an existing version of the Flow. Otherwise, the default deployed version will be chosen.
- If you create the Flow Log with a `log_status` of `incomplete`, you should later update it to `complete`
- in order to trigger Evaluators.
-
Parameters
----------
version_id : typing.Optional[str]
@@ -144,7 +141,7 @@ def log(
Any additional metadata to record.
log_status : typing.Optional[LogStatus]
- Status of the Flow Log. When a Log is updated from `incomplete` to `complete`, it becomes available to Monitoring Evaluators. Flow Logs cannot have an unspecified status: they must start as `incomplete` to allow children to be added. Provide `complete` if you don't intend to add children to the trace.
+ Status of the Flow Log. When a Log is updated from `incomplete` to `complete`, no more Logs can be added to it.
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -258,7 +255,7 @@ def update_log(
The error message of the Flow Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`.
log_status : typing.Optional[LogStatus]
- Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs.
+ Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. You cannot update a Flow Log's status from `complete` to `incomplete`.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -837,9 +834,6 @@ async def log(
You can use query parameters `version_id`, or `environment`, to target
an existing version of the Flow. Otherwise, the default deployed version will be chosen.
- If you create the Flow Log with a `log_status` of `incomplete`, you should later update it to `complete`
- in order to trigger Evaluators.
-
Parameters
----------
version_id : typing.Optional[str]
@@ -900,7 +894,7 @@ async def log(
Any additional metadata to record.
log_status : typing.Optional[LogStatus]
- Status of the Flow Log. When a Log is updated from `incomplete` to `complete`, it becomes available to Monitoring Evaluators. Flow Logs cannot have an unspecified status: they must start as `incomplete` to allow children to be added. Provide `complete` if you don't intend to add children to the trace.
+ Status of the Flow Log. When a Log is updated from `incomplete` to `complete`, no more Logs can be added to it.
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -1017,7 +1011,7 @@ async def update_log(
The error message of the Flow Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`.
log_status : typing.Optional[LogStatus]
- Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs.
+ Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. You cannot update a Flow Log's status from `complete` to `incomplete`.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
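
The reworded Flow docstrings above imply a fixed lifecycle: a Flow Log starts `incomplete`, child Logs are attached while it is open, and updating it to `complete` both seals the trace and is irreversible. A short sketch under those assumptions (paths, IDs, and the returned `.id` attribute are illustrative):

    from humanloop import Humanloop

    client = Humanloop(api_key="YOUR_API_KEY")

    # Open the trace; an incomplete Flow Log can still receive child Logs.
    flow_log = client.flows.log(path="demo/my-flow", log_status="incomplete")

    # Nest a child Log under the trace via trace_parent_id.
    client.prompts.log(
        path="demo/my-prompt",
        trace_parent_id=flow_log.id,
        output="...",
    )

    # Seal the trace; no more Logs can be added to it, and the status
    # cannot be moved back to `incomplete`.
    client.flows.update_log(log_id=flow_log.id, log_status="complete")
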
diff --git a/src/humanloop/flows/raw_client.py b/src/humanloop/flows/raw_client.py
index e395457..8a82714 100644
--- a/src/humanloop/flows/raw_client.py
+++ b/src/humanloop/flows/raw_client.py
@@ -78,9 +78,6 @@ def log(
You can use query parameters `version_id`, or `environment`, to target
an existing version of the Flow. Otherwise, the default deployed version will be chosen.
- If you create the Flow Log with a `log_status` of `incomplete`, you should later update it to `complete`
- in order to trigger Evaluators.
-
Parameters
----------
version_id : typing.Optional[str]
@@ -141,7 +138,7 @@ def log(
Any additional metadata to record.
log_status : typing.Optional[LogStatus]
- Status of the Flow Log. When a Log is updated from `incomplete` to `complete`, it becomes available to Monitoring Evaluators. Flow Logs cannot have an unspecified status: they must start as `incomplete` to allow children to be added. Provide `complete` if you don't intend to add children to the trace.
+ Status of the Flow Log. When a Log is updated from `incomplete` to `complete`, no more Logs can be added to it.
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -285,7 +282,7 @@ def update_log(
The error message of the Flow Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`.
log_status : typing.Optional[LogStatus]
- Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs.
+ Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. You cannot update a Flow Log's status from `complete` to `incomplete`.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -1166,9 +1163,6 @@ async def log(
You can use query parameters `version_id`, or `environment`, to target
an existing version of the Flow. Otherwise, the default deployed version will be chosen.
- If you create the Flow Log with a `log_status` of `incomplete`, you should later update it to `complete`
- in order to trigger Evaluators.
-
Parameters
----------
version_id : typing.Optional[str]
@@ -1229,7 +1223,7 @@ async def log(
Any additional metadata to record.
log_status : typing.Optional[LogStatus]
- Status of the Flow Log. When a Log is updated from `incomplete` to `complete`, it becomes available to Monitoring Evaluators. Flow Logs cannot have an unspecified status: they must start as `incomplete` to allow children to be added. Provide `complete` if you don't intend to add children to the trace.
+ Status of the Flow Log. When a Log is updated from `incomplete` to `complete`, no more Logs can be added to it.
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -1373,7 +1367,7 @@ async def update_log(
The error message of the Flow Log. Provide None to unset existing `error` value. Provide either this, `output_message` or `output`.
log_status : typing.Optional[LogStatus]
- Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs.
+ Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. You cannot update a Flow Log's status from `complete` to `incomplete`.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
diff --git a/src/humanloop/overload.py b/src/humanloop/overload.py
index f05a6a5..b682e72 100644
--- a/src/humanloop/overload.py
+++ b/src/humanloop/overload.py
@@ -238,6 +238,17 @@ def _overload_call(self: T, file_syncer: Optional[FileSyncer], use_local_files:
raise HumanloopRuntimeError from e
+ClientTemplateType = TypeVar(
+ "ClientTemplateType",
+ bound=Union[
+ FlowsClient,
+ PromptsClient,
+ AgentsClient,
+ ToolsClient,
+ ],
+)
+
+
def overload_client(
client: T,
file_syncer: Optional[FileSyncer] = None,
@@ -253,8 +264,7 @@ def overload_client(
def log_wrapper(self: T, **kwargs) -> LogResponseType:
return _overload_log(self, file_syncer, use_local_files, **kwargs)
- # Replace the log method with type ignore
- client.log = types.MethodType(log_wrapper, client) # type: ignore
+ client.log = types.MethodType(log_wrapper, client) # type: ignore [method-assign, union-attr]
# Overload call method for Prompt and Agent clients
if _get_file_type_from_client(client) in FileSyncer.SERIALIZABLE_FILE_TYPES:
@@ -262,14 +272,13 @@ def log_wrapper(self: T, **kwargs) -> LogResponseType:
logger.error("file_syncer is None but client has call method and use_local_files=%s", use_local_files)
raise HumanloopRuntimeError("file_syncer is required for clients that support call operations")
if hasattr(client, "call") and not hasattr(client, "_call"):
- # Store original method with type ignore
- client._call = client.call # type: ignore
+ client._call = client.call # type: ignore [method-assign, union-attr]
# Create a closure to capture file_syncer and use_local_files
def call_wrapper(self: T, **kwargs) -> CallResponseType:
return _overload_call(self, file_syncer, use_local_files, **kwargs)
# Replace the call method with type ignore
- client.call = types.MethodType(call_wrapper, client) # type: ignore
+ client.call = types.MethodType(call_wrapper, client) # type: ignore [method-assign]
return client
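
The `types.MethodType` rebinding that `overload_client` relies on can be reduced to a self-contained sketch; the class and wrapper below are illustrative stand-ins, not the SDK's internals:

    import types

    class Client:
        def log(self, **kwargs):
            return {"logged": kwargs}

    def overload(client: Client) -> Client:
        original = client.log  # keep a reference to the bound original

        def log_wrapper(self: Client, **kwargs):
            kwargs.setdefault("source", "local")  # e.g. inject file-sync defaults
            return original(**kwargs)

        # Bind per instance so other Client objects keep the unwrapped method.
        client.log = types.MethodType(log_wrapper, client)
        return client

    c = overload(Client())
    print(c.log(output="hi"))  # {'logged': {'output': 'hi', 'source': 'local'}}
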
diff --git a/src/humanloop/prompts/client.py b/src/humanloop/prompts/client.py
index cd772a1..a99ba8e 100644
--- a/src/humanloop/prompts/client.py
+++ b/src/humanloop/prompts/client.py
@@ -21,7 +21,6 @@
from ..types.file_sort_by import FileSortBy
from ..types.list_prompts import ListPrompts
from ..types.log_response import LogResponse
-from ..types.log_status import LogStatus
from ..types.model_endpoints import ModelEndpoints
from ..types.model_providers import ModelProviders
from ..types.populate_template_response import PopulateTemplateResponse
@@ -92,7 +91,6 @@ def log(
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -202,9 +200,6 @@ def log(
metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Any additional metadata to record.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -267,7 +262,6 @@ def log(
inputs=inputs,
source=source,
metadata=metadata,
- log_status=log_status,
source_datapoint_id=source_datapoint_id,
trace_parent_id=trace_parent_id,
user=user,
@@ -304,7 +298,6 @@ def update_log(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> LogResponse:
"""
@@ -387,9 +380,6 @@ def update_log(
end_time : typing.Optional[dt.datetime]
When the logged event ended.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -428,7 +418,6 @@ def update_log(
metadata=metadata,
start_time=start_time,
end_time=end_time,
- log_status=log_status,
request_options=request_options,
)
return _response.data
@@ -448,7 +437,6 @@ def call_stream(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -463,115 +451,116 @@ def call_stream(
request_options: typing.Optional[RequestOptions] = None,
) -> typing.Iterator[PromptCallStreamResponse]:
"""
- Call a Prompt.
+ Call a Prompt.
- Calling a Prompt calls the model provider before logging
- the request, responses and metadata to Humanloop.
+ Calling a Prompt calls the model provider before logging
+ the request, responses and metadata to Humanloop.
- You can use query parameters `version_id`, or `environment`, to target
- an existing version of the Prompt. Otherwise the default deployed version will be chosen.
+ You can use query parameters `version_id`, or `environment`, to target
+ an existing version of the Prompt. Otherwise the default deployed version will be chosen.
- Instead of targeting an existing version explicitly, you can instead pass in
- Prompt details in the request body. In this case, we will check if the details correspond
- to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
- in the case where you are storing or deriving your Prompt details in code.
+ Instead of targeting an existing version explicitly, you can instead pass in
+ Prompt details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Prompt details in code.
- Parameters
- ----------
- version_id : typing.Optional[str]
- A specific Version ID of the Prompt to log to.
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Prompt to log to.
- environment : typing.Optional[str]
- Name of the Environment identifying a deployed version to log to.
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
- path : typing.Optional[str]
- Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+ path : typing.Optional[str]
+ Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
- id : typing.Optional[str]
- ID for an existing Prompt.
+ id : typing.Optional[str]
+ ID for an existing Prompt.
- messages : typing.Optional[typing.Sequence[ChatMessageParams]]
- The messages passed to the to provider chat endpoint.
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
- tool_choice : typing.Optional[PromptsCallStreamRequestToolChoiceParams]
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
+ tool_choice : typing.Optional[PromptsCallStreamRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
- The Prompt configuration to use. Two formats are supported:
- - An object representing the details of the Prompt configuration
- - A string representing the raw contents of a .prompt file
- A new Prompt version will be created if the provided details do not match any existing version.
+ prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
- inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- The inputs passed to the prompt template.
+ A new Prompt version will be created if the provided details do not match any existing version.
- source : typing.Optional[str]
- Identifies where the model was called from.
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
- metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Any additional metadata to record.
+ source : typing.Optional[str]
+ Identifies where the model was called from.
- start_time : typing.Optional[dt.datetime]
- When the logged event started.
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
- end_time : typing.Optional[dt.datetime]
- When the logged event ended.
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
- source_datapoint_id : typing.Optional[str]
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
- trace_parent_id : typing.Optional[str]
- The ID of the parent Log to nest this Log under in a Trace.
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
- user : typing.Optional[str]
- End-user ID related to the Log.
+ user : typing.Optional[str]
+ End-user ID related to the Log.
- prompts_call_stream_request_environment : typing.Optional[str]
- The name of the Environment the Log is associated to.
+ prompts_call_stream_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
- save : typing.Optional[bool]
- Whether the request/response payloads will be stored on Humanloop.
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
- log_id : typing.Optional[str]
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- provider_api_keys : typing.Optional[ProviderApiKeysParams]
- API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
- num_samples : typing.Optional[int]
- The number of generations.
+ num_samples : typing.Optional[int]
+ The number of generations.
- return_inputs : typing.Optional[bool]
- Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
- logprobs : typing.Optional[int]
- Include the log probabilities of the top n tokens in the provider_response
+ logprobs : typing.Optional[int]
+ Include the log probabilities of the top n tokens in the provider_response
- suffix : typing.Optional[str]
- The suffix that comes after a completion of inserted text. Useful for completions that act like inserts.
+ suffix : typing.Optional[str]
+ The suffix that comes after a completion of inserted text. Useful for completions that act like inserts.
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- Yields
- ------
- typing.Iterator[PromptCallStreamResponse]
+ Yields
+ ------
+ typing.Iterator[PromptCallStreamResponse]
- Examples
- --------
- from humanloop import Humanloop
- client = Humanloop(api_key="YOUR_API_KEY", )
- response = client.prompts.call_stream()
- for chunk in response:
- yield chunk
+ Examples
+ --------
+ from humanloop import Humanloop
+ client = Humanloop(api_key="YOUR_API_KEY")
+ response = client.prompts.call_stream()
+ for chunk in response:
+     print(chunk)
"""
with self._raw_client.call_stream(
version_id=version_id,
@@ -586,7 +575,6 @@ def call_stream(
metadata=metadata,
start_time=start_time,
end_time=end_time,
- log_status=log_status,
source_datapoint_id=source_datapoint_id,
trace_parent_id=trace_parent_id,
user=user,
@@ -617,7 +605,6 @@ def call(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -632,116 +619,117 @@ def call(
request_options: typing.Optional[RequestOptions] = None,
) -> PromptCallResponse:
"""
- Call a Prompt.
+ Call a Prompt.
- Calling a Prompt calls the model provider before logging
- the request, responses and metadata to Humanloop.
+ Calling a Prompt calls the model provider before logging
+ the request, responses and metadata to Humanloop.
- You can use query parameters `version_id`, or `environment`, to target
- an existing version of the Prompt. Otherwise the default deployed version will be chosen.
+ You can use query parameters `version_id`, or `environment`, to target
+ an existing version of the Prompt. Otherwise the default deployed version will be chosen.
- Instead of targeting an existing version explicitly, you can instead pass in
- Prompt details in the request body. In this case, we will check if the details correspond
- to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
- in the case where you are storing or deriving your Prompt details in code.
+ Instead of targeting an existing version explicitly, you can instead pass in
+ Prompt details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Prompt details in code.
- Parameters
- ----------
- version_id : typing.Optional[str]
- A specific Version ID of the Prompt to log to.
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Prompt to log to.
- environment : typing.Optional[str]
- Name of the Environment identifying a deployed version to log to.
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
- path : typing.Optional[str]
- Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+ path : typing.Optional[str]
+ Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
- id : typing.Optional[str]
- ID for an existing Prompt.
+ id : typing.Optional[str]
+ ID for an existing Prompt.
- messages : typing.Optional[typing.Sequence[ChatMessageParams]]
- The messages passed to the to provider chat endpoint.
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
- tool_choice : typing.Optional[PromptsCallRequestToolChoiceParams]
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
+ tool_choice : typing.Optional[PromptsCallRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- prompt : typing.Optional[PromptsCallRequestPromptParams]
- The Prompt configuration to use. Two formats are supported:
- - An object representing the details of the Prompt configuration
- - A string representing the raw contents of a .prompt file
- A new Prompt version will be created if the provided details do not match any existing version.
+ prompt : typing.Optional[PromptsCallRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
- inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- The inputs passed to the prompt template.
+ A new Prompt version will be created if the provided details do not match any existing version.
- source : typing.Optional[str]
- Identifies where the model was called from.
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
- metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Any additional metadata to record.
+ source : typing.Optional[str]
+ Identifies where the model was called from.
- start_time : typing.Optional[dt.datetime]
- When the logged event started.
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
- end_time : typing.Optional[dt.datetime]
- When the logged event ended.
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
- source_datapoint_id : typing.Optional[str]
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
- trace_parent_id : typing.Optional[str]
- The ID of the parent Log to nest this Log under in a Trace.
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
- user : typing.Optional[str]
- End-user ID related to the Log.
+ user : typing.Optional[str]
+ End-user ID related to the Log.
- prompts_call_request_environment : typing.Optional[str]
- The name of the Environment the Log is associated to.
+ prompts_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated to.
- save : typing.Optional[bool]
- Whether the request/response payloads will be stored on Humanloop.
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
- log_id : typing.Optional[str]
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- provider_api_keys : typing.Optional[ProviderApiKeysParams]
- API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
- num_samples : typing.Optional[int]
- The number of generations.
+ num_samples : typing.Optional[int]
+ The number of generations.
- return_inputs : typing.Optional[bool]
- Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
- logprobs : typing.Optional[int]
- Include the log probabilities of the top n tokens in the provider_response
+ logprobs : typing.Optional[int]
+ Include the log probabilities of the top n tokens in the provider_response
- suffix : typing.Optional[str]
- The suffix that comes after a completion of inserted text. Useful for completions that act like inserts.
+ suffix : typing.Optional[str]
+ The suffix that comes after a completion of inserted text. Useful for completions that act like inserts.
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- Returns
- -------
- PromptCallResponse
+ Returns
+ -------
+ PromptCallResponse
- Examples
- --------
- from humanloop import Humanloop
- client = Humanloop(api_key="YOUR_API_KEY", )
- client.prompts.call(path='persona', prompt={'model': 'gpt-4', 'template': [{'role': "system", 'content': 'You are stockbot. Return latest prices.'}], 'tools': [{'name': 'get_stock_price', 'description': 'Get current stock price', 'parameters': {'type': 'object'
- , 'properties': {'ticker_symbol': {'type': 'string', 'name': 'Ticker Symbol', 'description': 'Ticker symbol of the stock'}}
- , 'required': []
- }}]}, messages=[{'role': "user", 'content': 'latest apple'}], )
+ Examples
+ --------
+ from humanloop import Humanloop
+ client = Humanloop(api_key="YOUR_API_KEY")
+ client.prompts.call(
+     path='persona',
+     prompt={'model': 'gpt-4', 'template': [{'role': "system", 'content': 'You are stockbot. Return latest prices.'}], 'tools': [{'name': 'get_stock_price', 'description': 'Get current stock price', 'parameters': {'type': 'object', 'properties': {'ticker_symbol': {'type': 'string', 'name': 'Ticker Symbol', 'description': 'Ticker symbol of the stock'}}, 'required': []}}]},
+     messages=[{'role': "user", 'content': 'latest apple'}],
+ )
"""
_response = self._raw_client.call(
version_id=version_id,
@@ -756,7 +744,6 @@ def call(
metadata=metadata,
start_time=start_time,
end_time=end_time,
- log_status=log_status,
source_datapoint_id=source_datapoint_id,
trace_parent_id=trace_parent_id,
user=user,
@@ -1534,7 +1521,6 @@ async def log(
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -1544,145 +1530,146 @@ async def log(
request_options: typing.Optional[RequestOptions] = None,
) -> CreatePromptLogResponse:
"""
- Log to a Prompt.
+ Log to a Prompt.
- You can use query parameters `version_id`, or `environment`, to target
- an existing version of the Prompt. Otherwise, the default deployed version will be chosen.
+ You can use query parameters `version_id`, or `environment`, to target
+ an existing version of the Prompt. Otherwise, the default deployed version will be chosen.
- Instead of targeting an existing version explicitly, you can instead pass in
- Prompt details in the request body. In this case, we will check if the details correspond
- to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
- in the case where you are storing or deriving your Prompt details in code.
+ Instead of targeting an existing version explicitly, you can instead pass in
+ Prompt details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Prompt details in code.
- Parameters
- ----------
- version_id : typing.Optional[str]
- A specific Version ID of the Prompt to log to.
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Prompt to log to.
- environment : typing.Optional[str]
- Name of the Environment identifying a deployed version to log to.
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
- run_id : typing.Optional[str]
- Unique identifier for the Run to associate the Log to.
+ run_id : typing.Optional[str]
+ Unique identifier for the Run to associate the Log to.
- path : typing.Optional[str]
- Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+ path : typing.Optional[str]
+ Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
- id : typing.Optional[str]
- ID for an existing Prompt.
+ id : typing.Optional[str]
+ ID for an existing Prompt.
- output_message : typing.Optional[ChatMessageParams]
- The message returned by the provider.
+ output_message : typing.Optional[ChatMessageParams]
+ The message returned by the provider.
- prompt_tokens : typing.Optional[int]
- Number of tokens in the prompt used to generate the output.
+ prompt_tokens : typing.Optional[int]
+ Number of tokens in the prompt used to generate the output.
- reasoning_tokens : typing.Optional[int]
- Number of reasoning tokens used to generate the output.
+ reasoning_tokens : typing.Optional[int]
+ Number of reasoning tokens used to generate the output.
- output_tokens : typing.Optional[int]
- Number of tokens in the output generated by the model.
+ output_tokens : typing.Optional[int]
+ Number of tokens in the output generated by the model.
- prompt_cost : typing.Optional[float]
- Cost in dollars associated to the tokens in the prompt.
+ prompt_cost : typing.Optional[float]
+ Cost in dollars associated with the tokens in the prompt.
- output_cost : typing.Optional[float]
- Cost in dollars associated to the tokens in the output.
+ output_cost : typing.Optional[float]
+ Cost in dollars associated with the tokens in the output.
- finish_reason : typing.Optional[str]
- Reason the generation finished.
+ finish_reason : typing.Optional[str]
+ Reason the generation finished.
- messages : typing.Optional[typing.Sequence[ChatMessageParams]]
- The messages passed to the to provider chat endpoint.
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
- tool_choice : typing.Optional[PromptLogRequestToolChoiceParams]
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
+ tool_choice : typing.Optional[PromptLogRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- prompt : typing.Optional[PromptLogRequestPromptParams]
- The Prompt configuration to use. Two formats are supported:
- - An object representing the details of the Prompt configuration
- - A string representing the raw contents of a .prompt file
- A new Prompt version will be created if the provided details do not match any existing version.
+ prompt : typing.Optional[PromptLogRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
- start_time : typing.Optional[dt.datetime]
- When the logged event started.
+ A new Prompt version will be created if the provided details do not match any existing version.
- end_time : typing.Optional[dt.datetime]
- When the logged event ended.
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
- output : typing.Optional[str]
- Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
- created_at : typing.Optional[dt.datetime]
- User defined timestamp for when the log was created.
+ output : typing.Optional[str]
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
- error : typing.Optional[str]
- Error message if the log is an error.
+ created_at : typing.Optional[dt.datetime]
+ User-defined timestamp for when the log was created.
- provider_latency : typing.Optional[float]
- Duration of the logged event in seconds.
+ error : typing.Optional[str]
+ Error message if the log is an error.
- stdout : typing.Optional[str]
- Captured log and debug statements.
+ provider_latency : typing.Optional[float]
+ Duration of the logged event in seconds.
- provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Raw request sent to provider.
+ stdout : typing.Optional[str]
+ Captured log and debug statements.
- provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Raw response received the provider.
+ provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw request sent to provider.
- inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- The inputs passed to the prompt template.
+ provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw response received from the provider.
- source : typing.Optional[str]
- Identifies where the model was called from.
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
- metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Any additional metadata to record.
+ source : typing.Optional[str]
+ Identifies where the model was called from.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
- source_datapoint_id : typing.Optional[str]
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs with Evaluations. If provided, Humanloop will automatically associate this Log with Evaluations that require a Log for this Datapoint-Version pair.
- trace_parent_id : typing.Optional[str]
- The ID of the parent Log to nest this Log under in a Trace.
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
- user : typing.Optional[str]
- End-user ID related to the Log.
+ user : typing.Optional[str]
+ End-user ID related to the Log.
- prompt_log_request_environment : typing.Optional[str]
- The name of the Environment the Log is associated to.
+ prompt_log_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated with.
- save : typing.Optional[bool]
- Whether the request/response payloads will be stored on Humanloop.
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
- log_id : typing.Optional[str]
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- Returns
- -------
- CreatePromptLogResponse
- Successful Response
+ Returns
+ -------
+ CreatePromptLogResponse
+ Successful Response
- Examples
- --------
- from humanloop import AsyncHumanloop
- import datetime
- import asyncio
- client = AsyncHumanloop(api_key="YOUR_API_KEY", )
- async def main() -> None:
- await client.prompts.log(path='persona', prompt={'model': 'gpt-4', 'template': [{'role': "system", 'content': 'You are {{person}}. Answer questions as this person. Do not break character.'}]}, messages=[{'role': "user", 'content': 'What really happened at Roswell?'}], inputs={'person': 'Trump'
- }, created_at=datetime.datetime.fromisoformat("2024-07-18 23:29:35.178000+00:00", ), provider_latency=6.5931549072265625, output_message={'content': "Well, you know, there is so much secrecy involved in government, folks, it's unbelievable. They don't want to tell you everything. They don't tell me everything! But about Roswell, it's a very popular question. I know, I just know, that something very, very peculiar happened there. Was it a weather balloon? Maybe. Was it something extraterrestrial? Could be. I'd love to go down and open up all the classified documents, believe me, I would. But they don't let that happen. The Deep State, folks, the Deep State. They're unbelievable. They want to keep everything a secret. But whatever the truth is, I can tell you this: it's something big, very very big. Tremendous, in fact.", 'role': "assistant"}, prompt_tokens=100, output_tokens=220, prompt_cost=1e-05, output_cost=0.0002, finish_reason='stop', )
- asyncio.run(main())
+ Examples
+ --------
+ from humanloop import AsyncHumanloop
+ import datetime
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY")
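+ # Log a pre-generated chat completion against the 'persona' Prompt; matching
+ # Prompt details reuse an existing version, otherwise a new version is created.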
+ async def main() -> None:
+ await client.prompts.log(path='persona', prompt={'model': 'gpt-4', 'template': [{'role': "system", 'content': 'You are {{person}}. Answer questions as this person. Do not break character.'}]}, messages=[{'role': "user", 'content': 'What really happened at Roswell?'}], inputs={'person': 'Trump'
+ }, created_at=datetime.datetime.fromisoformat("2024-07-18 23:29:35.178000+00:00", ), provider_latency=6.5931549072265625, output_message={'content': "Well, you know, there is so much secrecy involved in government, folks, it's unbelievable. They don't want to tell you everything. They don't tell me everything! But about Roswell, it's a very popular question. I know, I just know, that something very, very peculiar happened there. Was it a weather balloon? Maybe. Was it something extraterrestrial? Could be. I'd love to go down and open up all the classified documents, believe me, I would. But they don't let that happen. The Deep State, folks, the Deep State. They're unbelievable. They want to keep everything a secret. But whatever the truth is, I can tell you this: it's something big, very very big. Tremendous, in fact.", 'role': "assistant"}, prompt_tokens=100, output_tokens=220, prompt_cost=1e-05, output_cost=0.0002, finish_reason='stop', )
+ asyncio.run(main())
"""
_response = await self._raw_client.log(
version_id=version_id,
@@ -1712,7 +1699,6 @@ async def main() -> None:
inputs=inputs,
source=source,
metadata=metadata,
- log_status=log_status,
source_datapoint_id=source_datapoint_id,
trace_parent_id=trace_parent_id,
user=user,
@@ -1749,7 +1735,6 @@ async def update_log(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> LogResponse:
"""
@@ -1832,9 +1817,6 @@ async def update_log(
end_time : typing.Optional[dt.datetime]
When the logged event ended.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -1876,7 +1858,6 @@ async def main() -> None:
metadata=metadata,
start_time=start_time,
end_time=end_time,
- log_status=log_status,
request_options=request_options,
)
return _response.data
@@ -1896,7 +1877,6 @@ async def call_stream(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -1911,118 +1891,119 @@ async def call_stream(
request_options: typing.Optional[RequestOptions] = None,
) -> typing.AsyncIterator[PromptCallStreamResponse]:
"""
- Call a Prompt.
+ Call a Prompt.
- Calling a Prompt calls the model provider before logging
- the request, responses and metadata to Humanloop.
+ Calling a Prompt calls the model provider before logging
+ the request, responses and metadata to Humanloop.
- You can use query parameters `version_id`, or `environment`, to target
- an existing version of the Prompt. Otherwise the default deployed version will be chosen.
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Prompt. Otherwise the default deployed version will be chosen.
- Instead of targeting an existing version explicitly, you can instead pass in
- Prompt details in the request body. In this case, we will check if the details correspond
- to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
- in the case where you are storing or deriving your Prompt details in code.
+ Instead of targeting an existing version explicitly, you can pass in
+ Prompt details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
+ when you are storing or deriving your Prompt details in code.
- Parameters
- ----------
- version_id : typing.Optional[str]
- A specific Version ID of the Prompt to log to.
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Prompt to log to.
- environment : typing.Optional[str]
- Name of the Environment identifying a deployed version to log to.
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
- path : typing.Optional[str]
- Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+ path : typing.Optional[str]
+ Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
- id : typing.Optional[str]
- ID for an existing Prompt.
+ id : typing.Optional[str]
+ ID for an existing Prompt.
- messages : typing.Optional[typing.Sequence[ChatMessageParams]]
- The messages passed to the to provider chat endpoint.
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
- tool_choice : typing.Optional[PromptsCallStreamRequestToolChoiceParams]
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
+ tool_choice : typing.Optional[PromptsCallStreamRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
- The Prompt configuration to use. Two formats are supported:
- - An object representing the details of the Prompt configuration
- - A string representing the raw contents of a .prompt file
- A new Prompt version will be created if the provided details do not match any existing version.
+ prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
- inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- The inputs passed to the prompt template.
+ A new Prompt version will be created if the provided details do not match any existing version.
- source : typing.Optional[str]
- Identifies where the model was called from.
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
- metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Any additional metadata to record.
+ source : typing.Optional[str]
+ Identifies where the model was called from.
- start_time : typing.Optional[dt.datetime]
- When the logged event started.
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
- end_time : typing.Optional[dt.datetime]
- When the logged event ended.
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
- source_datapoint_id : typing.Optional[str]
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs with Evaluations. If provided, Humanloop will automatically associate this Log with Evaluations that require a Log for this Datapoint-Version pair.
- trace_parent_id : typing.Optional[str]
- The ID of the parent Log to nest this Log under in a Trace.
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
- user : typing.Optional[str]
- End-user ID related to the Log.
+ user : typing.Optional[str]
+ End-user ID related to the Log.
- prompts_call_stream_request_environment : typing.Optional[str]
- The name of the Environment the Log is associated to.
+ prompts_call_stream_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated with.
- save : typing.Optional[bool]
- Whether the request/response payloads will be stored on Humanloop.
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
- log_id : typing.Optional[str]
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- provider_api_keys : typing.Optional[ProviderApiKeysParams]
- API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
- num_samples : typing.Optional[int]
- The number of generations.
+ num_samples : typing.Optional[int]
+ The number of generations.
- return_inputs : typing.Optional[bool]
- Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
- logprobs : typing.Optional[int]
- Include the log probabilities of the top n tokens in the provider_response
+ logprobs : typing.Optional[int]
+ Include the log probabilities of the top n tokens in the provider_response.
- suffix : typing.Optional[str]
- The suffix that comes after a completion of inserted text. Useful for completions that act like inserts.
+ suffix : typing.Optional[str]
+ The suffix that comes after a completion of inserted text. Useful for completions that act like inserts.
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- Yields
- ------
- typing.AsyncIterator[PromptCallStreamResponse]
+ Yields
+ ------
+ typing.AsyncIterator[PromptCallStreamResponse]
- Examples
- --------
- from humanloop import AsyncHumanloop
- import asyncio
- client = AsyncHumanloop(api_key="YOUR_API_KEY", )
- async def main() -> None:
- response = await client.prompts.call_stream()
- async for chunk in response:
- yield chunk
- asyncio.run(main())
+ Examples
+ --------
+ from humanloop import AsyncHumanloop
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY")
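+ # Stream a Prompt call; response chunks arrive incrementally from the provider.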
+ async def main() -> None:
+ response = await client.prompts.call_stream()
+ async for chunk in response:
+ print(chunk)
+ asyncio.run(main())
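+ # A fuller call is sketched here with hypothetical values: target a Prompt by
+ # path and pass template inputs, then iterate over the streamed chunks.
+ # response = await client.prompts.call_stream(path='persona', inputs={'person': 'Trump'})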
"""
async with self._raw_client.call_stream(
version_id=version_id,
@@ -2037,7 +2018,6 @@ async def main() -> None:
metadata=metadata,
start_time=start_time,
end_time=end_time,
- log_status=log_status,
source_datapoint_id=source_datapoint_id,
trace_parent_id=trace_parent_id,
user=user,
@@ -2069,7 +2049,6 @@ async def call(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -2084,119 +2063,120 @@ async def call(
request_options: typing.Optional[RequestOptions] = None,
) -> PromptCallResponse:
"""
- Call a Prompt.
+ Call a Prompt.
- Calling a Prompt calls the model provider before logging
- the request, responses and metadata to Humanloop.
+ Calling a Prompt calls the model provider before logging
+ the request, responses and metadata to Humanloop.
- You can use query parameters `version_id`, or `environment`, to target
- an existing version of the Prompt. Otherwise the default deployed version will be chosen.
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Prompt. Otherwise the default deployed version will be chosen.
- Instead of targeting an existing version explicitly, you can instead pass in
- Prompt details in the request body. In this case, we will check if the details correspond
- to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
- in the case where you are storing or deriving your Prompt details in code.
+ Instead of targeting an existing version explicitly, you can pass in
+ Prompt details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
+ when you are storing or deriving your Prompt details in code.
- Parameters
- ----------
- version_id : typing.Optional[str]
- A specific Version ID of the Prompt to log to.
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Prompt to log to.
- environment : typing.Optional[str]
- Name of the Environment identifying a deployed version to log to.
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
- path : typing.Optional[str]
- Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+ path : typing.Optional[str]
+ Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
- id : typing.Optional[str]
- ID for an existing Prompt.
+ id : typing.Optional[str]
+ ID for an existing Prompt.
- messages : typing.Optional[typing.Sequence[ChatMessageParams]]
- The messages passed to the to provider chat endpoint.
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
- tool_choice : typing.Optional[PromptsCallRequestToolChoiceParams]
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
+ tool_choice : typing.Optional[PromptsCallRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- prompt : typing.Optional[PromptsCallRequestPromptParams]
- The Prompt configuration to use. Two formats are supported:
- - An object representing the details of the Prompt configuration
- - A string representing the raw contents of a .prompt file
- A new Prompt version will be created if the provided details do not match any existing version.
+ prompt : typing.Optional[PromptsCallRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
- inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- The inputs passed to the prompt template.
+ A new Prompt version will be created if the provided details do not match any existing version.
- source : typing.Optional[str]
- Identifies where the model was called from.
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
- metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Any additional metadata to record.
+ source : typing.Optional[str]
+ Identifies where the model was called from.
- start_time : typing.Optional[dt.datetime]
- When the logged event started.
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
- end_time : typing.Optional[dt.datetime]
- When the logged event ended.
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
- source_datapoint_id : typing.Optional[str]
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs with Evaluations. If provided, Humanloop will automatically associate this Log with Evaluations that require a Log for this Datapoint-Version pair.
- trace_parent_id : typing.Optional[str]
- The ID of the parent Log to nest this Log under in a Trace.
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
- user : typing.Optional[str]
- End-user ID related to the Log.
+ user : typing.Optional[str]
+ End-user ID related to the Log.
- prompts_call_request_environment : typing.Optional[str]
- The name of the Environment the Log is associated to.
+ prompts_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated with.
- save : typing.Optional[bool]
- Whether the request/response payloads will be stored on Humanloop.
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
- log_id : typing.Optional[str]
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- provider_api_keys : typing.Optional[ProviderApiKeysParams]
- API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
- num_samples : typing.Optional[int]
- The number of generations.
+ num_samples : typing.Optional[int]
+ The number of generations.
- return_inputs : typing.Optional[bool]
- Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
- logprobs : typing.Optional[int]
- Include the log probabilities of the top n tokens in the provider_response
+ logprobs : typing.Optional[int]
+ Include the log probabilities of the top n tokens in the provider_response.
- suffix : typing.Optional[str]
- The suffix that comes after a completion of inserted text. Useful for completions that act like inserts.
+ suffix : typing.Optional[str]
+ The suffix that comes after a completion of inserted text. Useful for completions that act like inserts.
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- Returns
- -------
- PromptCallResponse
+ Returns
+ -------
+ PromptCallResponse
- Examples
- --------
- from humanloop import AsyncHumanloop
- import asyncio
- client = AsyncHumanloop(api_key="YOUR_API_KEY", )
- async def main() -> None:
- await client.prompts.call(path='persona', prompt={'model': 'gpt-4', 'template': [{'role': "system", 'content': 'You are stockbot. Return latest prices.'}], 'tools': [{'name': 'get_stock_price', 'description': 'Get current stock price', 'parameters': {'type': 'object'
- , 'properties': {'ticker_symbol': {'type': 'string', 'name': 'Ticker Symbol', 'description': 'Ticker symbol of the stock'}}
- , 'required': []
- }}]}, messages=[{'role': "user", 'content': 'latest apple'}], )
- asyncio.run(main())
+ Examples
+ --------
+ from humanloop import AsyncHumanloop
+ import asyncio
+ client = AsyncHumanloop(api_key="YOUR_API_KEY")
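+ # Call a Prompt that defines a get_stock_price tool; Humanloop performs the
+ # provider call and then logs the request, response and metadata.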
+ async def main() -> None:
+ await client.prompts.call(path='persona', prompt={'model': 'gpt-4', 'template': [{'role': "system", 'content': 'You are stockbot. Return latest prices.'}], 'tools': [{'name': 'get_stock_price', 'description': 'Get current stock price', 'parameters': {'type': 'object'
+ , 'properties': {'ticker_symbol': {'type': 'string', 'name': 'Ticker Symbol', 'description': 'Ticker symbol of the stock'}}
+ , 'required': []
+ }}]}, messages=[{'role': "user", 'content': 'latest apple'}], )
+ asyncio.run(main())
"""
_response = await self._raw_client.call(
version_id=version_id,
@@ -2211,7 +2191,6 @@ async def main() -> None:
metadata=metadata,
start_time=start_time,
end_time=end_time,
- log_status=log_status,
source_datapoint_id=source_datapoint_id,
trace_parent_id=trace_parent_id,
user=user,
diff --git a/src/humanloop/prompts/raw_client.py b/src/humanloop/prompts/raw_client.py
index 5d12b08..eee6710 100644
--- a/src/humanloop/prompts/raw_client.py
+++ b/src/humanloop/prompts/raw_client.py
@@ -32,7 +32,6 @@
from ..types.http_validation_error import HttpValidationError
from ..types.list_prompts import ListPrompts
from ..types.log_response import LogResponse
-from ..types.log_status import LogStatus
from ..types.model_endpoints import ModelEndpoints
from ..types.model_providers import ModelProviders
from ..types.paginated_data_prompt_response import PaginatedDataPromptResponse
@@ -92,7 +91,6 @@ def log(
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -202,9 +200,6 @@ def log(
metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Any additional metadata to record.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -272,7 +267,6 @@ def log(
"inputs": inputs,
"source": source,
"metadata": metadata,
- "log_status": log_status,
"source_datapoint_id": source_datapoint_id,
"trace_parent_id": trace_parent_id,
"user": user,
@@ -338,7 +332,6 @@ def update_log(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[LogResponse]:
"""
@@ -421,9 +414,6 @@ def update_log(
end_time : typing.Optional[dt.datetime]
When the logged event ended.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -463,7 +453,6 @@ def update_log(
"metadata": metadata,
"start_time": start_time,
"end_time": end_time,
- "log_status": log_status,
},
headers={
"content-type": "application/json",
@@ -513,7 +502,6 @@ def call_stream(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -528,106 +516,107 @@ def call_stream(
request_options: typing.Optional[RequestOptions] = None,
) -> typing.Iterator[HttpResponse[typing.Iterator[PromptCallStreamResponse]]]:
"""
- Call a Prompt.
+ Call a Prompt.
- Calling a Prompt calls the model provider before logging
- the request, responses and metadata to Humanloop.
+ Calling a Prompt calls the model provider before logging
+ the request, responses and metadata to Humanloop.
- You can use query parameters `version_id`, or `environment`, to target
- an existing version of the Prompt. Otherwise the default deployed version will be chosen.
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Prompt. Otherwise the default deployed version will be chosen.
- Instead of targeting an existing version explicitly, you can instead pass in
- Prompt details in the request body. In this case, we will check if the details correspond
- to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
- in the case where you are storing or deriving your Prompt details in code.
+ Instead of targeting an existing version explicitly, you can pass in
+ Prompt details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
+ when you are storing or deriving your Prompt details in code.
- Parameters
- ----------
- version_id : typing.Optional[str]
- A specific Version ID of the Prompt to log to.
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Prompt to log to.
- environment : typing.Optional[str]
- Name of the Environment identifying a deployed version to log to.
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
- path : typing.Optional[str]
- Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+ path : typing.Optional[str]
+ Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
- id : typing.Optional[str]
- ID for an existing Prompt.
+ id : typing.Optional[str]
+ ID for an existing Prompt.
- messages : typing.Optional[typing.Sequence[ChatMessageParams]]
- The messages passed to the to provider chat endpoint.
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
- tool_choice : typing.Optional[PromptsCallStreamRequestToolChoiceParams]
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
+ tool_choice : typing.Optional[PromptsCallStreamRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
- The Prompt configuration to use. Two formats are supported:
- - An object representing the details of the Prompt configuration
- - A string representing the raw contents of a .prompt file
- A new Prompt version will be created if the provided details do not match any existing version.
+ prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
- inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- The inputs passed to the prompt template.
+ A new Prompt version will be created if the provided details do not match any existing version.
- source : typing.Optional[str]
- Identifies where the model was called from.
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
- metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Any additional metadata to record.
+ source : typing.Optional[str]
+ Identifies where the model was called from.
- start_time : typing.Optional[dt.datetime]
- When the logged event started.
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
- end_time : typing.Optional[dt.datetime]
- When the logged event ended.
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
- source_datapoint_id : typing.Optional[str]
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs with Evaluations. If provided, Humanloop will automatically associate this Log with Evaluations that require a Log for this Datapoint-Version pair.
- trace_parent_id : typing.Optional[str]
- The ID of the parent Log to nest this Log under in a Trace.
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
- user : typing.Optional[str]
- End-user ID related to the Log.
+ user : typing.Optional[str]
+ End-user ID related to the Log.
- prompts_call_stream_request_environment : typing.Optional[str]
- The name of the Environment the Log is associated to.
+ prompts_call_stream_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated with.
- save : typing.Optional[bool]
- Whether the request/response payloads will be stored on Humanloop.
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
- log_id : typing.Optional[str]
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- provider_api_keys : typing.Optional[ProviderApiKeysParams]
- API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
- num_samples : typing.Optional[int]
- The number of generations.
+ num_samples : typing.Optional[int]
+ The number of generations.
- return_inputs : typing.Optional[bool]
- Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
- logprobs : typing.Optional[int]
- Include the log probabilities of the top n tokens in the provider_response
+ logprobs : typing.Optional[int]
+ Include the log probabilities of the top n tokens in the provider_response.
- suffix : typing.Optional[str]
- The suffix that comes after a completion of inserted text. Useful for completions that act like inserts.
+ suffix : typing.Optional[str]
+ The suffix that comes after a completion of inserted text. Useful for completions that act like inserts.
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- Yields
- ------
- typing.Iterator[HttpResponse[typing.Iterator[PromptCallStreamResponse]]]
+ Yields
+ ------
+ typing.Iterator[HttpResponse[typing.Iterator[PromptCallStreamResponse]]]
"""
with self._client_wrapper.httpx_client.stream(
@@ -654,7 +643,6 @@ def call_stream(
"metadata": metadata,
"start_time": start_time,
"end_time": end_time,
- "log_status": log_status,
"source_datapoint_id": source_datapoint_id,
"trace_parent_id": trace_parent_id,
"user": user,
@@ -735,7 +723,6 @@ def call(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -750,106 +737,107 @@ def call(
request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[PromptCallResponse]:
"""
- Call a Prompt.
+ Call a Prompt.
- Calling a Prompt calls the model provider before logging
- the request, responses and metadata to Humanloop.
+ Calling a Prompt calls the model provider before logging
+ the request, responses and metadata to Humanloop.
- You can use query parameters `version_id`, or `environment`, to target
- an existing version of the Prompt. Otherwise the default deployed version will be chosen.
+ You can use the query parameters `version_id` or `environment` to target
+ an existing version of the Prompt. Otherwise the default deployed version will be chosen.
- Instead of targeting an existing version explicitly, you can instead pass in
- Prompt details in the request body. In this case, we will check if the details correspond
- to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
- in the case where you are storing or deriving your Prompt details in code.
+ Instead of targeting an existing version explicitly, you can pass in
+ Prompt details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
+ when you are storing or deriving your Prompt details in code.
- Parameters
- ----------
- version_id : typing.Optional[str]
- A specific Version ID of the Prompt to log to.
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Prompt to log to.
- environment : typing.Optional[str]
- Name of the Environment identifying a deployed version to log to.
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
- path : typing.Optional[str]
- Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+ path : typing.Optional[str]
+ Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
- id : typing.Optional[str]
- ID for an existing Prompt.
+ id : typing.Optional[str]
+ ID for an existing Prompt.
- messages : typing.Optional[typing.Sequence[ChatMessageParams]]
- The messages passed to the to provider chat endpoint.
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
- tool_choice : typing.Optional[PromptsCallRequestToolChoiceParams]
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
+ tool_choice : typing.Optional[PromptsCallRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- prompt : typing.Optional[PromptsCallRequestPromptParams]
- The Prompt configuration to use. Two formats are supported:
- - An object representing the details of the Prompt configuration
- - A string representing the raw contents of a .prompt file
- A new Prompt version will be created if the provided details do not match any existing version.
+ prompt : typing.Optional[PromptsCallRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
- inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- The inputs passed to the prompt template.
+ A new Prompt version will be created if the provided details do not match any existing version.
- source : typing.Optional[str]
- Identifies where the model was called from.
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
- metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Any additional metadata to record.
+ source : typing.Optional[str]
+ Identifies where the model was called from.
- start_time : typing.Optional[dt.datetime]
- When the logged event started.
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
- end_time : typing.Optional[dt.datetime]
- When the logged event ended.
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
- source_datapoint_id : typing.Optional[str]
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs with Evaluations. If provided, Humanloop will automatically associate this Log with Evaluations that require a Log for this Datapoint-Version pair.
- trace_parent_id : typing.Optional[str]
- The ID of the parent Log to nest this Log under in a Trace.
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
- user : typing.Optional[str]
- End-user ID related to the Log.
+ user : typing.Optional[str]
+ End-user ID related to the Log.
- prompts_call_request_environment : typing.Optional[str]
- The name of the Environment the Log is associated to.
+ prompts_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated with.
- save : typing.Optional[bool]
- Whether the request/response payloads will be stored on Humanloop.
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
- log_id : typing.Optional[str]
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- provider_api_keys : typing.Optional[ProviderApiKeysParams]
- API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
- num_samples : typing.Optional[int]
- The number of generations.
+ num_samples : typing.Optional[int]
+ The number of generations.
- return_inputs : typing.Optional[bool]
- Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
- logprobs : typing.Optional[int]
- Include the log probabilities of the top n tokens in the provider_response
+ logprobs : typing.Optional[int]
+ Include the log probabilities of the top n tokens in the provider_response.
- suffix : typing.Optional[str]
- The suffix that comes after a completion of inserted text. Useful for completions that act like inserts.
+ suffix : typing.Optional[str]
+ The suffix that comes after a completion of inserted text. Useful for completions that act like inserts.
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- Returns
- -------
- HttpResponse[PromptCallResponse]
+ Returns
+ -------
+ HttpResponse[PromptCallResponse]
"""
_response = self._client_wrapper.httpx_client.request(
@@ -876,7 +864,6 @@ def call(
"metadata": metadata,
"start_time": start_time,
"end_time": end_time,
- "log_status": log_status,
"source_datapoint_id": source_datapoint_id,
"trace_parent_id": trace_parent_id,
"user": user,
@@ -2049,7 +2036,6 @@ async def log(
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -2059,134 +2045,135 @@ async def log(
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[CreatePromptLogResponse]:
"""
- Log to a Prompt.
+ Log to a Prompt.
- You can use query parameters `version_id`, or `environment`, to target
- an existing version of the Prompt. Otherwise, the default deployed version will be chosen.
+ You can use query parameters `version_id` or `environment` to target
+ an existing version of the Prompt. Otherwise, the default deployed version will be chosen.
- Instead of targeting an existing version explicitly, you can instead pass in
- Prompt details in the request body. In this case, we will check if the details correspond
- to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
- in the case where you are storing or deriving your Prompt details in code.
+ Instead of targeting an existing version explicitly, you can instead pass in
+ Prompt details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Prompt details in code.
- Parameters
- ----------
- version_id : typing.Optional[str]
- A specific Version ID of the Prompt to log to.
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Prompt to log to.
- environment : typing.Optional[str]
- Name of the Environment identifying a deployed version to log to.
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
- run_id : typing.Optional[str]
- Unique identifier for the Run to associate the Log to.
+ run_id : typing.Optional[str]
+ Unique identifier for the Run to associate the Log to.
- path : typing.Optional[str]
- Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+ path : typing.Optional[str]
+ Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
- id : typing.Optional[str]
- ID for an existing Prompt.
+ id : typing.Optional[str]
+ ID for an existing Prompt.
- output_message : typing.Optional[ChatMessageParams]
- The message returned by the provider.
+ output_message : typing.Optional[ChatMessageParams]
+ The message returned by the provider.
- prompt_tokens : typing.Optional[int]
- Number of tokens in the prompt used to generate the output.
+ prompt_tokens : typing.Optional[int]
+ Number of tokens in the prompt used to generate the output.
- reasoning_tokens : typing.Optional[int]
- Number of reasoning tokens used to generate the output.
+ reasoning_tokens : typing.Optional[int]
+ Number of reasoning tokens used to generate the output.
- output_tokens : typing.Optional[int]
- Number of tokens in the output generated by the model.
+ output_tokens : typing.Optional[int]
+ Number of tokens in the output generated by the model.
- prompt_cost : typing.Optional[float]
- Cost in dollars associated to the tokens in the prompt.
+ prompt_cost : typing.Optional[float]
+ Cost in dollars associated with the tokens in the prompt.
- output_cost : typing.Optional[float]
- Cost in dollars associated to the tokens in the output.
+ output_cost : typing.Optional[float]
+ Cost in dollars associated with the tokens in the output.
- finish_reason : typing.Optional[str]
- Reason the generation finished.
+ finish_reason : typing.Optional[str]
+ Reason the generation finished.
- messages : typing.Optional[typing.Sequence[ChatMessageParams]]
- The messages passed to the to provider chat endpoint.
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
- tool_choice : typing.Optional[PromptLogRequestToolChoiceParams]
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
+ tool_choice : typing.Optional[PromptLogRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptLogRequestPromptParams]
- The Prompt configuration to use. Two formats are supported:
- - An object representing the details of the Prompt configuration
- - A string representing the raw contents of a .prompt file
- A new Prompt version will be created if the provided details do not match any existing version.
+ prompt : typing.Optional[PromptLogRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
- start_time : typing.Optional[dt.datetime]
- When the logged event started.
- end_time : typing.Optional[dt.datetime]
- When the logged event ended.
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
- output : typing.Optional[str]
- Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
- created_at : typing.Optional[dt.datetime]
- User defined timestamp for when the log was created.
+ output : typing.Optional[str]
+ Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
- error : typing.Optional[str]
- Error message if the log is an error.
+ created_at : typing.Optional[dt.datetime]
+ User defined timestamp for when the log was created.
- provider_latency : typing.Optional[float]
- Duration of the logged event in seconds.
+ error : typing.Optional[str]
+ Error message if the log is an error.
- stdout : typing.Optional[str]
- Captured log and debug statements.
+ provider_latency : typing.Optional[float]
+ Duration of the logged event in seconds.
- provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Raw request sent to provider.
+ stdout : typing.Optional[str]
+ Captured log and debug statements.
- provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Raw response received the provider.
+ provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw request sent to provider.
- inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- The inputs passed to the prompt template.
+ provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Raw response received from the provider.
- source : typing.Optional[str]
- Identifies where the model was called from.
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
- metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Any additional metadata to record.
+ source : typing.Optional[str]
+ Identifies where the model was called from.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
- source_datapoint_id : typing.Optional[str]
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
- trace_parent_id : typing.Optional[str]
- The ID of the parent Log to nest this Log under in a Trace.
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
- user : typing.Optional[str]
- End-user ID related to the Log.
+ user : typing.Optional[str]
+ End-user ID related to the Log.
- prompt_log_request_environment : typing.Optional[str]
- The name of the Environment the Log is associated to.
+ prompt_log_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated with.
- save : typing.Optional[bool]
- Whether the request/response payloads will be stored on Humanloop.
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
- log_id : typing.Optional[str]
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- Returns
- -------
- AsyncHttpResponse[CreatePromptLogResponse]
- Successful Response
+ Returns
+ -------
+ AsyncHttpResponse[CreatePromptLogResponse]
+ Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
"prompts/log",
@@ -2229,7 +2216,6 @@ async def log(
"inputs": inputs,
"source": source,
"metadata": metadata,
- "log_status": log_status,
"source_datapoint_id": source_datapoint_id,
"trace_parent_id": trace_parent_id,
"user": user,
@@ -2295,7 +2281,6 @@ async def update_log(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[LogResponse]:
"""
@@ -2378,9 +2363,6 @@ async def update_log(
end_time : typing.Optional[dt.datetime]
When the logged event ended.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -2420,7 +2402,6 @@ async def update_log(
"metadata": metadata,
"start_time": start_time,
"end_time": end_time,
- "log_status": log_status,
},
headers={
"content-type": "application/json",
@@ -2470,7 +2451,6 @@ async def call_stream(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -2485,106 +2465,107 @@ async def call_stream(
request_options: typing.Optional[RequestOptions] = None,
) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[PromptCallStreamResponse]]]:
"""
- Call a Prompt.
+ Call a Prompt.
- Calling a Prompt calls the model provider before logging
- the request, responses and metadata to Humanloop.
+ Calling a Prompt calls the model provider before logging
+ the request, responses and metadata to Humanloop.
- You can use query parameters `version_id`, or `environment`, to target
- an existing version of the Prompt. Otherwise the default deployed version will be chosen.
+ You can use query parameters `version_id` or `environment` to target
+ an existing version of the Prompt. Otherwise, the default deployed version will be chosen.
- Instead of targeting an existing version explicitly, you can instead pass in
- Prompt details in the request body. In this case, we will check if the details correspond
- to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
- in the case where you are storing or deriving your Prompt details in code.
+ Instead of targeting an existing version explicitly, you can instead pass in
+ Prompt details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Prompt details in code.
- Parameters
- ----------
- version_id : typing.Optional[str]
- A specific Version ID of the Prompt to log to.
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Prompt to log to.
- environment : typing.Optional[str]
- Name of the Environment identifying a deployed version to log to.
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
- path : typing.Optional[str]
- Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+ path : typing.Optional[str]
+ Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
- id : typing.Optional[str]
- ID for an existing Prompt.
+ id : typing.Optional[str]
+ ID for an existing Prompt.
- messages : typing.Optional[typing.Sequence[ChatMessageParams]]
- The messages passed to the to provider chat endpoint.
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
- tool_choice : typing.Optional[PromptsCallStreamRequestToolChoiceParams]
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
+ tool_choice : typing.Optional[PromptsCallStreamRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
- The Prompt configuration to use. Two formats are supported:
- - An object representing the details of the Prompt configuration
- - A string representing the raw contents of a .prompt file
- A new Prompt version will be created if the provided details do not match any existing version.
+ prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
- inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- The inputs passed to the prompt template.
- source : typing.Optional[str]
- Identifies where the model was called from.
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
- metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Any additional metadata to record.
+ source : typing.Optional[str]
+ Identifies where the model was called from.
- start_time : typing.Optional[dt.datetime]
- When the logged event started.
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
- end_time : typing.Optional[dt.datetime]
- When the logged event ended.
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
- source_datapoint_id : typing.Optional[str]
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
- trace_parent_id : typing.Optional[str]
- The ID of the parent Log to nest this Log under in a Trace.
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
- user : typing.Optional[str]
- End-user ID related to the Log.
+ user : typing.Optional[str]
+ End-user ID related to the Log.
- prompts_call_stream_request_environment : typing.Optional[str]
- The name of the Environment the Log is associated to.
+ prompts_call_stream_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated with.
- save : typing.Optional[bool]
- Whether the request/response payloads will be stored on Humanloop.
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
- log_id : typing.Optional[str]
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- provider_api_keys : typing.Optional[ProviderApiKeysParams]
- API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
- num_samples : typing.Optional[int]
- The number of generations.
+ num_samples : typing.Optional[int]
+ The number of generations.
- return_inputs : typing.Optional[bool]
- Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
- logprobs : typing.Optional[int]
- Include the log probabilities of the top n tokens in the provider_response
+ logprobs : typing.Optional[int]
+ Include the log probabilities of the top n tokens in the provider_response.
- suffix : typing.Optional[str]
- The suffix that comes after a completion of inserted text. Useful for completions that act like inserts.
+ suffix : typing.Optional[str]
+ The suffix that comes after a completion of inserted text. Useful for completions that act like inserts.
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- Yields
- ------
- typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[PromptCallStreamResponse]]]
+ Yields
+ ------
+ typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[PromptCallStreamResponse]]]
"""
async with self._client_wrapper.httpx_client.stream(
@@ -2611,7 +2592,6 @@ async def call_stream(
"metadata": metadata,
"start_time": start_time,
"end_time": end_time,
- "log_status": log_status,
"source_datapoint_id": source_datapoint_id,
"trace_parent_id": trace_parent_id,
"user": user,
@@ -2692,7 +2672,6 @@ async def call(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -2707,106 +2686,107 @@ async def call(
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[PromptCallResponse]:
"""
- Call a Prompt.
+ Call a Prompt.
- Calling a Prompt calls the model provider before logging
- the request, responses and metadata to Humanloop.
+ Calling a Prompt calls the model provider before logging
+ the request, responses and metadata to Humanloop.
- You can use query parameters `version_id`, or `environment`, to target
- an existing version of the Prompt. Otherwise the default deployed version will be chosen.
+ You can use query parameters `version_id` or `environment` to target
+ an existing version of the Prompt. Otherwise, the default deployed version will be chosen.
- Instead of targeting an existing version explicitly, you can instead pass in
- Prompt details in the request body. In this case, we will check if the details correspond
- to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
- in the case where you are storing or deriving your Prompt details in code.
+ Instead of targeting an existing version explicitly, you can instead pass in
+ Prompt details in the request body. In this case, we will check if the details correspond
+ to an existing version of the Prompt. If they do not, we will create a new version. This is helpful
+ in the case where you are storing or deriving your Prompt details in code.
- Parameters
- ----------
- version_id : typing.Optional[str]
- A specific Version ID of the Prompt to log to.
+ Parameters
+ ----------
+ version_id : typing.Optional[str]
+ A specific Version ID of the Prompt to log to.
- environment : typing.Optional[str]
- Name of the Environment identifying a deployed version to log to.
+ environment : typing.Optional[str]
+ Name of the Environment identifying a deployed version to log to.
- path : typing.Optional[str]
- Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`.
+ path : typing.Optional[str]
+ Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
- id : typing.Optional[str]
- ID for an existing Prompt.
+ id : typing.Optional[str]
+ ID for an existing Prompt.
- messages : typing.Optional[typing.Sequence[ChatMessageParams]]
- The messages passed to the to provider chat endpoint.
+ messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+ The messages passed to the provider chat endpoint.
- tool_choice : typing.Optional[PromptsCallRequestToolChoiceParams]
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
+ tool_choice : typing.Optional[PromptsCallRequestToolChoiceParams]
+ Controls how the model uses tools. The following options are supported:
+ - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
+ - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
+ - `'required'` means the model must call one or more of the provided tools.
+ - `{'type': 'function', 'function': {'name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptsCallRequestPromptParams]
- The Prompt configuration to use. Two formats are supported:
- - An object representing the details of the Prompt configuration
- - A string representing the raw contents of a .prompt file
- A new Prompt version will be created if the provided details do not match any existing version.
+ prompt : typing.Optional[PromptsCallRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
- inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- The inputs passed to the prompt template.
- source : typing.Optional[str]
- Identifies where the model was called from.
+ inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The inputs passed to the prompt template.
- metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- Any additional metadata to record.
+ source : typing.Optional[str]
+ Identifies where the model was called from.
- start_time : typing.Optional[dt.datetime]
- When the logged event started.
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ Any additional metadata to record.
- end_time : typing.Optional[dt.datetime]
- When the logged event ended.
+ start_time : typing.Optional[dt.datetime]
+ When the logged event started.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+ end_time : typing.Optional[dt.datetime]
+ When the logged event ended.
- source_datapoint_id : typing.Optional[str]
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+ source_datapoint_id : typing.Optional[str]
+ Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
- trace_parent_id : typing.Optional[str]
- The ID of the parent Log to nest this Log under in a Trace.
+ trace_parent_id : typing.Optional[str]
+ The ID of the parent Log to nest this Log under in a Trace.
- user : typing.Optional[str]
- End-user ID related to the Log.
+ user : typing.Optional[str]
+ End-user ID related to the Log.
- prompts_call_request_environment : typing.Optional[str]
- The name of the Environment the Log is associated to.
+ prompts_call_request_environment : typing.Optional[str]
+ The name of the Environment the Log is associated with.
- save : typing.Optional[bool]
- Whether the request/response payloads will be stored on Humanloop.
+ save : typing.Optional[bool]
+ Whether the request/response payloads will be stored on Humanloop.
- log_id : typing.Optional[str]
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+ log_id : typing.Optional[str]
+ This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- provider_api_keys : typing.Optional[ProviderApiKeysParams]
- API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
+ provider_api_keys : typing.Optional[ProviderApiKeysParams]
+ API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization.
- num_samples : typing.Optional[int]
- The number of generations.
+ num_samples : typing.Optional[int]
+ The number of generations.
- return_inputs : typing.Optional[bool]
- Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
+ return_inputs : typing.Optional[bool]
+ Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true.
- logprobs : typing.Optional[int]
- Include the log probabilities of the top n tokens in the provider_response
+ logprobs : typing.Optional[int]
+ Include the log probabilities of the top n tokens in the provider_response.
- suffix : typing.Optional[str]
- The suffix that comes after a completion of inserted text. Useful for completions that act like inserts.
+ suffix : typing.Optional[str]
+ The suffix that comes after a completion of inserted text. Useful for completions that act like inserts.
- request_options : typing.Optional[RequestOptions]
- Request-specific configuration.
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
- Returns
- -------
- AsyncHttpResponse[PromptCallResponse]
+ Returns
+ -------
+ AsyncHttpResponse[PromptCallResponse]
"""
_response = await self._client_wrapper.httpx_client.request(
@@ -2833,7 +2813,6 @@ async def call(
"metadata": metadata,
"start_time": start_time,
"end_time": end_time,
- "log_status": log_status,
"source_datapoint_id": source_datapoint_id,
"trace_parent_id": trace_parent_id,
"user": user,
diff --git a/src/humanloop/requests/agent_log_response.py b/src/humanloop/requests/agent_log_response.py
index 940f348..65ff3d4 100644
--- a/src/humanloop/requests/agent_log_response.py
+++ b/src/humanloop/requests/agent_log_response.py
@@ -6,7 +6,6 @@
import typing
import typing_extensions
-from ..types.log_status import LogStatus
from .agent_log_response_tool_choice import AgentLogResponseToolChoiceParams
from .agent_response import AgentResponseParams
from .chat_message import ChatMessageParams
@@ -135,11 +134,6 @@ class AgentLogResponseParams(typing_extensions.TypedDict):
Any additional metadata to record.
"""
- log_status: typing_extensions.NotRequired[LogStatus]
- """
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
- """
-
source_datapoint_id: typing_extensions.NotRequired[str]
"""
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
diff --git a/src/humanloop/requests/create_agent_log_response.py b/src/humanloop/requests/create_agent_log_response.py
index f68f2e9..fe654f5 100644
--- a/src/humanloop/requests/create_agent_log_response.py
+++ b/src/humanloop/requests/create_agent_log_response.py
@@ -26,5 +26,5 @@ class CreateAgentLogResponseParams(typing_extensions.TypedDict):
log_status: typing_extensions.NotRequired[LogStatus]
"""
- Status of the Agent Log. When a Agent Log is marked as `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs.
+ Status of the Agent Log. When an Agent Log is marked as `complete`, no more Logs can be added to it.
"""
diff --git a/src/humanloop/requests/create_flow_log_response.py b/src/humanloop/requests/create_flow_log_response.py
index 6f490ba..5be10c8 100644
--- a/src/humanloop/requests/create_flow_log_response.py
+++ b/src/humanloop/requests/create_flow_log_response.py
@@ -26,5 +26,5 @@ class CreateFlowLogResponseParams(typing_extensions.TypedDict):
log_status: typing_extensions.NotRequired[LogStatus]
"""
- Status of the Flow Log. When a Flow Log is marked as `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs.
+ Status of the Flow Log. When a Flow Log is marked as `complete`, no more Logs can be added to it.
"""
diff --git a/src/humanloop/requests/evaluator_log_response.py b/src/humanloop/requests/evaluator_log_response.py
index c434280..a2860c4 100644
--- a/src/humanloop/requests/evaluator_log_response.py
+++ b/src/humanloop/requests/evaluator_log_response.py
@@ -6,7 +6,6 @@
import typing
import typing_extensions
-from ..types.log_status import LogStatus
from .chat_message import ChatMessageParams
from .evaluator_log_response_judgment import EvaluatorLogResponseJudgmentParams
from .evaluator_response import EvaluatorResponseParams
@@ -80,11 +79,6 @@ class EvaluatorLogResponseParams(typing_extensions.TypedDict):
Any additional metadata to record.
"""
- log_status: typing_extensions.NotRequired[LogStatus]
- """
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
- """
-
parent_id: typing_extensions.NotRequired[str]
"""
Identifier of the evaluated Log. The newly created Log will have this one set as parent.
diff --git a/src/humanloop/requests/flow_log_response.py b/src/humanloop/requests/flow_log_response.py
index 661fc30..f930c37 100644
--- a/src/humanloop/requests/flow_log_response.py
+++ b/src/humanloop/requests/flow_log_response.py
@@ -92,7 +92,7 @@ class FlowLogResponseParams(typing_extensions.TypedDict):
log_status: typing_extensions.NotRequired[LogStatus]
"""
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+ Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. You cannot update a Flow Log's status from `complete` to `incomplete`.
"""
source_datapoint_id: typing_extensions.NotRequired[str]
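Flow (and Agent) Logs keep `log_status`, but the rewritten description above narrows its semantics to trace completion: once a Flow Log is `complete`, no further Logs can be added to it, and the status cannot be moved back to `incomplete`. A sketch of that lifecycle, assuming the flows client exposes `log` and `update_log` methods analogous to the prompts client (neither appears in this diff), with `client` as constructed in the earlier sketch:

    # Open the trace container; child Logs can still be attached.
    flow_log = client.flows.log(path="flows/support", log_status="incomplete")

    # Nest a child Log under it via trace_parent_id, as documented above.
    client.prompts.log(
        path="folder/my-prompt",
        trace_parent_id=flow_log.id,
        output="...",
    )

    # Seal the trace; per the new docstring, reverting to "incomplete"
    # is not allowed once complete.
    client.flows.update_log(log_id=flow_log.id, log_status="complete")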
diff --git a/src/humanloop/requests/prompt_call_response.py b/src/humanloop/requests/prompt_call_response.py
index 14ff460..cbbbfec 100644
--- a/src/humanloop/requests/prompt_call_response.py
+++ b/src/humanloop/requests/prompt_call_response.py
@@ -4,7 +4,6 @@
import typing
import typing_extensions
-from ..types.log_status import LogStatus
from .chat_message import ChatMessageParams
from .prompt_call_log_response import PromptCallLogResponseParams
from .prompt_call_response_tool_choice import PromptCallResponseToolChoiceParams
@@ -60,11 +59,6 @@ class PromptCallResponseParams(typing_extensions.TypedDict):
Any additional metadata to record.
"""
- log_status: typing_extensions.NotRequired[LogStatus]
- """
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
- """
-
source_datapoint_id: typing_extensions.NotRequired[str]
"""
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
diff --git a/src/humanloop/requests/prompt_log_response.py b/src/humanloop/requests/prompt_log_response.py
index 6147ade..c91d331 100644
--- a/src/humanloop/requests/prompt_log_response.py
+++ b/src/humanloop/requests/prompt_log_response.py
@@ -6,7 +6,6 @@
import typing
import typing_extensions
-from ..types.log_status import LogStatus
from .chat_message import ChatMessageParams
from .prompt_log_response_tool_choice import PromptLogResponseToolChoiceParams
from .prompt_response import PromptResponseParams
@@ -135,11 +134,6 @@ class PromptLogResponseParams(typing_extensions.TypedDict):
Any additional metadata to record.
"""
- log_status: typing_extensions.NotRequired[LogStatus]
- """
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
- """
-
source_datapoint_id: typing_extensions.NotRequired[str]
"""
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
diff --git a/src/humanloop/requests/provider_api_keys.py b/src/humanloop/requests/provider_api_keys.py
index c37649e..e7baf03 100644
--- a/src/humanloop/requests/provider_api_keys.py
+++ b/src/humanloop/requests/provider_api_keys.py
@@ -1,12 +1,10 @@
# This file was auto-generated by Fern from our API Definition.
import typing_extensions
-from ..core.serialization import FieldMetadata
class ProviderApiKeysParams(typing_extensions.TypedDict):
openai: typing_extensions.NotRequired[str]
- ai_21: typing_extensions.NotRequired[typing_extensions.Annotated[str, FieldMetadata(alias="ai21")]]
mock: typing_extensions.NotRequired[str]
anthropic: typing_extensions.NotRequired[str]
deepseek: typing_extensions.NotRequired[str]
@@ -14,3 +12,4 @@ class ProviderApiKeysParams(typing_extensions.TypedDict):
cohere: typing_extensions.NotRequired[str]
openai_azure: typing_extensions.NotRequired[str]
openai_azure_endpoint: typing_extensions.NotRequired[str]
+ google: typing_extensions.NotRequired[str]
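`ProviderApiKeysParams` is a plain TypedDict, so per-request provider keys are passed as a dict: after this change a `google` key is accepted, and the aliased `ai21` field (along with its `FieldMetadata` import) is gone. For example, with placeholder values:

    response = client.prompts.call(
        path="folder/my-prompt",
        provider_api_keys={
            "openai": "sk-...",   # placeholder
            "google": "AIza...",  # placeholder; newly accepted field
        },
    )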
diff --git a/src/humanloop/requests/tool_call_response.py b/src/humanloop/requests/tool_call_response.py
index e00069d..92d6a59 100644
--- a/src/humanloop/requests/tool_call_response.py
+++ b/src/humanloop/requests/tool_call_response.py
@@ -4,7 +4,6 @@
import typing
import typing_extensions
-from ..types.log_status import LogStatus
from .evaluator_log_response import EvaluatorLogResponseParams
from .log_response import LogResponseParams
from .tool_response import ToolResponseParams
@@ -80,11 +79,6 @@ class ToolCallResponseParams(typing_extensions.TypedDict):
Any additional metadata to record.
"""
- log_status: typing_extensions.NotRequired[LogStatus]
- """
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
- """
-
source_datapoint_id: typing_extensions.NotRequired[str]
"""
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
diff --git a/src/humanloop/requests/tool_log_response.py b/src/humanloop/requests/tool_log_response.py
index f4be5ad..06835c1 100644
--- a/src/humanloop/requests/tool_log_response.py
+++ b/src/humanloop/requests/tool_log_response.py
@@ -6,7 +6,6 @@
import typing
import typing_extensions
-from ..types.log_status import LogStatus
from .chat_message import ChatMessageParams
from .tool_response import ToolResponseParams
@@ -80,11 +79,6 @@ class ToolLogResponseParams(typing_extensions.TypedDict):
Any additional metadata to record.
"""
- log_status: typing_extensions.NotRequired[LogStatus]
- """
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
- """
-
source_datapoint_id: typing_extensions.NotRequired[str]
"""
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
diff --git a/src/humanloop/tools/client.py b/src/humanloop/tools/client.py
index d8449a7..eec589d 100644
--- a/src/humanloop/tools/client.py
+++ b/src/humanloop/tools/client.py
@@ -22,7 +22,6 @@
from ..types.files_tool_type import FilesToolType
from ..types.list_tools import ListTools
from ..types.log_response import LogResponse
-from ..types.log_status import LogStatus
from ..types.sort_order import SortOrder
from ..types.tool_call_response import ToolCallResponse
from ..types.tool_response import ToolResponse
@@ -60,7 +59,6 @@ def call(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -114,9 +112,6 @@ def call(
end_time : typing.Optional[dt.datetime]
When the logged event ended.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -160,7 +155,6 @@ def call(
metadata=metadata,
start_time=start_time,
end_time=end_time,
- log_status=log_status,
source_datapoint_id=source_datapoint_id,
trace_parent_id=trace_parent_id,
user=user,
@@ -191,7 +185,6 @@ def log(
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -264,9 +257,6 @@ def log(
metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Any additional metadata to record.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -322,7 +312,6 @@ def log(
inputs=inputs,
source=source,
metadata=metadata,
- log_status=log_status,
source_datapoint_id=source_datapoint_id,
trace_parent_id=trace_parent_id,
user=user,
@@ -350,7 +339,6 @@ def update(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> LogResponse:
"""
@@ -402,9 +390,6 @@ def update(
end_time : typing.Optional[dt.datetime]
When the logged event ended.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -434,7 +419,6 @@ def update(
metadata=metadata,
start_time=start_time,
end_time=end_time,
- log_status=log_status,
request_options=request_options,
)
return _response.data
@@ -1066,7 +1050,6 @@ async def call(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -1120,9 +1103,6 @@ async def call(
end_time : typing.Optional[dt.datetime]
When the logged event ended.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -1169,7 +1149,6 @@ async def main() -> None:
metadata=metadata,
start_time=start_time,
end_time=end_time,
- log_status=log_status,
source_datapoint_id=source_datapoint_id,
trace_parent_id=trace_parent_id,
user=user,
@@ -1200,7 +1179,6 @@ async def log(
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -1273,9 +1251,6 @@ async def log(
metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Any additional metadata to record.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -1334,7 +1309,6 @@ async def main() -> None:
inputs=inputs,
source=source,
metadata=metadata,
- log_status=log_status,
source_datapoint_id=source_datapoint_id,
trace_parent_id=trace_parent_id,
user=user,
@@ -1362,7 +1336,6 @@ async def update(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> LogResponse:
"""
@@ -1414,9 +1387,6 @@ async def update(
end_time : typing.Optional[dt.datetime]
When the logged event ended.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -1449,7 +1419,6 @@ async def main() -> None:
metadata=metadata,
start_time=start_time,
end_time=end_time,
- log_status=log_status,
request_options=request_options,
)
return _response.data
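The same removal runs through the tools client: `call`, `log`, and `update` no longer accept `log_status`, leaving only payload fields such as `metadata`, `start_time`, and `end_time` updatable. A usage sketch mirroring the docstrings above; the tool path and the `id` argument to `update` are illustrative assumptions:

    import datetime as dt

    tool_log = client.tools.log(
        path="tools/calculator",  # illustrative path
        inputs={"a": 1, "b": 2},
        output="3",
    )

    # Update the Log's payload; completion status is no longer part of this call.
    client.tools.update(
        id=tool_log.id,  # hypothetical identifier argument
        metadata={"reviewed": True},
        end_time=dt.datetime.now(dt.timezone.utc),
    )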
diff --git a/src/humanloop/tools/raw_client.py b/src/humanloop/tools/raw_client.py
index 85bbef9..9065796 100644
--- a/src/humanloop/tools/raw_client.py
+++ b/src/humanloop/tools/raw_client.py
@@ -30,7 +30,6 @@
from ..types.http_validation_error import HttpValidationError
from ..types.list_tools import ListTools
from ..types.log_response import LogResponse
-from ..types.log_status import LogStatus
from ..types.paginated_data_tool_response import PaginatedDataToolResponse
from ..types.sort_order import SortOrder
from ..types.tool_call_response import ToolCallResponse
@@ -57,7 +56,6 @@ def call(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -111,9 +109,6 @@ def call(
end_time : typing.Optional[dt.datetime]
When the logged event ended.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -158,7 +153,6 @@ def call(
"metadata": metadata,
"start_time": start_time,
"end_time": end_time,
- "log_status": log_status,
"source_datapoint_id": source_datapoint_id,
"trace_parent_id": trace_parent_id,
"user": user,
@@ -218,7 +212,6 @@ def log(
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -291,9 +284,6 @@ def log(
metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Any additional metadata to record.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -345,7 +335,6 @@ def log(
"inputs": inputs,
"source": source,
"metadata": metadata,
- "log_status": log_status,
"source_datapoint_id": source_datapoint_id,
"trace_parent_id": trace_parent_id,
"user": user,
@@ -402,7 +391,6 @@ def update(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[LogResponse]:
"""
@@ -454,9 +442,6 @@ def update(
end_time : typing.Optional[dt.datetime]
When the logged event ended.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -481,7 +466,6 @@ def update(
"metadata": metadata,
"start_time": start_time,
"end_time": end_time,
- "log_status": log_status,
},
headers={
"content-type": "application/json",
@@ -1493,7 +1477,6 @@ async def call(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -1547,9 +1530,6 @@ async def call(
end_time : typing.Optional[dt.datetime]
When the logged event ended.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -1594,7 +1574,6 @@ async def call(
"metadata": metadata,
"start_time": start_time,
"end_time": end_time,
- "log_status": log_status,
"source_datapoint_id": source_datapoint_id,
"trace_parent_id": trace_parent_id,
"user": user,
@@ -1654,7 +1633,6 @@ async def log(
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
source_datapoint_id: typing.Optional[str] = OMIT,
trace_parent_id: typing.Optional[str] = OMIT,
user: typing.Optional[str] = OMIT,
@@ -1727,9 +1705,6 @@ async def log(
metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Any additional metadata to record.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
source_datapoint_id : typing.Optional[str]
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
@@ -1781,7 +1756,6 @@ async def log(
"inputs": inputs,
"source": source,
"metadata": metadata,
- "log_status": log_status,
"source_datapoint_id": source_datapoint_id,
"trace_parent_id": trace_parent_id,
"user": user,
@@ -1838,7 +1812,6 @@ async def update(
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
- log_status: typing.Optional[LogStatus] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[LogResponse]:
"""
@@ -1890,9 +1863,6 @@ async def update(
end_time : typing.Optional[dt.datetime]
When the logged event ended.
- log_status : typing.Optional[LogStatus]
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
-
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -1917,7 +1887,6 @@ async def update(
"metadata": metadata,
"start_time": start_time,
"end_time": end_time,
- "log_status": log_status,
},
headers={
"content-type": "application/json",
diff --git a/src/humanloop/types/agent_log_response.py b/src/humanloop/types/agent_log_response.py
index 634ad4d..0128b22 100644
--- a/src/humanloop/types/agent_log_response.py
+++ b/src/humanloop/types/agent_log_response.py
@@ -10,7 +10,6 @@
from ..core.unchecked_base_model import UncheckedBaseModel
from .agent_log_response_tool_choice import AgentLogResponseToolChoice
from .chat_message import ChatMessage
-from .log_status import LogStatus
class AgentLogResponse(UncheckedBaseModel):
@@ -132,11 +131,6 @@ class AgentLogResponse(UncheckedBaseModel):
Any additional metadata to record.
"""
- log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
- """
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
- """
-
source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None)
"""
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
diff --git a/src/humanloop/types/create_agent_log_response.py b/src/humanloop/types/create_agent_log_response.py
index 2fe74aa..a890ee4 100644
--- a/src/humanloop/types/create_agent_log_response.py
+++ b/src/humanloop/types/create_agent_log_response.py
@@ -30,7 +30,7 @@ class CreateAgentLogResponse(UncheckedBaseModel):
log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
"""
- Status of the Agent Log. When a Agent Log is marked as `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Agent Logs.
+ Status of the Agent Log. When an Agent Log is marked as `complete`, no more Logs can be added to it.
"""
if IS_PYDANTIC_V2:
diff --git a/src/humanloop/types/create_flow_log_response.py b/src/humanloop/types/create_flow_log_response.py
index ae296a6..28e276d 100644
--- a/src/humanloop/types/create_flow_log_response.py
+++ b/src/humanloop/types/create_flow_log_response.py
@@ -30,7 +30,7 @@ class CreateFlowLogResponse(UncheckedBaseModel):
log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
"""
- Status of the Flow Log. When a Flow Log is marked as `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs.
+ Status of the Flow Log. When a Flow Log is marked as `complete`, no more Logs can be added to it.
"""
if IS_PYDANTIC_V2:
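Both create-response models above (`CreateAgentLogResponse` and `CreateFlowLogResponse`) keep their `log_status` field; only the sentence about monitoring Evaluators is dropped from the docstring. A sketch of reading the field off a deserialized response, assuming Pydantic v2 and a `LogStatus` that compares equal to its string values; the payload, including the `id` key, is hypothetical:

```python
from humanloop.types.create_flow_log_response import CreateFlowLogResponse

# UncheckedBaseModel tolerates unknown keys, so parsing a raw payload
# mirrors how the SDK deserializes API responses.
resp = CreateFlowLogResponse.model_validate(
    {"id": "log_123", "log_status": "incomplete"}  # hypothetical payload
)

if resp.log_status == "incomplete":
    # Per the docstring above: once the Flow Log is marked `complete`,
    # no more Logs can be added to it.
    print("flow log still open")
```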
diff --git a/src/humanloop/types/evaluator_log_response.py b/src/humanloop/types/evaluator_log_response.py
index e006e7a..c3dafce 100644
--- a/src/humanloop/types/evaluator_log_response.py
+++ b/src/humanloop/types/evaluator_log_response.py
@@ -10,7 +10,6 @@
from ..core.unchecked_base_model import UncheckedBaseModel
from .chat_message import ChatMessage
from .evaluator_log_response_judgment import EvaluatorLogResponseJudgment
-from .log_status import LogStatus
class EvaluatorLogResponse(UncheckedBaseModel):
@@ -78,11 +77,6 @@ class EvaluatorLogResponse(UncheckedBaseModel):
Any additional metadata to record.
"""
- log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
- """
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
- """
-
parent_id: typing.Optional[str] = pydantic.Field(default=None)
"""
Identifier of the evaluated Log. The newly created Log will have this one set as parent.
diff --git a/src/humanloop/types/event_type.py b/src/humanloop/types/event_type.py
index 128eed9..232e1a6 100644
--- a/src/humanloop/types/event_type.py
+++ b/src/humanloop/types/event_type.py
@@ -8,6 +8,7 @@
"agent_turn_suspend",
"agent_turn_continue",
"agent_turn_end",
+ "agent_turn_error",
"agent_start",
"agent_update",
"agent_end",
@@ -15,7 +16,6 @@
"tool_update",
"tool_end",
"error",
- "agent_generation_error",
],
typing.Any,
]
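Note that `EventType` is a `Literal` union with a trailing `typing.Any` arm, so swapping `agent_generation_error` for `agent_turn_error` will not be flagged by type checkers; dispatch code matching on the old string has to be found and updated by hand. A hedged sketch of the kind of handler affected (the payload handling is assumed):

```python
from humanloop.types.event_type import EventType

def handle_event(event_type: EventType) -> None:
    # "agent_generation_error" is gone from the Literal arm as of this
    # release; because of the typing.Any arm, a stale comparison against
    # the old string still type-checks, it just never matches at runtime.
    if event_type == "agent_turn_error":
        print("agent turn failed")
    elif event_type == "error":
        print("generic error event")
    else:
        print("unhandled event:", event_type)
```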
diff --git a/src/humanloop/types/flow_log_response.py b/src/humanloop/types/flow_log_response.py
index 188c1fd..9472a5b 100644
--- a/src/humanloop/types/flow_log_response.py
+++ b/src/humanloop/types/flow_log_response.py
@@ -89,7 +89,7 @@ class FlowLogResponse(UncheckedBaseModel):
log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
"""
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+ Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. You cannot update a Flow Log's status from `complete` to `incomplete`.
"""
source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None)
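The reworded `log_status` docstring on `FlowLogResponse` makes the transition one-way: a Flow Log can go from `incomplete` to `complete`, never back. A small guard that encodes the invariant client-side, again assuming `LogStatus` compares equal to its string values:

```python
from humanloop.types.flow_log_response import FlowLogResponse

def accepts_more_logs(flow_log: FlowLogResponse) -> bool:
    # Per the docstring above, `complete` is terminal: no more Logs can
    # be added, and the status cannot be flipped back to `incomplete`.
    return flow_log.log_status != "complete"
```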
diff --git a/src/humanloop/types/prompt_call_response.py b/src/humanloop/types/prompt_call_response.py
index f20ce5f..7c4b5e1 100644
--- a/src/humanloop/types/prompt_call_response.py
+++ b/src/humanloop/types/prompt_call_response.py
@@ -9,7 +9,6 @@
from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
from ..core.unchecked_base_model import UncheckedBaseModel
from .chat_message import ChatMessage
-from .log_status import LogStatus
from .prompt_call_log_response import PromptCallLogResponse
from .prompt_call_response_tool_choice import PromptCallResponseToolChoice
@@ -63,11 +62,6 @@ class PromptCallResponse(UncheckedBaseModel):
Any additional metadata to record.
"""
- log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
- """
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
- """
-
source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None)
"""
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
diff --git a/src/humanloop/types/prompt_log_response.py b/src/humanloop/types/prompt_log_response.py
index 8bea978..64982bb 100644
--- a/src/humanloop/types/prompt_log_response.py
+++ b/src/humanloop/types/prompt_log_response.py
@@ -9,7 +9,6 @@
from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
from ..core.unchecked_base_model import UncheckedBaseModel
from .chat_message import ChatMessage
-from .log_status import LogStatus
from .prompt_log_response_tool_choice import PromptLogResponseToolChoice
@@ -132,11 +131,6 @@ class PromptLogResponse(UncheckedBaseModel):
Any additional metadata to record.
"""
- log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
- """
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
- """
-
source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None)
"""
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
diff --git a/src/humanloop/types/provider_api_keys.py b/src/humanloop/types/provider_api_keys.py
index 49bf873..540f62c 100644
--- a/src/humanloop/types/provider_api_keys.py
+++ b/src/humanloop/types/provider_api_keys.py
@@ -3,15 +3,12 @@
import typing
import pydantic
-import typing_extensions
from ..core.pydantic_utilities import IS_PYDANTIC_V2
-from ..core.serialization import FieldMetadata
from ..core.unchecked_base_model import UncheckedBaseModel
class ProviderApiKeys(UncheckedBaseModel):
openai: typing.Optional[str] = None
- ai_21: typing_extensions.Annotated[typing.Optional[str], FieldMetadata(alias="ai21")] = None
mock: typing.Optional[str] = None
anthropic: typing.Optional[str] = None
deepseek: typing.Optional[str] = None
@@ -19,6 +16,7 @@ class ProviderApiKeys(UncheckedBaseModel):
cohere: typing.Optional[str] = None
openai_azure: typing.Optional[str] = None
openai_azure_endpoint: typing.Optional[str] = None
+ google: typing.Optional[str] = None
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
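`ProviderApiKeys` loses the `ai_21` field (and with it the `FieldMetadata` alias machinery) and gains a plain `google` field. Because the model is declared with `extra="allow"`, an `ai21` key arriving in an older payload is retained as untyped extra data rather than parsed into a typed attribute. A minimal sketch with placeholder values:

```python
from humanloop.types.provider_api_keys import ProviderApiKeys

keys = ProviderApiKeys(
    openai="sk-...",   # unchanged field
    google="AIza...",  # new in this release
)
# keys.ai_21 no longer exists as a typed field; an "ai21" payload key
# would survive only as extra data under extra="allow".
```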
diff --git a/src/humanloop/types/tool_call_response.py b/src/humanloop/types/tool_call_response.py
index d3b660e..b2301cc 100644
--- a/src/humanloop/types/tool_call_response.py
+++ b/src/humanloop/types/tool_call_response.py
@@ -8,7 +8,6 @@
import pydantic
from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
from ..core.unchecked_base_model import UncheckedBaseModel
-from .log_status import LogStatus
class ToolCallResponse(UncheckedBaseModel):
@@ -81,11 +80,6 @@ class ToolCallResponse(UncheckedBaseModel):
Any additional metadata to record.
"""
- log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
- """
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
- """
-
source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None)
"""
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
diff --git a/src/humanloop/types/tool_log_response.py b/src/humanloop/types/tool_log_response.py
index 2524eb5..abc308d 100644
--- a/src/humanloop/types/tool_log_response.py
+++ b/src/humanloop/types/tool_log_response.py
@@ -9,7 +9,6 @@
from ..core.pydantic_utilities import IS_PYDANTIC_V2, update_forward_refs
from ..core.unchecked_base_model import UncheckedBaseModel
from .chat_message import ChatMessage
-from .log_status import LogStatus
class ToolLogResponse(UncheckedBaseModel):
@@ -77,11 +76,6 @@ class ToolLogResponse(UncheckedBaseModel):
Any additional metadata to record.
"""
- log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
- """
- Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
- """
-
source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None)
"""
Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.