diff --git a/.fernignore b/.fernignore index d12f0dfb..112f779b 100644 --- a/.fernignore +++ b/.fernignore @@ -4,6 +4,7 @@ ## Custom code src/humanloop/evals +src/humanloop/prompt_utils.py src/humanloop/client.py src/humanloop/overload.py src/humanloop/context.py diff --git a/reference.md b/reference.md index 2cee6af8..728469c7 100644 --- a/reference.md +++ b/reference.md @@ -681,7 +681,7 @@ client = Humanloop( api_key="YOUR_API_KEY", ) response = client.prompts.call_stream() -for chunk in response: +for chunk in response.data: yield chunk ``` @@ -1313,9 +1313,9 @@ Create a Prompt or update it with a new version if it already exists. Prompts are identified by the `ID` or their `path`. The parameters (i.e. the prompt template, temperature, model etc.) determine the versions of the Prompt. -If you provide a commit message, then the new version will be committed; -otherwise it will be uncommitted. If you try to commit an already committed version, -an exception will be raised. +You can provide `version_name` and `version_description` to identify and describe your versions. +Version names must be unique within a Prompt - attempting to create a version with a name +that already exists will result in a 409 Conflict error. @@ -1348,7 +1348,6 @@ client.prompts.upsert( provider="openai", max_tokens=-1, temperature=0.7, - commit_message="Initial commit", ) ``` @@ -1532,7 +1531,15 @@ Input variables should be specified with double curly bracket syntax: `{{input_n
-**commit_message:** `typing.Optional[str]` — Message describing the changes made. +**version_name:** `typing.Optional[str]` — Unique name for the Prompt version. Version names must be unique for a given Prompt. + +
+
+ +
+
+ +**version_description:** `typing.Optional[str]` — Description of the version, e.g., the changes made in this version.
@@ -1822,7 +1829,7 @@ client.prompts.move( -
client.prompts.populate_template(...) +
client.prompts.populate(...)
@@ -1857,7 +1864,7 @@ from humanloop import Humanloop client = Humanloop( api_key="YOUR_API_KEY", ) -client.prompts.populate_template( +client.prompts.populate( id="id", request={"key": "value"}, ) @@ -1954,7 +1961,6 @@ client = Humanloop( ) client.prompts.list_versions( id="pr_30gco7dx6JDq4200GVOHa", - status="committed", ) ``` @@ -1979,14 +1985,6 @@ client.prompts.list_versions(
-**status:** `typing.Optional[VersionStatus]` — Filter versions by status: 'uncommitted', 'committed'. If no status is provided, all versions are returned. - -
-
- -
-
- **evaluator_aggregates:** `typing.Optional[bool]` — Whether to include Evaluator aggregate results for the versions in the response
@@ -2007,7 +2005,7 @@ client.prompts.list_versions(
-
client.prompts.commit(...) +
client.prompts.delete_prompt_version(...)
@@ -2019,9 +2017,7 @@ client.prompts.list_versions(
-Commit a version of the Prompt with a commit message. - -If the version is already committed, an exception will be raised. +Delete a version of the Prompt.
@@ -2041,10 +2037,9 @@ from humanloop import Humanloop client = Humanloop( api_key="YOUR_API_KEY", ) -client.prompts.commit( - id="pr_30gco7dx6JDq4200GVOHa", - version_id="prv_F34aba5f3asp0", - commit_message="Reiterated point about not discussing sentience", +client.prompts.delete_prompt_version( + id="id", + version_id="version_id", ) ``` @@ -2077,14 +2072,6 @@ client.prompts.commit(
-**commit_message:** `str` — Message describing the changes made. - -
-
- -
-
- **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -2097,7 +2084,7 @@ client.prompts.commit(
-
client.prompts.delete_prompt_version(...) +
client.prompts.patch_prompt_version(...)
@@ -2109,7 +2096,7 @@ client.prompts.commit(
-Delete a version of the Prompt. +Update the name or description of the Prompt version.
@@ -2129,7 +2116,7 @@ from humanloop import Humanloop client = Humanloop( api_key="YOUR_API_KEY", ) -client.prompts.delete_prompt_version( +client.prompts.patch_prompt_version( id="id", version_id="version_id", ) @@ -2164,6 +2151,22 @@ client.prompts.delete_prompt_version(
+**name:** `typing.Optional[str]` — Name of the version. + +
+
+ +
+
+ +**description:** `typing.Optional[str]` — Description of the version. + +
+
+ +
+
+ **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
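A minimal sketch of the renamed version-update call, reusing the Prompt and version IDs from the surrounding examples; the `name` value is hypothetical and both fields documented above are optional.

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Rename a Prompt version and record what changed; both fields are optional.
client.prompts.patch_prompt_version(
    id="pr_30gco7dx6JDq4200GVOHa",   # Prompt ID reused from the examples above
    version_id="prv_F34aba5f3asp0",  # version ID reused from the examples above
    name="terse-summary-v2",         # hypothetical version name
    description="Reiterated point about not discussing sentience",
)
```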
@@ -3108,9 +3111,9 @@ Create a Tool or update it with a new version if it already exists. Tools are identified by the `ID` or their `path`. The name, description and parameters determine the versions of the Tool. -If you provide a commit message, then the new version will be committed; -otherwise it will be uncommitted. If you try to commit an already committed version, -an exception will be raised. +You can provide `version_name` and `version_description` to identify and describe your versions. +Version names must be unique within a Tool - attempting to create a version with a name +that already exists will result in a 409 Conflict error.
@@ -3141,7 +3144,6 @@ client.tools.upsert( "required": ["a", "b"], }, }, - commit_message="Initial commit", ) ``` @@ -3214,7 +3216,15 @@ client.tools.upsert(
-**commit_message:** `typing.Optional[str]` — Message describing the changes made. +**version_name:** `typing.Optional[str]` — Unique identifier for this Tool version. Each Tool can only have one version with a given name. + +
+
+ +
+
+ +**version_description:** `typing.Optional[str]` — Description of the Version.
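A short sketch of `tools.upsert` using the new `version_name` and `version_description` parameters in place of the removed `commit_message`; the path and function spec mirror the generated example and are illustrative only.

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Upsert a Tool version identified by a unique version_name rather than a commit message.
client.tools.upsert(
    path="math-tool",  # illustrative path
    function={
        "name": "multiply",
        "description": "Multiply two numbers.",
        "parameters": {
            "type": "object",
            "properties": {"a": {"type": "number"}, "b": {"type": "number"}},
            "required": ["a", "b"],
        },
    },
    version_name="multiply-v1",  # must be unique for this Tool; reuse returns 409 Conflict
    version_description="Initial version of the multiply tool.",
)
```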
@@ -3514,7 +3524,6 @@ client = Humanloop( ) client.tools.list_versions( id="tl_789ghi", - status="committed", ) ``` @@ -3539,14 +3548,6 @@ client.tools.list_versions(
-**status:** `typing.Optional[VersionStatus]` — Filter versions by status: 'uncommitted', 'committed'. If no status is provided, all versions are returned. - -
-
- -
-
- **evaluator_aggregates:** `typing.Optional[bool]` — Whether to include Evaluator aggregate results for the versions in the response
@@ -3567,7 +3568,7 @@ client.tools.list_versions(
-
client.tools.commit(...) +
client.tools.delete_tool_version(...)
@@ -3579,9 +3580,7 @@ client.tools.list_versions(
-Commit a version of the Tool with a commit message. - -If the version is already committed, an exception will be raised. +Delete a version of the Tool.
@@ -3601,10 +3600,9 @@ from humanloop import Humanloop client = Humanloop( api_key="YOUR_API_KEY", ) -client.tools.commit( - id="tl_789ghi", - version_id="tv_012jkl", - commit_message="Initial commit", +client.tools.delete_tool_version( + id="id", + version_id="version_id", ) ``` @@ -3637,14 +3635,6 @@ client.tools.commit(
-**commit_message:** `str` — Message describing the changes made. - -
-
- -
-
- **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -3657,7 +3647,7 @@ client.tools.commit(
-
client.tools.delete_tool_version(...) +
client.tools.update_tool_version(...)
@@ -3669,7 +3659,7 @@ client.tools.commit(
-Delete a version of the Tool. +Update the name or description of the Tool version.
@@ -3689,7 +3679,7 @@ from humanloop import Humanloop client = Humanloop( api_key="YOUR_API_KEY", ) -client.tools.delete_tool_version( +client.tools.update_tool_version( id="id", version_id="version_id", ) @@ -3724,6 +3714,22 @@ client.tools.delete_tool_version(
+**name:** `typing.Optional[str]` — Name of the version. + +
+
+ +
+
+ +**description:** `typing.Optional[str]` — Description of the version. + +
+
+ +
+
+ **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -4211,9 +4217,9 @@ by specifying `action` as `add` or `remove` respectively. In this case, you may the `version_id` or `environment` query parameters to identify the existing version to base the new version on. If neither is provided, the latest created version will be used. -If you provide a commit message, then the new version will be committed; -otherwise it will be uncommitted. If you try to commit an already committed version, -an exception will be raised. +You can provide `version_name` and `version_description` to identify and describe your versions. +Version names must be unique within a Dataset - attempting to create a version with a name +that already exists will result in a 409 Conflict error. Humanloop also deduplicates Datapoints. If you try to add a Datapoint that already exists, it will be ignored. If you intentionally want to add a duplicate Datapoint, @@ -4263,7 +4269,6 @@ client.datasets.upsert( }, }, ], - commit_message="Add two new questions and answers", ) ``` @@ -4352,7 +4357,15 @@ If `"add"` or `"remove"`, one of the `version_id` or `environment` query paramet
-**commit_message:** `typing.Optional[str]` — Message describing the changes made. If provided, a committed version of the Dataset is created. Otherwise, an uncommitted version is created. +**version_name:** `typing.Optional[str]` — Unique name for the Dataset version. Version names must be unique for a given Dataset. + +
+
+ +
+
+ +**version_description:** `typing.Optional[str]` — Description of the version, e.g., the changes made in this version.
@@ -4774,7 +4787,6 @@ client = Humanloop( ) client.datasets.list_versions( id="ds_b0baF1ca7652", - status="committed", ) ``` @@ -4799,15 +4811,7 @@ client.datasets.list_versions(
-**status:** `typing.Optional[VersionStatus]` — Filter versions by status: 'uncommitted', 'committed'. If no status is provided, all versions are returned. - -
-
- -
-
- -**include_datapoints:** `typing.Optional[typing.Literal["latest_committed"]]` — If set to 'latest_committed', include the Datapoints for the latest committed version. Defaults to `None`. +**include_datapoints:** `typing.Optional[ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints]` — If set to 'latest_saved', include datapoints for the latest saved version. Alternatively, 'latest_committed' (deprecated) includes datapoints for the latest committed version only.
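With the `status` filter removed, a brief sketch of the new `include_datapoints` usage, assuming the enum accepts the plain string values named in its description:

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# List all versions of a Dataset, including datapoints for the latest saved version only.
versions = client.datasets.list_versions(
    id="ds_b0baF1ca7652",               # Dataset ID reused from the example above
    include_datapoints="latest_saved",  # "latest_committed" still works but is deprecated
)
```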
@@ -4827,7 +4831,7 @@ client.datasets.list_versions(
-
client.datasets.commit(...) +
client.datasets.delete_dataset_version(...)
@@ -4839,9 +4843,7 @@ client.datasets.list_versions(
-Commit a version of the Dataset with a commit message. - -If the version is already committed, an exception will be raised. +Delete a version of the Dataset.
@@ -4861,10 +4863,9 @@ from humanloop import Humanloop client = Humanloop( api_key="YOUR_API_KEY", ) -client.datasets.commit( - id="ds_b0baF1ca7652", - version_id="dsv_6L78pqrdFi2xa", - commit_message="initial commit", +client.datasets.delete_dataset_version( + id="id", + version_id="version_id", ) ``` @@ -4897,14 +4898,6 @@ client.datasets.commit(
-**commit_message:** `str` — Message describing the changes made. - -
-
- -
-
- **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -4917,7 +4910,7 @@ client.datasets.commit(
-
client.datasets.delete_dataset_version(...) +
client.datasets.update_dataset_version(...)
@@ -4929,7 +4922,7 @@ client.datasets.commit(
-Delete a version of the Dataset. +Update the name or description of the Dataset version.
@@ -4949,7 +4942,7 @@ from humanloop import Humanloop client = Humanloop( api_key="YOUR_API_KEY", ) -client.datasets.delete_dataset_version( +client.datasets.update_dataset_version( id="id", version_id="version_id", ) @@ -4984,6 +4977,22 @@ client.datasets.delete_dataset_version(
+**name:** `typing.Optional[str]` — Name of the version. + +
+
+ +
+
+ +**description:** `typing.Optional[str]` — Description of the version. + +
+
+ +
+
+ **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -5010,12 +5019,15 @@ client.datasets.delete_dataset_version( Add Datapoints from a CSV file to a Dataset. -This will create a new committed version of the Dataset with the Datapoints from the CSV file. +This will create a new version of the Dataset with the Datapoints from the CSV file. If either `version_id` or `environment` is provided, the new version will be based on the specified version, with the Datapoints from the CSV file added to the existing Datapoints in the version. If neither `version_id` nor `environment` is provided, the new version will be based on the version of the Dataset that is deployed to the default Environment. + +You can optionally provide a name and description for the new version using `version_name` +and `version_description` parameters.
@@ -5037,7 +5049,6 @@ client = Humanloop( ) client.datasets.upload_csv( id="id", - commit_message="commit_message", ) ``` @@ -5072,7 +5083,7 @@ core.File` — See core.File for more documentation
-**commit_message:** `str` — Commit message for the new Dataset version. +**version_id:** `typing.Optional[str]` — ID of the specific Dataset version to base the created Version on.
@@ -5080,7 +5091,7 @@ core.File` — See core.File for more documentation
-**version_id:** `typing.Optional[str]` — ID of the specific Dataset version to base the created Version on. +**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed Version to base the created Version on.
@@ -5088,7 +5099,15 @@ core.File` — See core.File for more documentation
-**environment:** `typing.Optional[str]` — Name of the Environment identifying a deployed Version to base the created Version on. +**version_name:** `typing.Optional[str]` — Name for the new Dataset version. + +
+
+ +
+
+ +**version_description:** `typing.Optional[str]` — Description for the new Dataset version.
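A hedged sketch of a CSV upload that names the resulting version; the file path is hypothetical and an open binary file handle is assumed to satisfy `core.File`:

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Upload Datapoints from a local CSV and label the new Dataset version.
with open("questions.csv", "rb") as csv_file:  # hypothetical local file
    client.datasets.upload_csv(
        id="ds_b0baF1ca7652",  # Dataset ID reused from the surrounding examples
        file=csv_file,
        version_name="questions-june-import",  # optional; must be unique for the Dataset
        version_description="Questions imported from the June review CSV.",
    )
```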
@@ -5769,9 +5788,9 @@ Create an Evaluator or update it with a new version if it already exists. Evaluators are identified by the `ID` or their `path`. The spec provided determines the version of the Evaluator. -If you provide a commit message, then the new version will be committed; -otherwise it will be uncommitted. If you try to commit an already committed version, -an exception will be raised. +You can provide `version_name` and `version_description` to identify and describe your versions. +Version names must be unique within an Evaluator - attempting to create a version with a name +that already exists will result in a 409 Conflict error.
@@ -5799,7 +5818,6 @@ client.evaluators.upsert( "evaluator_type": "python", "code": "def evaluate(answer, target):\n return 0.5", }, - commit_message="Initial commit", ) ``` @@ -5840,7 +5858,15 @@ client.evaluators.upsert(
-**commit_message:** `typing.Optional[str]` — Message describing the changes made. +**version_name:** `typing.Optional[str]` — Unique name for the Evaluator version. Version names must be unique for a given Evaluator. + +
+
+ +
+
+ +**version_description:** `typing.Optional[str]` — Description of the version, e.g., the changes made in this version.
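Because reusing a `version_name` now yields a 409 Conflict, a sketch of how that might be handled; it assumes the conflict surfaces as the SDK's generic `ApiError` (no dedicated exception appears in this changeset), and the path, spec, and version name are illustrative:

```python
from humanloop import Humanloop
from humanloop.core.api_error import ApiError

client = Humanloop(api_key="YOUR_API_KEY")

try:
    client.evaluators.upsert(
        path="Shared Evaluators/Accuracy",  # illustrative path
        spec={
            "arguments_type": "target_required",
            "return_type": "number",
            "evaluator_type": "python",
            "code": "def evaluate(answer, target):\n    return 0.5",
        },
        version_name="accuracy-v1",  # a name that already exists triggers the 409 below
    )
except ApiError as exc:
    if exc.status_code == 409:
        # Version names must be unique per Evaluator: pick a new name,
        # or rename the existing version via evaluators.update_evaluator_version.
        print("Version name already taken:", exc.body)
    else:
        raise
```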
@@ -6164,14 +6190,6 @@ client.evaluators.list_versions(
-**status:** `typing.Optional[VersionStatus]` — Filter versions by status: 'uncommitted', 'committed'. If no status is provided, all versions are returned. - -
-
- -
-
- **evaluator_aggregates:** `typing.Optional[bool]` — Whether to include Evaluator aggregate results for the versions in the response
@@ -6192,7 +6210,7 @@ client.evaluators.list_versions(
-
client.evaluators.commit(...) +
client.evaluators.delete_evaluator_version(...)
@@ -6204,9 +6222,7 @@ client.evaluators.list_versions(
-Commit a version of the Evaluator with a commit message. - -If the version is already committed, an exception will be raised. +Delete a version of the Evaluator.
@@ -6226,10 +6242,9 @@ from humanloop import Humanloop client = Humanloop( api_key="YOUR_API_KEY", ) -client.evaluators.commit( - id="ev_890bcd", - version_id="evv_012def", - commit_message="Initial commit", +client.evaluators.delete_evaluator_version( + id="id", + version_id="version_id", ) ``` @@ -6246,7 +6261,7 @@ client.evaluators.commit(
-**id:** `str` — Unique identifier for Prompt. +**id:** `str` — Unique identifier for Evaluator.
@@ -6262,14 +6277,6 @@ client.evaluators.commit(
-**commit_message:** `str` — Message describing the changes made. - -
-
- -
-
- **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -6282,7 +6289,7 @@ client.evaluators.commit(
-
client.evaluators.delete_evaluator_version(...) +
client.evaluators.update_evaluator_version(...)
@@ -6294,7 +6301,7 @@ client.evaluators.commit(
-Delete a version of the Evaluator. +Update the name or description of the Evaluator version.
@@ -6314,7 +6321,7 @@ from humanloop import Humanloop client = Humanloop( api_key="YOUR_API_KEY", ) -client.evaluators.delete_evaluator_version( +client.evaluators.update_evaluator_version( id="id", version_id="version_id", ) @@ -6349,6 +6356,22 @@ client.evaluators.delete_evaluator_version(
+**name:** `typing.Optional[str]` — Name of the version. + +
+
+ +
+
+ +**description:** `typing.Optional[str]` — Description of the version. + +
+
+ +
+
+ **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -7522,9 +7545,9 @@ Create or update a Flow. Flows can also be identified by the `ID` or their `path`. -If you provide a commit message, then the new version will be committed; -otherwise it will be uncommitted. If you try to commit an already committed version, -an exception will be raised. +You can provide `version_name` and `version_description` to identify and describe your versions. +Version names must be unique within a Flow - attempting to create a version with a name +that already exists will result in a 409 Conflict error.
@@ -7557,7 +7580,6 @@ client.flows.upsert( "description": "Retrieval tool for MedQA.", "source_code": "def retrieval_tool(question: str) -> str:\n pass\n", }, - "commit_message": "Initial commit", }, ) @@ -7599,7 +7621,15 @@ client.flows.upsert(
-**commit_message:** `typing.Optional[str]` — Message describing the changes made. +**version_name:** `typing.Optional[str]` — Unique name for the Flow version. Version names must be unique for a given Flow. + +
+
+ +
+
+ +**version_description:** `typing.Optional[str]` — Description of the version, e.g., the changes made in this version.
@@ -7653,7 +7683,6 @@ client = Humanloop( ) client.flows.list_versions( id="fl_6o701g4jmcanPVHxdqD0O", - status="committed", ) ``` @@ -7678,14 +7707,6 @@ client.flows.list_versions(
-**status:** `typing.Optional[VersionStatus]` — Filter versions by status: 'uncommitted', 'committed'. If no status is provided, all versions are returned. - -
-
- -
-
- **evaluator_aggregates:** `typing.Optional[bool]` — Whether to include Evaluator aggregate results for the versions in the response
@@ -7706,7 +7727,7 @@ client.flows.list_versions(
-
client.flows.commit(...) +
client.flows.delete_flow_version(...)
@@ -7718,9 +7739,7 @@ client.flows.list_versions(
-Commit a version of the Flow with a commit message. - -If the version is already committed, an exception will be raised. +Delete a version of the Flow.
@@ -7740,10 +7759,9 @@ from humanloop import Humanloop client = Humanloop( api_key="YOUR_API_KEY", ) -client.flows.commit( - id="fl_6o701g4jmcanPVHxdqD0O", - version_id="flv_6o701g4jmcanPVHxdqD0O", - commit_message="RAG lookup tool bug fixing", +client.flows.delete_flow_version( + id="id", + version_id="version_id", ) ``` @@ -7776,14 +7794,6 @@ client.flows.commit(
-**commit_message:** `str` — Message describing the changes made. - -
-
- -
-
- **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -7796,7 +7806,7 @@ client.flows.commit(
-
client.flows.delete_flow_version(...) +
client.flows.update_flow_version(...)
@@ -7808,7 +7818,7 @@ client.flows.commit(
-Delete a version of the Flow. +Update the name or description of the Flow version.
@@ -7828,7 +7838,7 @@ from humanloop import Humanloop client = Humanloop( api_key="YOUR_API_KEY", ) -client.flows.delete_flow_version( +client.flows.update_flow_version( id="id", version_id="version_id", ) @@ -7863,6 +7873,22 @@ client.flows.delete_flow_version(
+**name:** `typing.Optional[str]` — Name of the version. + +
+
+ +
+
+ +**description:** `typing.Optional[str]` — Description of the version. + +
+
+ +
+
+ **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
diff --git a/src/humanloop/__init__.py b/src/humanloop/__init__.py index 2734e469..0c431892 100644 --- a/src/humanloop/__init__.py +++ b/src/humanloop/__init__.py @@ -10,7 +10,6 @@ ChatRole, ChatToolType, CodeEvaluatorRequest, - CommitRequest, ConfigToolResponse, CreateDatapointRequest, CreateDatapointRequestTargetValue, @@ -147,6 +146,7 @@ ToolResponse, UpdateDatesetAction, UpdateEvaluationStatusRequest, + UpdateVersionRequest, UserResponse, Valence, ValidationError, @@ -164,6 +164,7 @@ from .errors import UnprocessableEntityError from . import datasets, directories, evaluations, evaluators, files, flows, logs, prompts, tools from .client import AsyncHumanloop, Humanloop +from .datasets import ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints from .environment import HumanloopEnvironment from .evaluations import ( AddEvaluatorsRequestEvaluatorsItem, @@ -205,7 +206,6 @@ ChatMessageContentParams, ChatMessageParams, CodeEvaluatorRequestParams, - CommitRequestParams, CreateDatapointRequestParams, CreateDatapointRequestTargetValueParams, CreateEvaluatorLogResponseParams, @@ -312,6 +312,7 @@ ToolKernelRequestParams, ToolLogResponseParams, ToolResponseParams, + UpdateVersionRequestParams, ValidationErrorLocItemParams, ValidationErrorParams, VersionDeploymentResponseFileParams, @@ -344,8 +345,6 @@ "ChatToolType", "CodeEvaluatorRequest", "CodeEvaluatorRequestParams", - "CommitRequest", - "CommitRequestParams", "ConfigToolResponse", "CreateDatapointRequest", "CreateDatapointRequestParams", @@ -490,6 +489,7 @@ "ListPromptsParams", "ListTools", "ListToolsParams", + "ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints", "LlmEvaluatorRequest", "LlmEvaluatorRequestParams", "LogResponse", @@ -617,6 +617,8 @@ "UnprocessableEntityError", "UpdateDatesetAction", "UpdateEvaluationStatusRequest", + "UpdateVersionRequest", + "UpdateVersionRequestParams", "UserResponse", "Valence", "ValidationError", diff --git a/src/humanloop/base_client.py b/src/humanloop/base_client.py index 5a643570..bf72be6a 100644 --- a/src/humanloop/base_client.py +++ b/src/humanloop/base_client.py @@ -74,7 +74,9 @@ def __init__( follow_redirects: typing.Optional[bool] = True, httpx_client: typing.Optional[httpx.Client] = None, ): - _defaulted_timeout = timeout if timeout is not None else 60 if httpx_client is None else None + _defaulted_timeout = ( + timeout if timeout is not None else 60 if httpx_client is None else httpx_client.timeout.read + ) if api_key is None: raise ApiError( body="The client must be instantiated be either passing in api_key or setting HUMANLOOP_API_KEY" @@ -147,7 +149,9 @@ def __init__( follow_redirects: typing.Optional[bool] = True, httpx_client: typing.Optional[httpx.AsyncClient] = None, ): - _defaulted_timeout = timeout if timeout is not None else 60 if httpx_client is None else None + _defaulted_timeout = ( + timeout if timeout is not None else 60 if httpx_client is None else httpx_client.timeout.read + ) if api_key is None: raise ApiError( body="The client must be instantiated be either passing in api_key or setting HUMANLOOP_API_KEY" diff --git a/src/humanloop/client.py b/src/humanloop/client.py index 68bcd076..2daa7769 100644 --- a/src/humanloop/client.py +++ b/src/humanloop/client.py @@ -22,6 +22,8 @@ from humanloop.otel import instrument_provider from humanloop.otel.exporter import HumanloopSpanExporter from humanloop.otel.processor import HumanloopSpanProcessor +from humanloop.prompt_utils import populate_template +from humanloop.prompts.client import PromptsClient class 
ExtendedEvalsClient(EvaluationsClient): @@ -68,6 +70,14 @@ def run( ) +class ExtendedPromptsClient(PromptsClient): + """ + Adds utility for populating Prompt template inputs. + """ + + populate_template = staticmethod(populate_template) # type: ignore [assignment] + + class Humanloop(BaseHumanloop): """ See docstring of :class:`BaseHumanloop`. @@ -109,6 +119,7 @@ def __init__( eval_client = ExtendedEvalsClient(client_wrapper=self._client_wrapper) eval_client.client = self self.evaluations = eval_client + self.prompts = ExtendedPromptsClient(client_wrapper=self._client_wrapper) # Overload the .log method of the clients to be aware of Evaluation Context # and the @flow decorator providing the trace_id diff --git a/src/humanloop/core/__init__.py b/src/humanloop/core/__init__.py index 0379aa30..d3eb2a8f 100644 --- a/src/humanloop/core/__init__.py +++ b/src/humanloop/core/__init__.py @@ -5,6 +5,7 @@ from .datetime_utils import serialize_datetime from .file import File, convert_file_dict_to_httpx_tuples, with_content_type from .http_client import AsyncHttpClient, HttpClient +from .http_response import AsyncHttpResponse, HttpResponse from .jsonable_encoder import jsonable_encoder from .pagination import AsyncPager, SyncPager from .pydantic_utilities import ( @@ -26,11 +27,13 @@ "ApiError", "AsyncClientWrapper", "AsyncHttpClient", + "AsyncHttpResponse", "AsyncPager", "BaseClientWrapper", "FieldMetadata", "File", "HttpClient", + "HttpResponse", "IS_PYDANTIC_V2", "RequestOptions", "SyncClientWrapper", diff --git a/src/humanloop/core/client_wrapper.py b/src/humanloop/core/client_wrapper.py index 751292f1..0edcfac8 100644 --- a/src/humanloop/core/client_wrapper.py +++ b/src/humanloop/core/client_wrapper.py @@ -14,6 +14,7 @@ def __init__(self, *, api_key: str, base_url: str, timeout: typing.Optional[floa def get_headers(self) -> typing.Dict[str, str]: headers: typing.Dict[str, str] = { + "User-Agent": "humanloop/0.8.30", "X-Fern-Language": "Python", "X-Fern-SDK-Name": "humanloop", "X-Fern-SDK-Version": "0.8.30", diff --git a/src/humanloop/core/http_client.py b/src/humanloop/core/http_client.py index 275a54cc..e7bd4f79 100644 --- a/src/humanloop/core/http_client.py +++ b/src/humanloop/core/http_client.py @@ -2,7 +2,6 @@ import asyncio import email.utils -import json import re import time import typing @@ -11,7 +10,6 @@ from random import random import httpx - from .file import File, convert_file_dict_to_httpx_tuples from .jsonable_encoder import jsonable_encoder from .query_encoder import encode_query diff --git a/src/humanloop/core/http_response.py b/src/humanloop/core/http_response.py new file mode 100644 index 00000000..c72a9130 --- /dev/null +++ b/src/humanloop/core/http_response.py @@ -0,0 +1,47 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from typing import Dict, Generic, TypeVar + +import httpx + +T = TypeVar("T") + + +class HttpResponse(Generic[T]): + _response: httpx.Response + _data: T + + def __init__(self, response: httpx.Response, data: T): + self._response = response + self._data = data + + @property + def headers(self) -> Dict[str, str]: + return dict(self._response.headers) + + @property + def data(self) -> T: + return self._data + + def close(self) -> None: + self._response.close() + + +class AsyncHttpResponse(Generic[T]): + _response: httpx.Response + _data: T + + def __init__(self, response: httpx.Response, data: T): + self._response = response + self._data = data + + @property + def headers(self) -> Dict[str, str]: + return dict(self._response.headers) + + @property + def data(self) -> T: + return self._data + + async def close(self) -> None: + await self._response.aclose() diff --git a/src/humanloop/core/jsonable_encoder.py b/src/humanloop/core/jsonable_encoder.py index 1b631e90..afee3662 100644 --- a/src/humanloop/core/jsonable_encoder.py +++ b/src/humanloop/core/jsonable_encoder.py @@ -17,7 +17,6 @@ from typing import Any, Callable, Dict, List, Optional, Set, Union import pydantic - from .datetime_utils import serialize_datetime from .pydantic_utilities import ( IS_PYDANTIC_V2, diff --git a/src/humanloop/core/pagination.py b/src/humanloop/core/pagination.py index 74f8ae61..e7e31291 100644 --- a/src/humanloop/core/pagination.py +++ b/src/humanloop/core/pagination.py @@ -2,9 +2,8 @@ import typing -from typing_extensions import Self - import pydantic +from typing_extensions import Self # Generic to represent the underlying type of the results within a page T = typing.TypeVar("T") diff --git a/src/humanloop/core/pydantic_utilities.py b/src/humanloop/core/pydantic_utilities.py index ca1f4792..f7467bcc 100644 --- a/src/humanloop/core/pydantic_utilities.py +++ b/src/humanloop/core/pydantic_utilities.py @@ -5,10 +5,8 @@ import typing from collections import defaultdict -import typing_extensions - import pydantic - +import typing_extensions from .datetime_utils import serialize_datetime from .serialization import convert_and_respect_annotation_metadata diff --git a/src/humanloop/core/serialization.py b/src/humanloop/core/serialization.py index cb5dcbf9..e3d17f00 100644 --- a/src/humanloop/core/serialization.py +++ b/src/humanloop/core/serialization.py @@ -4,9 +4,8 @@ import inspect import typing -import typing_extensions - import pydantic +import typing_extensions class FieldMetadata: diff --git a/src/humanloop/core/unchecked_base_model.py b/src/humanloop/core/unchecked_base_model.py index d111b06d..2c2d92a7 100644 --- a/src/humanloop/core/unchecked_base_model.py +++ b/src/humanloop/core/unchecked_base_model.py @@ -5,11 +5,8 @@ import typing import uuid -import typing_extensions -from pydantic_core import PydanticUndefined - import pydantic - +import typing_extensions from .pydantic_utilities import ( IS_PYDANTIC_V2, ModelField, @@ -23,6 +20,7 @@ parse_obj_as, ) from .serialization import get_field_to_alias_mapping +from pydantic_core import PydanticUndefined class UnionMetadata: diff --git a/src/humanloop/datasets/__init__.py b/src/humanloop/datasets/__init__.py index f3ea2659..5b47c541 100644 --- a/src/humanloop/datasets/__init__.py +++ b/src/humanloop/datasets/__init__.py @@ -1,2 +1,5 @@ # This file was auto-generated by Fern from our API Definition. 
+from .types import ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints + +__all__ = ["ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints"] diff --git a/src/humanloop/datasets/client.py b/src/humanloop/datasets/client.py index a3741977..795400a8 100644 --- a/src/humanloop/datasets/client.py +++ b/src/humanloop/datasets/client.py @@ -2,6 +2,7 @@ import typing from ..core.client_wrapper import SyncClientWrapper +from .raw_client import RawDatasetsClient from ..types.project_sort_by import ProjectSortBy from ..types.sort_order import SortOrder from ..core.request_options import RequestOptions @@ -15,15 +16,17 @@ from ..core.api_error import ApiError from ..requests.create_datapoint_request import CreateDatapointRequestParams from ..types.update_dateset_action import UpdateDatesetAction -from ..core.serialization import convert_and_respect_annotation_metadata -from ..core.jsonable_encoder import jsonable_encoder from ..types.datapoint_response import DatapointResponse +from ..core.jsonable_encoder import jsonable_encoder from ..types.paginated_datapoint_response import PaginatedDatapointResponse -from ..types.version_status import VersionStatus +from .types.list_versions_datasets_id_versions_get_request_include_datapoints import ( + ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints, +) from ..types.list_datasets import ListDatasets from .. import core from ..types.file_environment_response import FileEnvironmentResponse from ..core.client_wrapper import AsyncClientWrapper +from .raw_client import AsyncRawDatasetsClient from ..core.pagination import AsyncPager # this is used as the default value for optional parameters @@ -32,7 +35,18 @@ class DatasetsClient: def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper + self._raw_client = RawDatasetsClient(client_wrapper=client_wrapper) + + @property + def with_raw_response(self) -> RawDatasetsClient: + """ + Retrieves a raw implementation of this client that returns raw responses. + + Returns + ------- + RawDatasetsClient + """ + return self._raw_client def list( self, @@ -93,7 +107,7 @@ def list( yield page """ page = page if page is not None else 1 - _response = self._client_wrapper.httpx_client.request( + _response = self._raw_client._client_wrapper.httpx_client.request( "datasets", method="GET", params={ @@ -153,7 +167,8 @@ def upsert( id: typing.Optional[str] = OMIT, action: typing.Optional[UpdateDatesetAction] = OMIT, attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - commit_message: typing.Optional[str] = OMIT, + version_name: typing.Optional[str] = OMIT, + version_description: typing.Optional[str] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> DatasetResponse: """ @@ -167,9 +182,9 @@ def upsert( the `version_id` or `environment` query parameters to identify the existing version to base the new version on. If neither is provided, the latest created version will be used. - If you provide a commit message, then the new version will be committed; - otherwise it will be uncommitted. If you try to commit an already committed version, - an exception will be raised. + You can provide `version_name` and `version_description` to identify and describe your versions. + Version names must be unique within a Dataset - attempting to create a version with a name + that already exists will result in a 409 Conflict error. Humanloop also deduplicates Datapoints. If you try to add a Datapoint that already exists, it will be ignored. 
If you intentionally want to add a duplicate Datapoint, @@ -207,8 +222,11 @@ def upsert( attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Additional fields to describe the Dataset. Helpful to separate Dataset versions from each other with details on how they were created or used. - commit_message : typing.Optional[str] - Message describing the changes made. If provided, a committed version of the Dataset is created. Otherwise, an uncommitted version is created. + version_name : typing.Optional[str] + Unique name for the Dataset version. Version names must be unique for a given Dataset. + + version_description : typing.Optional[str] + Description of the version, e.g., the changes made in this version. request_options : typing.Optional[RequestOptions] Request-specific configuration. @@ -238,56 +256,22 @@ def upsert( }, ], action="set", - commit_message="Add two new questions and answers", ) """ - _response = self._client_wrapper.httpx_client.request( - "datasets", - method="POST", - params={ - "version_id": version_id, - "environment": environment, - "include_datapoints": include_datapoints, - }, - json={ - "path": path, - "id": id, - "datapoints": convert_and_respect_annotation_metadata( - object_=datapoints, annotation=typing.Sequence[CreateDatapointRequestParams], direction="write" - ), - "action": action, - "attributes": attributes, - "commit_message": commit_message, - }, - headers={ - "content-type": "application/json", - }, + response = self._raw_client.upsert( + datapoints=datapoints, + version_id=version_id, + environment=environment, + include_datapoints=include_datapoints, + path=path, + id=id, + action=action, + attributes=attributes, + version_name=version_name, + version_description=version_description, request_options=request_options, - omit=OMIT, ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - DatasetResponse, - construct_type( - type_=DatasetResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data def get( self, @@ -344,39 +328,14 @@ def get( include_datapoints=True, ) """ - _response = self._client_wrapper.httpx_client.request( - f"datasets/{jsonable_encoder(id)}", - method="GET", - params={ - "version_id": version_id, - "environment": environment, - "include_datapoints": include_datapoints, - }, + response = self._raw_client.get( + id, + version_id=version_id, + environment=environment, + include_datapoints=include_datapoints, request_options=request_options, ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - DatasetResponse, - construct_type( - type_=DatasetResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data def 
delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: """ @@ -405,28 +364,8 @@ def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = id="id", ) """ - _response = self._client_wrapper.httpx_client.request( - f"datasets/{jsonable_encoder(id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = self._raw_client.delete(id, request_options=request_options) + return response.data def move( self, @@ -469,42 +408,8 @@ def move( id="id", ) """ - _response = self._client_wrapper.httpx_client.request( - f"datasets/{jsonable_encoder(id)}", - method="PATCH", - json={ - "path": path, - "name": name, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - DatasetResponse, - construct_type( - type_=DatasetResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = self._raw_client.move(id, path=path, name=name, request_options=request_options) + return response.data def list_datapoints( self, @@ -562,7 +467,7 @@ def list_datapoints( yield page """ page = page if page is not None else 1 - _response = self._client_wrapper.httpx_client.request( + _response = self._raw_client._client_wrapper.httpx_client.request( f"datasets/{jsonable_encoder(id)}/datapoints", method="GET", params={ @@ -612,8 +517,7 @@ def list_versions( self, id: str, *, - status: typing.Optional[VersionStatus] = None, - include_datapoints: typing.Optional[typing.Literal["latest_committed"]] = None, + include_datapoints: typing.Optional[ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints] = None, request_options: typing.Optional[RequestOptions] = None, ) -> ListDatasets: """ @@ -624,11 +528,8 @@ def list_versions( id : str Unique identifier for Dataset. - status : typing.Optional[VersionStatus] - Filter versions by status: 'uncommitted', 'committed'. If no status is provided, all versions are returned. - - include_datapoints : typing.Optional[typing.Literal["latest_committed"]] - If set to 'latest_committed', include the Datapoints for the latest committed version. Defaults to `None`. + include_datapoints : typing.Optional[ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints] + If set to 'latest_saved', include datapoints for the latest saved version. Alternatively, 'latest_committed' (deprecated) includes datapoints for the latest committed version only. request_options : typing.Optional[RequestOptions] Request-specific configuration. 
@@ -647,49 +548,18 @@ def list_versions( ) client.datasets.list_versions( id="ds_b0baF1ca7652", - status="committed", ) """ - _response = self._client_wrapper.httpx_client.request( - f"datasets/{jsonable_encoder(id)}/versions", - method="GET", - params={ - "status": status, - "include_datapoints": include_datapoints, - }, - request_options=request_options, + response = self._raw_client.list_versions( + id, include_datapoints=include_datapoints, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - ListDatasets, - construct_type( - type_=ListDatasets, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data - def commit( - self, id: str, version_id: str, *, commit_message: str, request_options: typing.Optional[RequestOptions] = None - ) -> DatasetResponse: + def delete_dataset_version( + self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> None: """ - Commit a version of the Dataset with a commit message. - - If the version is already committed, an exception will be raised. + Delete a version of the Dataset. Parameters ---------- @@ -699,16 +569,12 @@ def commit( version_id : str Unique identifier for the specific version of the Dataset. - commit_message : str - Message describing the changes made. - request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - DatasetResponse - Successful Response + None Examples -------- @@ -717,50 +583,25 @@ def commit( client = Humanloop( api_key="YOUR_API_KEY", ) - client.datasets.commit( - id="ds_b0baF1ca7652", - version_id="dsv_6L78pqrdFi2xa", - commit_message="initial commit", + client.datasets.delete_dataset_version( + id="id", + version_id="version_id", ) """ - _response = self._client_wrapper.httpx_client.request( - f"datasets/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}/commit", - method="POST", - json={ - "commit_message": commit_message, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - DatasetResponse, - construct_type( - type_=DatasetResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = self._raw_client.delete_dataset_version(id, version_id, request_options=request_options) + return response.data - def delete_dataset_version( - self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> None: + def update_dataset_version( + self, + id: str, + version_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> DatasetResponse: """ - Delete a version of the Dataset. + Update the name or description of the Dataset version. Parameters ---------- @@ -770,12 +611,19 @@ def delete_dataset_version( version_id : str Unique identifier for the specific version of the Dataset. + name : typing.Optional[str] + Name of the version. + + description : typing.Optional[str] + Description of the version. + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - None + DatasetResponse + Successful Response Examples -------- @@ -784,54 +632,40 @@ def delete_dataset_version( client = Humanloop( api_key="YOUR_API_KEY", ) - client.datasets.delete_dataset_version( + client.datasets.update_dataset_version( id="id", version_id="version_id", ) """ - _response = self._client_wrapper.httpx_client.request( - f"datasets/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", - method="DELETE", - request_options=request_options, + response = self._raw_client.update_dataset_version( + id, version_id, name=name, description=description, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data def upload_csv( self, id: str, *, file: core.File, - commit_message: str, version_id: typing.Optional[str] = None, environment: typing.Optional[str] = None, + version_name: typing.Optional[str] = OMIT, + version_description: typing.Optional[str] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> DatasetResponse: """ Add Datapoints from a CSV file to a Dataset. - This will create a new committed version of the Dataset with the Datapoints from the CSV file. + This will create a new version of the Dataset with the Datapoints from the CSV file. If either `version_id` or `environment` is provided, the new version will be based on the specified version, with the Datapoints from the CSV file added to the existing Datapoints in the version. If neither `version_id` nor `environment` is provided, the new version will be based on the version of the Dataset that is deployed to the default Environment. + You can optionally provide a name and description for the new version using `version_name` + and `version_description` parameters. + Parameters ---------- id : str @@ -840,15 +674,18 @@ def upload_csv( file : core.File See core.File for more documentation - commit_message : str - Commit message for the new Dataset version. - version_id : typing.Optional[str] ID of the specific Dataset version to base the created Version on. environment : typing.Optional[str] Name of the Environment identifying a deployed Version to base the created Version on. + version_name : typing.Optional[str] + Name for the new Dataset version. + + version_description : typing.Optional[str] + Description for the new Dataset version. + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
@@ -866,48 +703,18 @@ def upload_csv( ) client.datasets.upload_csv( id="id", - commit_message="commit_message", ) """ - _response = self._client_wrapper.httpx_client.request( - f"datasets/{jsonable_encoder(id)}/datapoints/csv", - method="POST", - params={ - "version_id": version_id, - "environment": environment, - }, - data={ - "commit_message": commit_message, - }, - files={ - "file": file, - }, + response = self._raw_client.upload_csv( + id, + file=file, + version_id=version_id, + environment=environment, + version_name=version_name, + version_description=version_description, request_options=request_options, - omit=OMIT, ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - DatasetResponse, - construct_type( - type_=DatasetResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data def set_deployment( self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None @@ -949,37 +756,10 @@ def set_deployment( version_id="dsv_6L78pqrdFi2xa", ) """ - _response = self._client_wrapper.httpx_client.request( - f"datasets/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", - method="POST", - params={ - "version_id": version_id, - }, - request_options=request_options, + response = self._raw_client.set_deployment( + id, environment_id, version_id=version_id, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - DatasetResponse, - construct_type( - type_=DatasetResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data def remove_deployment( self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None @@ -1016,28 +796,8 @@ def remove_deployment( environment_id="staging", ) """ - _response = self._client_wrapper.httpx_client.request( - f"datasets/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = self._raw_client.remove_deployment(id, environment_id, request_options=request_options) + return response.data def list_environments( self, id: str, *, request_options: typing.Optional[RequestOptions] = None @@ 
-1069,39 +829,24 @@ def list_environments( id="id", ) """ - _response = self._client_wrapper.httpx_client.request( - f"datasets/{jsonable_encoder(id)}/environments", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - typing.List[FileEnvironmentResponse], - construct_type( - type_=typing.List[FileEnvironmentResponse], # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = self._raw_client.list_environments(id, request_options=request_options) + return response.data class AsyncDatasetsClient: def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper + self._raw_client = AsyncRawDatasetsClient(client_wrapper=client_wrapper) + + @property + def with_raw_response(self) -> AsyncRawDatasetsClient: + """ + Retrieves a raw implementation of this client that returns raw responses. + + Returns + ------- + AsyncRawDatasetsClient + """ + return self._raw_client async def list( self, @@ -1170,7 +915,7 @@ async def main() -> None: asyncio.run(main()) """ page = page if page is not None else 1 - _response = await self._client_wrapper.httpx_client.request( + _response = await self._raw_client._client_wrapper.httpx_client.request( "datasets", method="GET", params={ @@ -1230,7 +975,8 @@ async def upsert( id: typing.Optional[str] = OMIT, action: typing.Optional[UpdateDatesetAction] = OMIT, attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - commit_message: typing.Optional[str] = OMIT, + version_name: typing.Optional[str] = OMIT, + version_description: typing.Optional[str] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> DatasetResponse: """ @@ -1244,9 +990,9 @@ async def upsert( the `version_id` or `environment` query parameters to identify the existing version to base the new version on. If neither is provided, the latest created version will be used. - If you provide a commit message, then the new version will be committed; - otherwise it will be uncommitted. If you try to commit an already committed version, - an exception will be raised. + You can provide `version_name` and `version_description` to identify and describe your versions. + Version names must be unique within a Dataset - attempting to create a version with a name + that already exists will result in a 409 Conflict error. Humanloop also deduplicates Datapoints. If you try to add a Datapoint that already exists, it will be ignored. If you intentionally want to add a duplicate Datapoint, @@ -1284,8 +1030,11 @@ async def upsert( attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Additional fields to describe the Dataset. Helpful to separate Dataset versions from each other with details on how they were created or used. - commit_message : typing.Optional[str] - Message describing the changes made. If provided, a committed version of the Dataset is created. Otherwise, an uncommitted version is created. + version_name : typing.Optional[str] + Unique name for the Dataset version. Version names must be unique for a given Dataset. 
+ + version_description : typing.Optional[str] + Description of the version, e.g., the changes made in this version. request_options : typing.Optional[RequestOptions] Request-specific configuration. @@ -1320,59 +1069,25 @@ async def main() -> None: }, ], action="set", - commit_message="Add two new questions and answers", ) asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - "datasets", - method="POST", - params={ - "version_id": version_id, - "environment": environment, - "include_datapoints": include_datapoints, - }, - json={ - "path": path, - "id": id, - "datapoints": convert_and_respect_annotation_metadata( - object_=datapoints, annotation=typing.Sequence[CreateDatapointRequestParams], direction="write" - ), - "action": action, - "attributes": attributes, - "commit_message": commit_message, - }, - headers={ - "content-type": "application/json", - }, + response = await self._raw_client.upsert( + datapoints=datapoints, + version_id=version_id, + environment=environment, + include_datapoints=include_datapoints, + path=path, + id=id, + action=action, + attributes=attributes, + version_name=version_name, + version_description=version_description, request_options=request_options, - omit=OMIT, ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - DatasetResponse, - construct_type( - type_=DatasetResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data async def get( self, @@ -1437,39 +1152,14 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"datasets/{jsonable_encoder(id)}", - method="GET", - params={ - "version_id": version_id, - "environment": environment, - "include_datapoints": include_datapoints, - }, + response = await self._raw_client.get( + id, + version_id=version_id, + environment=environment, + include_datapoints=include_datapoints, request_options=request_options, ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - DatasetResponse, - construct_type( - type_=DatasetResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data async def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: """ @@ -1506,28 +1196,8 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"datasets/{jsonable_encoder(id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # 
type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = await self._raw_client.delete(id, request_options=request_options) + return response.data async def move( self, @@ -1578,42 +1248,8 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"datasets/{jsonable_encoder(id)}", - method="PATCH", - json={ - "path": path, - "name": name, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - DatasetResponse, - construct_type( - type_=DatasetResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = await self._raw_client.move(id, path=path, name=name, request_options=request_options) + return response.data async def list_datapoints( self, @@ -1679,7 +1315,7 @@ async def main() -> None: asyncio.run(main()) """ page = page if page is not None else 1 - _response = await self._client_wrapper.httpx_client.request( + _response = await self._raw_client._client_wrapper.httpx_client.request( f"datasets/{jsonable_encoder(id)}/datapoints", method="GET", params={ @@ -1729,8 +1365,7 @@ async def list_versions( self, id: str, *, - status: typing.Optional[VersionStatus] = None, - include_datapoints: typing.Optional[typing.Literal["latest_committed"]] = None, + include_datapoints: typing.Optional[ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints] = None, request_options: typing.Optional[RequestOptions] = None, ) -> ListDatasets: """ @@ -1741,11 +1376,8 @@ async def list_versions( id : str Unique identifier for Dataset. - status : typing.Optional[VersionStatus] - Filter versions by status: 'uncommitted', 'committed'. If no status is provided, all versions are returned. - - include_datapoints : typing.Optional[typing.Literal["latest_committed"]] - If set to 'latest_committed', include the Datapoints for the latest committed version. Defaults to `None`. + include_datapoints : typing.Optional[ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints] + If set to 'latest_saved', include datapoints for the latest saved version. Alternatively, 'latest_committed' (deprecated) includes datapoints for the latest committed version only. request_options : typing.Optional[RequestOptions] Request-specific configuration. 
@@ -1769,52 +1401,21 @@ async def list_versions( async def main() -> None: await client.datasets.list_versions( id="ds_b0baF1ca7652", - status="committed", ) asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"datasets/{jsonable_encoder(id)}/versions", - method="GET", - params={ - "status": status, - "include_datapoints": include_datapoints, - }, - request_options=request_options, + response = await self._raw_client.list_versions( + id, include_datapoints=include_datapoints, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - ListDatasets, - construct_type( - type_=ListDatasets, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data - async def commit( - self, id: str, version_id: str, *, commit_message: str, request_options: typing.Optional[RequestOptions] = None - ) -> DatasetResponse: + async def delete_dataset_version( + self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> None: """ - Commit a version of the Dataset with a commit message. - - If the version is already committed, an exception will be raised. + Delete a version of the Dataset. Parameters ---------- @@ -1824,16 +1425,12 @@ async def commit( version_id : str Unique identifier for the specific version of the Dataset. - commit_message : str - Message describing the changes made. - request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - DatasetResponse - Successful Response + None Examples -------- @@ -1847,53 +1444,28 @@ async def commit( async def main() -> None: - await client.datasets.commit( - id="ds_b0baF1ca7652", - version_id="dsv_6L78pqrdFi2xa", - commit_message="initial commit", + await client.datasets.delete_dataset_version( + id="id", + version_id="version_id", ) asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"datasets/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}/commit", - method="POST", - json={ - "commit_message": commit_message, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - DatasetResponse, - construct_type( - type_=DatasetResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = await self._raw_client.delete_dataset_version(id, version_id, request_options=request_options) + return response.data - async def delete_dataset_version( - self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> None: + async def update_dataset_version( + self, + id: str, + version_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> DatasetResponse: """ - Delete a version of the Dataset. + Update the name or description of the Dataset version. Parameters ---------- @@ -1903,12 +1475,19 @@ async def delete_dataset_version( version_id : str Unique identifier for the specific version of the Dataset. + name : typing.Optional[str] + Name of the version. + + description : typing.Optional[str] + Description of the version. + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - None + DatasetResponse + Successful Response Examples -------- @@ -1922,7 +1501,7 @@ async def delete_dataset_version( async def main() -> None: - await client.datasets.delete_dataset_version( + await client.datasets.update_dataset_version( id="id", version_id="version_id", ) @@ -1930,49 +1509,35 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"datasets/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", - method="DELETE", - request_options=request_options, + response = await self._raw_client.update_dataset_version( + id, version_id, name=name, description=description, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data async def upload_csv( self, id: str, *, file: core.File, - commit_message: str, version_id: typing.Optional[str] = None, environment: typing.Optional[str] = None, + version_name: typing.Optional[str] = OMIT, + version_description: typing.Optional[str] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> DatasetResponse: """ Add Datapoints from a CSV file to a Dataset. - This will create a new committed version of the Dataset with the Datapoints from the CSV file. + This will create a new version of the Dataset with the Datapoints from the CSV file. If either `version_id` or `environment` is provided, the new version will be based on the specified version, with the Datapoints from the CSV file added to the existing Datapoints in the version. If neither `version_id` nor `environment` is provided, the new version will be based on the version of the Dataset that is deployed to the default Environment. + You can optionally provide a name and description for the new version using `version_name` + and `version_description` parameters. + Parameters ---------- id : str @@ -1981,15 +1546,18 @@ async def upload_csv( file : core.File See core.File for more documentation - commit_message : str - Commit message for the new Dataset version. - version_id : typing.Optional[str] ID of the specific Dataset version to base the created Version on. environment : typing.Optional[str] Name of the Environment identifying a deployed Version to base the created Version on. + version_name : typing.Optional[str] + Name for the new Dataset version. + + version_description : typing.Optional[str] + Description for the new Dataset version. + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
@@ -2012,51 +1580,21 @@ async def upload_csv( async def main() -> None: await client.datasets.upload_csv( id="id", - commit_message="commit_message", ) asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"datasets/{jsonable_encoder(id)}/datapoints/csv", - method="POST", - params={ - "version_id": version_id, - "environment": environment, - }, - data={ - "commit_message": commit_message, - }, - files={ - "file": file, - }, + response = await self._raw_client.upload_csv( + id, + file=file, + version_id=version_id, + environment=environment, + version_name=version_name, + version_description=version_description, request_options=request_options, - omit=OMIT, ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - DatasetResponse, - construct_type( - type_=DatasetResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data async def set_deployment( self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None @@ -2106,37 +1644,10 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"datasets/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", - method="POST", - params={ - "version_id": version_id, - }, - request_options=request_options, + response = await self._raw_client.set_deployment( + id, environment_id, version_id=version_id, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - DatasetResponse, - construct_type( - type_=DatasetResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data async def remove_deployment( self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None @@ -2181,28 +1692,8 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"datasets/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = await self._raw_client.remove_deployment(id, environment_id, request_options=request_options) + return response.data 
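
Taken together, the dataset methods above replace the old commit-based flow with named, described versions. The sketch below shows how that new surface fits together; it is illustrative rather than part of the generated code. It assumes the async client is exported as `AsyncHumanloop`, and the path, datapoints, and `version_id` attribute access are placeholders, while the method names and parameters (`version_name`, `version_description`, `include_datapoints="latest_saved"`, `update_dataset_version`, `delete_dataset_version`) come straight from the signatures in this diff.

```python
import asyncio

from humanloop import AsyncHumanloop  # assumed export name for the async client

client = AsyncHumanloop(api_key="YOUR_API_KEY")


async def main() -> None:
    # Create a named version instead of passing a commit_message.
    dataset = await client.datasets.upsert(
        path="test-questions",  # placeholder path
        datapoints=[{"inputs": {"question": "What is the capital of France?"}}],
        action="set",
        version_name="v1-initial-questions",
        version_description="Seed the Dataset with a single question.",
    )

    # "latest_saved" includes Datapoints for the latest saved version;
    # "latest_committed" is still accepted but deprecated.
    await client.datasets.list_versions(
        id=dataset.id,
        include_datapoints="latest_saved",
    )

    # Rename or re-describe an existing version (replaces the old commit endpoint).
    await client.datasets.update_dataset_version(
        id=dataset.id,
        version_id=dataset.version_id,  # assumed attribute on DatasetResponse
        name="v1-reviewed",
        description="Renamed after review.",
    )

    # Delete a version that is no longer needed.
    await client.datasets.delete_dataset_version(
        id=dataset.id,
        version_id=dataset.version_id,
    )


asyncio.run(main())
```

Because version names must be unique within a Dataset, re-running the `upsert` call above with the same `version_name` should return a 409 Conflict rather than silently creating a duplicate version.
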
async def list_environments( self, id: str, *, request_options: typing.Optional[RequestOptions] = None @@ -2242,31 +1733,5 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"datasets/{jsonable_encoder(id)}/environments", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - typing.List[FileEnvironmentResponse], - construct_type( - type_=typing.List[FileEnvironmentResponse], # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = await self._raw_client.list_environments(id, request_options=request_options) + return response.data diff --git a/src/humanloop/datasets/raw_client.py b/src/humanloop/datasets/raw_client.py new file mode 100644 index 00000000..5cc9785c --- /dev/null +++ b/src/humanloop/datasets/raw_client.py @@ -0,0 +1,1509 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ..core.client_wrapper import SyncClientWrapper +from ..requests.create_datapoint_request import CreateDatapointRequestParams +from ..types.update_dateset_action import UpdateDatesetAction +from ..core.request_options import RequestOptions +from ..core.http_response import HttpResponse +from ..types.dataset_response import DatasetResponse +from ..core.serialization import convert_and_respect_annotation_metadata +from ..core.unchecked_base_model import construct_type +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..types.http_validation_error import HttpValidationError +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError +from ..core.jsonable_encoder import jsonable_encoder +from .types.list_versions_datasets_id_versions_get_request_include_datapoints import ( + ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints, +) +from ..types.list_datasets import ListDatasets +from .. import core +from ..types.file_environment_response import FileEnvironmentResponse +from ..core.client_wrapper import AsyncClientWrapper +from ..core.http_response import AsyncHttpResponse + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) + + +class RawDatasetsClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def upsert( + self, + *, + datapoints: typing.Sequence[CreateDatapointRequestParams], + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + include_datapoints: typing.Optional[bool] = None, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + action: typing.Optional[UpdateDatesetAction] = OMIT, + attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + version_name: typing.Optional[str] = OMIT, + version_description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[DatasetResponse]: + """ + Create a Dataset or update it with a new version if it already exists. + + Datasets are identified by the `ID` or their `path`. 
The datapoints determine the versions of the Dataset. + + By default, the new Dataset version will be set to the list of Datapoints provided in + the request. You can also create a new version by adding or removing Datapoints from an existing version + by specifying `action` as `add` or `remove` respectively. In this case, you may specify + the `version_id` or `environment` query parameters to identify the existing version to base + the new version on. If neither is provided, the latest created version will be used. + + You can provide `version_name` and `version_description` to identify and describe your versions. + Version names must be unique within a Dataset - attempting to create a version with a name + that already exists will result in a 409 Conflict error. + + Humanloop also deduplicates Datapoints. If you try to add a Datapoint that already + exists, it will be ignored. If you intentionally want to add a duplicate Datapoint, + you can add a unique identifier to the Datapoint's inputs such as `{_dedupe_id: }`. + + Parameters + ---------- + datapoints : typing.Sequence[CreateDatapointRequestParams] + The Datapoints to create this Dataset version with. Modify the `action` field to determine how these Datapoints are used. + + version_id : typing.Optional[str] + ID of the specific Dataset version to base the created Version on. Only used when `action` is `"add"` or `"remove"`. + + environment : typing.Optional[str] + Name of the Environment identifying a deployed Version to base the created Version on. Only used when `action` is `"add"` or `"remove"`. + + include_datapoints : typing.Optional[bool] + If set to `true`, include all Datapoints in the response. Defaults to `false`. Consider using the paginated List Datapoints endpoint instead. + + path : typing.Optional[str] + Path of the Dataset, including the name. This locates the Dataset in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Dataset. + + action : typing.Optional[UpdateDatesetAction] + The action to take with the provided Datapoints. + + - If `"set"`, the created version will only contain the Datapoints provided in this request. + - If `"add"`, the created version will contain the Datapoints provided in this request in addition to the Datapoints in the target version. + - If `"remove"`, the created version will contain the Datapoints in the target version except for the Datapoints provided in this request. + + If `"add"` or `"remove"`, one of the `version_id` or `environment` query parameters may be provided. + + attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Additional fields to describe the Dataset. Helpful to separate Dataset versions from each other with details on how they were created or used. + + version_name : typing.Optional[str] + Unique name for the Dataset version. Version names must be unique for a given Dataset. + + version_description : typing.Optional[str] + Description of the version, e.g., the changes made in this version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[DatasetResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + "datasets", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + "include_datapoints": include_datapoints, + }, + json={ + "path": path, + "id": id, + "datapoints": convert_and_respect_annotation_metadata( + object_=datapoints, annotation=typing.Sequence[CreateDatapointRequestParams], direction="write" + ), + "action": action, + "attributes": attributes, + "version_name": version_name, + "version_description": version_description, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + DatasetResponse, + construct_type( + type_=DatasetResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + include_datapoints: typing.Optional[bool] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[DatasetResponse]: + """ + Retrieve the Dataset with the given ID. + + Unless `include_datapoints` is set to `true`, the response will not include + the Datapoints. + Use the List Datapoints endpoint (`GET /{id}/datapoints`) to efficiently + retrieve Datapoints for a large Dataset. + + By default, the deployed version of the Dataset is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Dataset. + + Parameters + ---------- + id : str + Unique identifier for Dataset. + + version_id : typing.Optional[str] + A specific Version ID of the Dataset to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + include_datapoints : typing.Optional[bool] + If set to `true`, include all Datapoints in the response. Defaults to `false`. Consider using the paginated List Datapoints endpoint instead. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[DatasetResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"datasets/{jsonable_encoder(id)}", + method="GET", + params={ + "version_id": version_id, + "environment": environment, + "include_datapoints": include_datapoints, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + DatasetResponse, + construct_type( + type_=DatasetResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> HttpResponse[None]: + """ + Delete the Dataset with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Dataset. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[None] + """ + _response = self._client_wrapper.httpx_client.request( + f"datasets/{jsonable_encoder(id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return HttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def move( + self, + id: str, + *, + path: typing.Optional[str] = OMIT, + name: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[DatasetResponse]: + """ + Move the Dataset to a different path or change the name. + + Parameters + ---------- + id : str + Unique identifier for Dataset. + + path : typing.Optional[str] + Path of the Dataset including the Dataset name, which is used as a unique identifier. + + name : typing.Optional[str] + Name of the Dataset, which is used as a unique identifier. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[DatasetResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"datasets/{jsonable_encoder(id)}", + method="PATCH", + json={ + "path": path, + "name": name, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + DatasetResponse, + construct_type( + type_=DatasetResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def list_versions( + self, + id: str, + *, + include_datapoints: typing.Optional[ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[ListDatasets]: + """ + Get a list of the versions for a Dataset. + + Parameters + ---------- + id : str + Unique identifier for Dataset. + + include_datapoints : typing.Optional[ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints] + If set to 'latest_saved', include datapoints for the latest saved version. Alternatively, 'latest_committed' (deprecated) includes datapoints for the latest committed version only. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[ListDatasets] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"datasets/{jsonable_encoder(id)}/versions", + method="GET", + params={ + "include_datapoints": include_datapoints, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + ListDatasets, + construct_type( + type_=ListDatasets, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def delete_dataset_version( + self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[None]: + """ + Delete a version of the Dataset. + + Parameters + ---------- + id : str + Unique identifier for Dataset. + + version_id : str + Unique identifier for the specific version of the Dataset. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[None] + """ + _response = self._client_wrapper.httpx_client.request( + f"datasets/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return HttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def update_dataset_version( + self, + id: str, + version_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[DatasetResponse]: + """ + Update the name or description of the Dataset version. + + Parameters + ---------- + id : str + Unique identifier for Dataset. + + version_id : str + Unique identifier for the specific version of the Dataset. + + name : typing.Optional[str] + Name of the version. + + description : typing.Optional[str] + Description of the version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[DatasetResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"datasets/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", + method="PATCH", + json={ + "name": name, + "description": description, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + DatasetResponse, + construct_type( + type_=DatasetResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def upload_csv( + self, + id: str, + *, + file: core.File, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + version_name: typing.Optional[str] = OMIT, + version_description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[DatasetResponse]: + """ + Add Datapoints from a CSV file to a Dataset. + + This will create a new version of the Dataset with the Datapoints from the CSV file. + + If either `version_id` or `environment` is provided, the new version will be based on the specified version, + with the Datapoints from the CSV file added to the existing Datapoints in the version. + If neither `version_id` nor `environment` is provided, the new version will be based on the version + of the Dataset that is deployed to the default Environment. + + You can optionally provide a name and description for the new version using `version_name` + and `version_description` parameters. 
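
As a quick illustration of the CSV flow described above (not taken from the generated code): the file name and Dataset ID below are placeholders, and passing an open binary file object for `file` is an assumption about how `core.File` is typically used. The example calls the high-level client; the raw client's `upload_csv` takes the same parameters and returns an `HttpResponse[DatasetResponse]` instead of the bare model.

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Upload a CSV as a new, optionally named Dataset version.
with open("questions.csv", "rb") as f:  # placeholder local file
    dataset = client.datasets.upload_csv(
        id="ds_b0baF1ca7652",  # placeholder Dataset ID
        file=f,
        version_name="csv-import",  # optional; must be unique for this Dataset
        version_description="Datapoints imported from questions.csv.",
    )
```
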
+ + Parameters + ---------- + id : str + Unique identifier for the Dataset + + file : core.File + See core.File for more documentation + + version_id : typing.Optional[str] + ID of the specific Dataset version to base the created Version on. + + environment : typing.Optional[str] + Name of the Environment identifying a deployed Version to base the created Version on. + + version_name : typing.Optional[str] + Name for the new Dataset version. + + version_description : typing.Optional[str] + Description for the new Dataset version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[DatasetResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"datasets/{jsonable_encoder(id)}/datapoints/csv", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + }, + data={ + "version_name": version_name, + "version_description": version_description, + }, + files={ + "file": file, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + DatasetResponse, + construct_type( + type_=DatasetResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def set_deployment( + self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[DatasetResponse]: + """ + Deploy Dataset to Environment. + + Set the deployed version for the specified Environment. + + Parameters + ---------- + id : str + Unique identifier for Dataset. + + environment_id : str + Unique identifier for the Environment to deploy the Version to. + + version_id : str + Unique identifier for the specific version of the Dataset. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[DatasetResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"datasets/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="POST", + params={ + "version_id": version_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + DatasetResponse, + construct_type( + type_=DatasetResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def remove_deployment( + self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[None]: + """ + Remove deployed Dataset from Environment. 
+ + Remove the deployed version for the specified Environment. + + Parameters + ---------- + id : str + Unique identifier for Dataset. + + environment_id : str + Unique identifier for the Environment to remove the deployment from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[None] + """ + _response = self._client_wrapper.httpx_client.request( + f"datasets/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return HttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def list_environments( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[typing.List[FileEnvironmentResponse]]: + """ + List all Environments and their deployed versions for the Dataset. + + Parameters + ---------- + id : str + Unique identifier for Dataset. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[typing.List[FileEnvironmentResponse]] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"datasets/{jsonable_encoder(id)}/environments", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + typing.List[FileEnvironmentResponse], + construct_type( + type_=typing.List[FileEnvironmentResponse], # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + +class AsyncRawDatasetsClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def upsert( + self, + *, + datapoints: typing.Sequence[CreateDatapointRequestParams], + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + include_datapoints: typing.Optional[bool] = None, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + action: typing.Optional[UpdateDatesetAction] = OMIT, + attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + version_name: typing.Optional[str] = OMIT, + version_description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[DatasetResponse]: + """ + Create a Dataset or update it with a new version if it already exists. + + Datasets are identified by the `ID` or their `path`. The datapoints determine the versions of the Dataset. + + By default, the new Dataset version will be set to the list of Datapoints provided in + the request. 
You can also create a new version by adding or removing Datapoints from an existing version + by specifying `action` as `add` or `remove` respectively. In this case, you may specify + the `version_id` or `environment` query parameters to identify the existing version to base + the new version on. If neither is provided, the latest created version will be used. + + You can provide `version_name` and `version_description` to identify and describe your versions. + Version names must be unique within a Dataset - attempting to create a version with a name + that already exists will result in a 409 Conflict error. + + Humanloop also deduplicates Datapoints. If you try to add a Datapoint that already + exists, it will be ignored. If you intentionally want to add a duplicate Datapoint, + you can add a unique identifier to the Datapoint's inputs such as `{_dedupe_id: }`. + + Parameters + ---------- + datapoints : typing.Sequence[CreateDatapointRequestParams] + The Datapoints to create this Dataset version with. Modify the `action` field to determine how these Datapoints are used. + + version_id : typing.Optional[str] + ID of the specific Dataset version to base the created Version on. Only used when `action` is `"add"` or `"remove"`. + + environment : typing.Optional[str] + Name of the Environment identifying a deployed Version to base the created Version on. Only used when `action` is `"add"` or `"remove"`. + + include_datapoints : typing.Optional[bool] + If set to `true`, include all Datapoints in the response. Defaults to `false`. Consider using the paginated List Datapoints endpoint instead. + + path : typing.Optional[str] + Path of the Dataset, including the name. This locates the Dataset in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Dataset. + + action : typing.Optional[UpdateDatesetAction] + The action to take with the provided Datapoints. + + - If `"set"`, the created version will only contain the Datapoints provided in this request. + - If `"add"`, the created version will contain the Datapoints provided in this request in addition to the Datapoints in the target version. + - If `"remove"`, the created version will contain the Datapoints in the target version except for the Datapoints provided in this request. + + If `"add"` or `"remove"`, one of the `version_id` or `environment` query parameters may be provided. + + attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Additional fields to describe the Dataset. Helpful to separate Dataset versions from each other with details on how they were created or used. + + version_name : typing.Optional[str] + Unique name for the Dataset version. Version names must be unique for a given Dataset. + + version_description : typing.Optional[str] + Description of the version, e.g., the changes made in this version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[DatasetResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + "datasets", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + "include_datapoints": include_datapoints, + }, + json={ + "path": path, + "id": id, + "datapoints": convert_and_respect_annotation_metadata( + object_=datapoints, annotation=typing.Sequence[CreateDatapointRequestParams], direction="write" + ), + "action": action, + "attributes": attributes, + "version_name": version_name, + "version_description": version_description, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + DatasetResponse, + construct_type( + type_=DatasetResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + include_datapoints: typing.Optional[bool] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[DatasetResponse]: + """ + Retrieve the Dataset with the given ID. + + Unless `include_datapoints` is set to `true`, the response will not include + the Datapoints. + Use the List Datapoints endpoint (`GET /{id}/datapoints`) to efficiently + retrieve Datapoints for a large Dataset. + + By default, the deployed version of the Dataset is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Dataset. + + Parameters + ---------- + id : str + Unique identifier for Dataset. + + version_id : typing.Optional[str] + A specific Version ID of the Dataset to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + include_datapoints : typing.Optional[bool] + If set to `true`, include all Datapoints in the response. Defaults to `false`. Consider using the paginated List Datapoints endpoint instead. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[DatasetResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"datasets/{jsonable_encoder(id)}", + method="GET", + params={ + "version_id": version_id, + "environment": environment, + "include_datapoints": include_datapoints, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + DatasetResponse, + construct_type( + type_=DatasetResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def delete( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[None]: + """ + Delete the Dataset with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Dataset. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[None] + """ + _response = await self._client_wrapper.httpx_client.request( + f"datasets/{jsonable_encoder(id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return AsyncHttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def move( + self, + id: str, + *, + path: typing.Optional[str] = OMIT, + name: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[DatasetResponse]: + """ + Move the Dataset to a different path or change the name. + + Parameters + ---------- + id : str + Unique identifier for Dataset. + + path : typing.Optional[str] + Path of the Dataset including the Dataset name, which is used as a unique identifier. + + name : typing.Optional[str] + Name of the Dataset, which is used as a unique identifier. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[DatasetResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"datasets/{jsonable_encoder(id)}", + method="PATCH", + json={ + "path": path, + "name": name, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + DatasetResponse, + construct_type( + type_=DatasetResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def list_versions( + self, + id: str, + *, + include_datapoints: typing.Optional[ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[ListDatasets]: + """ + Get a list of the versions for a Dataset. + + Parameters + ---------- + id : str + Unique identifier for Dataset. + + include_datapoints : typing.Optional[ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints] + If set to 'latest_saved', include datapoints for the latest saved version. Alternatively, 'latest_committed' (deprecated) includes datapoints for the latest committed version only. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[ListDatasets] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"datasets/{jsonable_encoder(id)}/versions", + method="GET", + params={ + "include_datapoints": include_datapoints, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + ListDatasets, + construct_type( + type_=ListDatasets, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def delete_dataset_version( + self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[None]: + """ + Delete a version of the Dataset. + + Parameters + ---------- + id : str + Unique identifier for Dataset. + + version_id : str + Unique identifier for the specific version of the Dataset. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[None] + """ + _response = await self._client_wrapper.httpx_client.request( + f"datasets/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return AsyncHttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def update_dataset_version( + self, + id: str, + version_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[DatasetResponse]: + """ + Update the name or description of the Dataset version. + + Parameters + ---------- + id : str + Unique identifier for Dataset. + + version_id : str + Unique identifier for the specific version of the Dataset. + + name : typing.Optional[str] + Name of the version. + + description : typing.Optional[str] + Description of the version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[DatasetResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"datasets/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", + method="PATCH", + json={ + "name": name, + "description": description, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + DatasetResponse, + construct_type( + type_=DatasetResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def upload_csv( + self, + id: str, + *, + file: core.File, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + version_name: typing.Optional[str] = OMIT, + version_description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[DatasetResponse]: + """ + Add Datapoints from a CSV file to a Dataset. + + This will create a new version of the Dataset with the Datapoints from the CSV file. + + If either `version_id` or `environment` is provided, the new version will be based on the specified version, + with the Datapoints from the CSV file added to the existing Datapoints in the version. + If neither `version_id` nor `environment` is provided, the new version will be based on the version + of the Dataset that is deployed to the default Environment. + + You can optionally provide a name and description for the new version using `version_name` + and `version_description` parameters. 
+ + Parameters + ---------- + id : str + Unique identifier for the Dataset + + file : core.File + See core.File for more documentation + + version_id : typing.Optional[str] + ID of the specific Dataset version to base the created Version on. + + environment : typing.Optional[str] + Name of the Environment identifying a deployed Version to base the created Version on. + + version_name : typing.Optional[str] + Name for the new Dataset version. + + version_description : typing.Optional[str] + Description for the new Dataset version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[DatasetResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"datasets/{jsonable_encoder(id)}/datapoints/csv", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + }, + data={ + "version_name": version_name, + "version_description": version_description, + }, + files={ + "file": file, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + DatasetResponse, + construct_type( + type_=DatasetResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def set_deployment( + self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[DatasetResponse]: + """ + Deploy Dataset to Environment. + + Set the deployed version for the specified Environment. + + Parameters + ---------- + id : str + Unique identifier for Dataset. + + environment_id : str + Unique identifier for the Environment to deploy the Version to. + + version_id : str + Unique identifier for the specific version of the Dataset. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[DatasetResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"datasets/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="POST", + params={ + "version_id": version_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + DatasetResponse, + construct_type( + type_=DatasetResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def remove_deployment( + self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[None]: + """ + Remove deployed Dataset from Environment. + + Remove the deployed version for the specified Environment. + + Parameters + ---------- + id : str + Unique identifier for Dataset. + + environment_id : str + Unique identifier for the Environment to remove the deployment from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[None] + """ + _response = await self._client_wrapper.httpx_client.request( + f"datasets/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return AsyncHttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def list_environments( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[typing.List[FileEnvironmentResponse]]: + """ + List all Environments and their deployed versions for the Dataset. + + Parameters + ---------- + id : str + Unique identifier for Dataset. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[typing.List[FileEnvironmentResponse]] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"datasets/{jsonable_encoder(id)}/environments", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + typing.List[FileEnvironmentResponse], + construct_type( + type_=typing.List[FileEnvironmentResponse], # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/humanloop/datasets/types/__init__.py b/src/humanloop/datasets/types/__init__.py new file mode 100644 index 00000000..a84489fe --- /dev/null +++ b/src/humanloop/datasets/types/__init__.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +from .list_versions_datasets_id_versions_get_request_include_datapoints import ( + ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints, +) + +__all__ = ["ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints"] diff --git a/src/humanloop/datasets/types/list_versions_datasets_id_versions_get_request_include_datapoints.py b/src/humanloop/datasets/types/list_versions_datasets_id_versions_get_request_include_datapoints.py new file mode 100644 index 00000000..6c04f917 --- /dev/null +++ b/src/humanloop/datasets/types/list_versions_datasets_id_versions_get_request_include_datapoints.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +ListVersionsDatasetsIdVersionsGetRequestIncludeDatapoints = typing.Union[ + typing.Literal["latest_committed", "latest_saved"], typing.Any +] diff --git a/src/humanloop/directories/client.py b/src/humanloop/directories/client.py index 49515698..1d5383f3 100644 --- a/src/humanloop/directories/client.py +++ b/src/humanloop/directories/client.py @@ -2,16 +2,12 @@ import typing from ..core.client_wrapper import SyncClientWrapper +from .raw_client import RawDirectoriesClient from ..core.request_options import RequestOptions from ..types.directory_response import DirectoryResponse -from ..core.unchecked_base_model import construct_type -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.http_validation_error import HttpValidationError -from json.decoder import JSONDecodeError -from ..core.api_error import ApiError from ..types.directory_with_parents_and_children_response import DirectoryWithParentsAndChildrenResponse -from ..core.jsonable_encoder import jsonable_encoder from ..core.client_wrapper import AsyncClientWrapper +from .raw_client import AsyncRawDirectoriesClient # this is used as the default value for optional parameters OMIT = typing.cast(typing.Any, ...) @@ -19,7 +15,18 @@ class DirectoriesClient: def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper + self._raw_client = RawDirectoriesClient(client_wrapper=client_wrapper) + + @property + def with_raw_response(self) -> RawDirectoriesClient: + """ + Retrieves a raw implementation of this client that returns raw responses. 
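# --- Editor's note: illustrative usage sketch, not part of the generated diff. ---
# The refactor above makes DirectoriesClient delegate to RawDirectoriesClient and
# adds a `with_raw_response` property for callers who want the HttpResponse
# wrapper rather than only the parsed body. Names follow the docstring examples
# in this file.
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Convenience path: parsed models only.
directories = client.directories.list()  # typing.List[DirectoryResponse]

# Raw path: HttpResponse[...] whose .data holds the same parsed models.
raw = client.directories.with_raw_response.list()
print(len(directories), len(raw.data))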
+ + Returns + ------- + RawDirectoriesClient + """ + return self._raw_client def list(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.List[DirectoryResponse]: """ @@ -44,34 +51,8 @@ def list(self, *, request_options: typing.Optional[RequestOptions] = None) -> ty ) client.directories.list() """ - _response = self._client_wrapper.httpx_client.request( - "directories", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - typing.List[DirectoryResponse], - construct_type( - type_=typing.List[DirectoryResponse], # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = self._raw_client.list(request_options=request_options) + return response.data def create( self, @@ -112,43 +93,8 @@ def create( ) client.directories.create() """ - _response = self._client_wrapper.httpx_client.request( - "directories", - method="POST", - json={ - "name": name, - "parent_id": parent_id, - "path": path, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - DirectoryResponse, - construct_type( - type_=DirectoryResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = self._raw_client.create(name=name, parent_id=parent_id, path=path, request_options=request_options) + return response.data def get( self, id: str, *, request_options: typing.Optional[RequestOptions] = None @@ -180,34 +126,8 @@ def get( id="id", ) """ - _response = self._client_wrapper.httpx_client.request( - f"directories/{jsonable_encoder(id)}", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - DirectoryWithParentsAndChildrenResponse, - construct_type( - type_=DirectoryWithParentsAndChildrenResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = self._raw_client.get(id, request_options=request_options) + return response.data def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: """ @@ -238,28 +158,8 @@ def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = id="id", ) """ - _response = 
self._client_wrapper.httpx_client.request( - f"directories/{jsonable_encoder(id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = self._raw_client.delete(id, request_options=request_options) + return response.data def update( self, @@ -306,48 +206,26 @@ def update( id="id", ) """ - _response = self._client_wrapper.httpx_client.request( - f"directories/{jsonable_encoder(id)}", - method="PATCH", - json={ - "name": name, - "parent_id": parent_id, - "path": path, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, + response = self._raw_client.update( + id, name=name, parent_id=parent_id, path=path, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - DirectoryResponse, - construct_type( - type_=DirectoryResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data class AsyncDirectoriesClient: def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper + self._raw_client = AsyncRawDirectoriesClient(client_wrapper=client_wrapper) + + @property + def with_raw_response(self) -> AsyncRawDirectoriesClient: + """ + Retrieves a raw implementation of this client that returns raw responses. 
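# --- Editor's note: illustrative usage sketch, not part of the generated diff. ---
# Async counterpart of the pattern above. Assumes the async client is exposed as
# AsyncHumanloop (matching the async docstring examples in this SDK) and that
# DirectoryResponse exposes an `id` field, as the delete/get docstrings imply.
import asyncio

from humanloop import AsyncHumanloop

client = AsyncHumanloop(api_key="YOUR_API_KEY")


async def main() -> None:
    directory = await client.directories.create(path="path/to/directory")
    raw = await client.directories.with_raw_response.get(directory.id)
    print(raw.data)


asyncio.run(main())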
+ + Returns + ------- + AsyncRawDirectoriesClient + """ + return self._raw_client async def list(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.List[DirectoryResponse]: """ @@ -380,34 +258,8 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - "directories", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - typing.List[DirectoryResponse], - construct_type( - type_=typing.List[DirectoryResponse], # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = await self._raw_client.list(request_options=request_options) + return response.data async def create( self, @@ -456,43 +308,10 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - "directories", - method="POST", - json={ - "name": name, - "parent_id": parent_id, - "path": path, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, + response = await self._raw_client.create( + name=name, parent_id=parent_id, path=path, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - DirectoryResponse, - construct_type( - type_=DirectoryResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data async def get( self, id: str, *, request_options: typing.Optional[RequestOptions] = None @@ -532,34 +351,8 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"directories/{jsonable_encoder(id)}", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - DirectoryWithParentsAndChildrenResponse, - construct_type( - type_=DirectoryWithParentsAndChildrenResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = await self._raw_client.get(id, request_options=request_options) + return response.data async def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: """ @@ -598,28 +391,8 @@ async def main() -> None: asyncio.run(main()) """ - _response = await 
self._client_wrapper.httpx_client.request( - f"directories/{jsonable_encoder(id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = await self._raw_client.delete(id, request_options=request_options) + return response.data async def update( self, @@ -674,40 +447,7 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"directories/{jsonable_encoder(id)}", - method="PATCH", - json={ - "name": name, - "parent_id": parent_id, - "path": path, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, + response = await self._raw_client.update( + id, name=name, parent_id=parent_id, path=path, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - DirectoryResponse, - construct_type( - type_=DirectoryResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data diff --git a/src/humanloop/directories/raw_client.py b/src/humanloop/directories/raw_client.py new file mode 100644 index 00000000..36f4b188 --- /dev/null +++ b/src/humanloop/directories/raw_client.py @@ -0,0 +1,587 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ..core.client_wrapper import SyncClientWrapper +from ..core.request_options import RequestOptions +from ..core.http_response import HttpResponse +from ..types.directory_response import DirectoryResponse +from ..core.unchecked_base_model import construct_type +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..types.http_validation_error import HttpValidationError +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError +from ..types.directory_with_parents_and_children_response import DirectoryWithParentsAndChildrenResponse +from ..core.jsonable_encoder import jsonable_encoder +from ..core.client_wrapper import AsyncClientWrapper +from ..core.http_response import AsyncHttpResponse + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) + + +class RawDirectoriesClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def list( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[typing.List[DirectoryResponse]]: + """ + Retrieve a list of all Directories. + + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[typing.List[DirectoryResponse]] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + "directories", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + typing.List[DirectoryResponse], + construct_type( + type_=typing.List[DirectoryResponse], # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def create( + self, + *, + name: typing.Optional[str] = OMIT, + parent_id: typing.Optional[str] = OMIT, + path: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[DirectoryResponse]: + """ + Creates a Directory. + + Parameters + ---------- + name : typing.Optional[str] + Name of the directory to create. + + parent_id : typing.Optional[str] + ID of the parent directory. Starts with `dir_`. + + path : typing.Optional[str] + Path to create the directory in, relative to the root directory. If the path does not exist, it will be created. Includes name, e.g. `path/to/directory`. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[DirectoryResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + "directories", + method="POST", + json={ + "name": name, + "parent_id": parent_id, + "path": path, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + DirectoryResponse, + construct_type( + type_=DirectoryResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[DirectoryWithParentsAndChildrenResponse]: + """ + Fetches a directory by ID. + + Parameters + ---------- + id : str + String ID of directory. Starts with `dir_`. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[DirectoryWithParentsAndChildrenResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"directories/{jsonable_encoder(id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + DirectoryWithParentsAndChildrenResponse, + construct_type( + type_=DirectoryWithParentsAndChildrenResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> HttpResponse[None]: + """ + Delete the Directory with the given ID. + + The Directory must be empty (i.e. contain no Directories or Files). + + Parameters + ---------- + id : str + Unique identifier for Directory. Starts with `dir_`. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[None] + """ + _response = self._client_wrapper.httpx_client.request( + f"directories/{jsonable_encoder(id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return HttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def update( + self, + id: str, + *, + name: typing.Optional[str] = OMIT, + parent_id: typing.Optional[str] = OMIT, + path: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[DirectoryResponse]: + """ + Update the Directory with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Directory. Starts with `dir_`. + + name : typing.Optional[str] + Name to set for the directory. + + parent_id : typing.Optional[str] + ID of the parent directory. Specify this to move directories. Starts with `dir_`. + + path : typing.Optional[str] + Path to move the directory to, relative to the root directory. Specify this to move directories. Includes name, e.g. `path/to/directory`. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
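# --- Editor's note: illustrative usage sketch, not part of the generated diff. ---
# Renaming or moving a Directory with the update endpoint documented above:
# `name` renames it, while `parent_id` or `path` moves it. The placeholder "id"
# stands in for a real Directory ID (which starts with `dir_`).
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")
moved = client.directories.update(
    id="id",
    path="path/to/directory",  # new location, including the directory name
)
print(moved)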
+ + Returns + ------- + HttpResponse[DirectoryResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"directories/{jsonable_encoder(id)}", + method="PATCH", + json={ + "name": name, + "parent_id": parent_id, + "path": path, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + DirectoryResponse, + construct_type( + type_=DirectoryResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + +class AsyncRawDirectoriesClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def list( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[typing.List[DirectoryResponse]]: + """ + Retrieve a list of all Directories. + + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[typing.List[DirectoryResponse]] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + "directories", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + typing.List[DirectoryResponse], + construct_type( + type_=typing.List[DirectoryResponse], # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def create( + self, + *, + name: typing.Optional[str] = OMIT, + parent_id: typing.Optional[str] = OMIT, + path: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[DirectoryResponse]: + """ + Creates a Directory. + + Parameters + ---------- + name : typing.Optional[str] + Name of the directory to create. + + parent_id : typing.Optional[str] + ID of the parent directory. Starts with `dir_`. + + path : typing.Optional[str] + Path to create the directory in, relative to the root directory. If the path does not exist, it will be created. Includes name, e.g. `path/to/directory`. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[DirectoryResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + "directories", + method="POST", + json={ + "name": name, + "parent_id": parent_id, + "path": path, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + DirectoryResponse, + construct_type( + type_=DirectoryResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[DirectoryWithParentsAndChildrenResponse]: + """ + Fetches a directory by ID. + + Parameters + ---------- + id : str + String ID of directory. Starts with `dir_`. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[DirectoryWithParentsAndChildrenResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"directories/{jsonable_encoder(id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + DirectoryWithParentsAndChildrenResponse, + construct_type( + type_=DirectoryWithParentsAndChildrenResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def delete( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[None]: + """ + Delete the Directory with the given ID. + + The Directory must be empty (i.e. contain no Directories or Files). + + Parameters + ---------- + id : str + Unique identifier for Directory. Starts with `dir_`. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[None] + """ + _response = await self._client_wrapper.httpx_client.request( + f"directories/{jsonable_encoder(id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return AsyncHttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def update( + self, + id: str, + *, + name: typing.Optional[str] = OMIT, + parent_id: typing.Optional[str] = OMIT, + path: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[DirectoryResponse]: + """ + Update the Directory with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Directory. Starts with `dir_`. + + name : typing.Optional[str] + Name to set for the directory. + + parent_id : typing.Optional[str] + ID of the parent directory. Specify this to move directories. Starts with `dir_`. + + path : typing.Optional[str] + Path to move the directory to, relative to the root directory. Specify this to move directories. Includes name, e.g. `path/to/directory`. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[DirectoryResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"directories/{jsonable_encoder(id)}", + method="PATCH", + json={ + "name": name, + "parent_id": parent_id, + "path": path, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + DirectoryResponse, + construct_type( + type_=DirectoryResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/humanloop/evaluations/client.py b/src/humanloop/evaluations/client.py index 3a75a86d..9a32433a 100644 --- a/src/humanloop/evaluations/client.py +++ b/src/humanloop/evaluations/client.py @@ -2,6 +2,7 @@ import typing from ..core.client_wrapper import SyncClientWrapper +from .raw_client import RawEvaluationsClient from ..core.request_options import RequestOptions from ..core.pagination import SyncPager from ..types.evaluation_response import EvaluationResponse @@ -13,9 +14,7 @@ from ..core.api_error import ApiError from .requests.create_evaluation_request_evaluators_item import CreateEvaluationRequestEvaluatorsItemParams from ..requests.file_request import FileRequestParams -from ..core.serialization import convert_and_respect_annotation_metadata from .requests.add_evaluators_request_evaluators_item import AddEvaluatorsRequestEvaluatorsItemParams -from ..core.jsonable_encoder 
import jsonable_encoder from ..types.evaluation_runs_response import EvaluationRunsResponse from .requests.create_run_request_dataset import CreateRunRequestDatasetParams from .requests.create_run_request_version import CreateRunRequestVersionParams @@ -24,6 +23,7 @@ from ..types.evaluation_stats import EvaluationStats from ..types.paginated_data_evaluation_log_response import PaginatedDataEvaluationLogResponse from ..core.client_wrapper import AsyncClientWrapper +from .raw_client import AsyncRawEvaluationsClient from ..core.pagination import AsyncPager # this is used as the default value for optional parameters @@ -32,7 +32,18 @@ class EvaluationsClient: def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper + self._raw_client = RawEvaluationsClient(client_wrapper=client_wrapper) + + @property + def with_raw_response(self) -> RawEvaluationsClient: + """ + Retrieves a raw implementation of this client that returns raw responses. + + Returns + ------- + RawEvaluationsClient + """ + return self._raw_client def list( self, @@ -82,7 +93,7 @@ def list( yield page """ page = page if page is not None else 1 - _response = self._client_wrapper.httpx_client.request( + _response = self._raw_client._client_wrapper.httpx_client.request( "evaluations", method="GET", params={ @@ -170,49 +181,8 @@ def create( evaluators=[{"version_id": "version_id"}], ) """ - _response = self._client_wrapper.httpx_client.request( - "evaluations", - method="POST", - json={ - "file": convert_and_respect_annotation_metadata( - object_=file, annotation=FileRequestParams, direction="write" - ), - "name": name, - "evaluators": convert_and_respect_annotation_metadata( - object_=evaluators, - annotation=typing.Sequence[CreateEvaluationRequestEvaluatorsItemParams], - direction="write", - ), - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - EvaluationResponse, - construct_type( - type_=EvaluationResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = self._raw_client.create(evaluators=evaluators, file=file, name=name, request_options=request_options) + return response.data def add_evaluators( self, @@ -254,45 +224,8 @@ def add_evaluators( evaluators=[{"version_id": "version_id"}], ) """ - _response = self._client_wrapper.httpx_client.request( - f"evaluations/{jsonable_encoder(id)}/evaluators", - method="POST", - json={ - "evaluators": convert_and_respect_annotation_metadata( - object_=evaluators, - annotation=typing.Sequence[AddEvaluatorsRequestEvaluatorsItemParams], - direction="write", - ), - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - EvaluationResponse, - construct_type( - type_=EvaluationResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - 
type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = self._raw_client.add_evaluators(id, evaluators=evaluators, request_options=request_options) + return response.data def remove_evaluator( self, id: str, evaluator_version_id: str, *, request_options: typing.Optional[RequestOptions] = None @@ -330,34 +263,8 @@ def remove_evaluator( evaluator_version_id="evaluator_version_id", ) """ - _response = self._client_wrapper.httpx_client.request( - f"evaluations/{jsonable_encoder(id)}/evaluators/{jsonable_encoder(evaluator_version_id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - EvaluationResponse, - construct_type( - type_=EvaluationResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = self._raw_client.remove_evaluator(id, evaluator_version_id, request_options=request_options) + return response.data def get(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> EvaluationResponse: """ @@ -393,34 +300,8 @@ def get(self, id: str, *, request_options: typing.Optional[RequestOptions] = Non id="ev_567yza", ) """ - _response = self._client_wrapper.httpx_client.request( - f"evaluations/{jsonable_encoder(id)}", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - EvaluationResponse, - construct_type( - type_=EvaluationResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = self._raw_client.get(id, request_options=request_options) + return response.data def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: """ @@ -451,28 +332,8 @@ def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = id="ev_567yza", ) """ - _response = self._client_wrapper.httpx_client.request( - f"evaluations/{jsonable_encoder(id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = 
self._raw_client.delete(id, request_options=request_options) + return response.data def list_runs_for_evaluation( self, id: str, *, request_options: typing.Optional[RequestOptions] = None @@ -504,34 +365,8 @@ def list_runs_for_evaluation( id="id", ) """ - _response = self._client_wrapper.httpx_client.request( - f"evaluations/{jsonable_encoder(id)}/runs", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - EvaluationRunsResponse, - construct_type( - type_=EvaluationRunsResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = self._raw_client.list_runs_for_evaluation(id, request_options=request_options) + return response.data def create_run( self, @@ -595,48 +430,15 @@ def create_run( id="id", ) """ - _response = self._client_wrapper.httpx_client.request( - f"evaluations/{jsonable_encoder(id)}/runs", - method="POST", - json={ - "dataset": convert_and_respect_annotation_metadata( - object_=dataset, annotation=CreateRunRequestDatasetParams, direction="write" - ), - "version": convert_and_respect_annotation_metadata( - object_=version, annotation=CreateRunRequestVersionParams, direction="write" - ), - "orchestrated": orchestrated, - "use_existing_logs": use_existing_logs, - }, - headers={ - "content-type": "application/json", - }, + response = self._raw_client.create_run( + id, + dataset=dataset, + version=version, + orchestrated=orchestrated, + use_existing_logs=use_existing_logs, request_options=request_options, - omit=OMIT, ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - EvaluationRunResponse, - construct_type( - type_=EvaluationRunResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data def add_existing_run( self, id: str, run_id: str, *, request_options: typing.Optional[RequestOptions] = None @@ -675,34 +477,8 @@ def add_existing_run( run_id="run_id", ) """ - _response = self._client_wrapper.httpx_client.request( - f"evaluations/{jsonable_encoder(id)}/runs/{jsonable_encoder(run_id)}", - method="POST", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - typing.Optional[typing.Any], - construct_type( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise 
ApiError(status_code=_response.status_code, body=_response_json) + response = self._raw_client.add_existing_run(id, run_id, request_options=request_options) + return response.data def remove_run(self, id: str, run_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: """ @@ -738,28 +514,8 @@ def remove_run(self, id: str, run_id: str, *, request_options: typing.Optional[R run_id="run_id", ) """ - _response = self._client_wrapper.httpx_client.request( - f"evaluations/{jsonable_encoder(id)}/runs/{jsonable_encoder(run_id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = self._raw_client.remove_run(id, run_id, request_options=request_options) + return response.data def update_evaluation_run( self, @@ -810,42 +566,10 @@ def update_evaluation_run( run_id="run_id", ) """ - _response = self._client_wrapper.httpx_client.request( - f"evaluations/{jsonable_encoder(id)}/runs/{jsonable_encoder(run_id)}", - method="PATCH", - json={ - "control": control, - "status": status, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, + response = self._raw_client.update_evaluation_run( + id, run_id, control=control, status=status, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - EvaluationRunResponse, - construct_type( - type_=EvaluationRunResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data def add_logs_to_run( self, @@ -890,41 +614,8 @@ def add_logs_to_run( log_ids=["log_ids"], ) """ - _response = self._client_wrapper.httpx_client.request( - f"evaluations/{jsonable_encoder(id)}/runs/{jsonable_encoder(run_id)}/logs", - method="POST", - json={ - "log_ids": log_ids, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - EvaluationRunResponse, - construct_type( - type_=EvaluationRunResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = self._raw_client.add_logs_to_run(id, run_id, log_ids=log_ids, request_options=request_options) + return response.data def get_stats(self, id: str, *, 
request_options: typing.Optional[RequestOptions] = None) -> EvaluationStats: """ @@ -957,34 +648,8 @@ def get_stats(self, id: str, *, request_options: typing.Optional[RequestOptions] id="id", ) """ - _response = self._client_wrapper.httpx_client.request( - f"evaluations/{jsonable_encoder(id)}/stats", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - EvaluationStats, - construct_type( - type_=EvaluationStats, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = self._raw_client.get_stats(id, request_options=request_options) + return response.data def get_logs( self, @@ -1033,44 +698,24 @@ def get_logs( id="id", ) """ - _response = self._client_wrapper.httpx_client.request( - f"evaluations/{jsonable_encoder(id)}/logs", - method="GET", - params={ - "page": page, - "size": size, - "run_id": run_id, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - PaginatedDataEvaluationLogResponse, - construct_type( - type_=PaginatedDataEvaluationLogResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = self._raw_client.get_logs(id, page=page, size=size, run_id=run_id, request_options=request_options) + return response.data class AsyncEvaluationsClient: def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper + self._raw_client = AsyncRawEvaluationsClient(client_wrapper=client_wrapper) + + @property + def with_raw_response(self) -> AsyncRawEvaluationsClient: + """ + Retrieves a raw implementation of this client that returns raw responses. 
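# --- Editor's note: illustrative usage sketch, not part of the generated diff. ---
# End-to-end use of the synchronous Evaluations wrapper refactored above: create
# an Evaluation, start a Run, attach existing Logs, then read aggregate stats.
# Argument shapes follow the docstring examples in this file; IDs are
# placeholders, and the sketch assumes EvaluationResponse and
# EvaluationRunResponse expose `.id` (runs are referenced by `run_id` elsewhere
# in this file).
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

evaluation = client.evaluations.create(
    evaluators=[{"version_id": "version_id"}],
)
run = client.evaluations.create_run(id=evaluation.id)
client.evaluations.add_logs_to_run(
    id=evaluation.id,
    run_id=run.id,
    log_ids=["log_id"],
)
stats = client.evaluations.get_stats(id=evaluation.id)
print(stats)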
+ + Returns + ------- + AsyncRawEvaluationsClient + """ + return self._raw_client async def list( self, @@ -1128,7 +773,7 @@ async def main() -> None: asyncio.run(main()) """ page = page if page is not None else 1 - _response = await self._client_wrapper.httpx_client.request( + _response = await self._raw_client._client_wrapper.httpx_client.request( "evaluations", method="GET", params={ @@ -1224,49 +869,10 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - "evaluations", - method="POST", - json={ - "file": convert_and_respect_annotation_metadata( - object_=file, annotation=FileRequestParams, direction="write" - ), - "name": name, - "evaluators": convert_and_respect_annotation_metadata( - object_=evaluators, - annotation=typing.Sequence[CreateEvaluationRequestEvaluatorsItemParams], - direction="write", - ), - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, + response = await self._raw_client.create( + evaluators=evaluators, file=file, name=name, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - EvaluationResponse, - construct_type( - type_=EvaluationResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data async def add_evaluators( self, @@ -1316,45 +922,8 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"evaluations/{jsonable_encoder(id)}/evaluators", - method="POST", - json={ - "evaluators": convert_and_respect_annotation_metadata( - object_=evaluators, - annotation=typing.Sequence[AddEvaluatorsRequestEvaluatorsItemParams], - direction="write", - ), - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - EvaluationResponse, - construct_type( - type_=EvaluationResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = await self._raw_client.add_evaluators(id, evaluators=evaluators, request_options=request_options) + return response.data async def remove_evaluator( self, id: str, evaluator_version_id: str, *, request_options: typing.Optional[RequestOptions] = None @@ -1400,34 +969,8 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"evaluations/{jsonable_encoder(id)}/evaluators/{jsonable_encoder(evaluator_version_id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - EvaluationResponse, - construct_type( - 
type_=EvaluationResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = await self._raw_client.remove_evaluator(id, evaluator_version_id, request_options=request_options) + return response.data async def get(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> EvaluationResponse: """ @@ -1471,34 +1014,8 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"evaluations/{jsonable_encoder(id)}", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - EvaluationResponse, - construct_type( - type_=EvaluationResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = await self._raw_client.get(id, request_options=request_options) + return response.data async def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: """ @@ -1537,28 +1054,8 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"evaluations/{jsonable_encoder(id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = await self._raw_client.delete(id, request_options=request_options) + return response.data async def list_runs_for_evaluation( self, id: str, *, request_options: typing.Optional[RequestOptions] = None @@ -1598,34 +1095,8 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"evaluations/{jsonable_encoder(id)}/runs", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - EvaluationRunsResponse, - construct_type( - type_=EvaluationRunsResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, 
body=_response_json) + response = await self._raw_client.list_runs_for_evaluation(id, request_options=request_options) + return response.data async def create_run( self, @@ -1697,48 +1168,15 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"evaluations/{jsonable_encoder(id)}/runs", - method="POST", - json={ - "dataset": convert_and_respect_annotation_metadata( - object_=dataset, annotation=CreateRunRequestDatasetParams, direction="write" - ), - "version": convert_and_respect_annotation_metadata( - object_=version, annotation=CreateRunRequestVersionParams, direction="write" - ), - "orchestrated": orchestrated, - "use_existing_logs": use_existing_logs, - }, - headers={ - "content-type": "application/json", - }, + response = await self._raw_client.create_run( + id, + dataset=dataset, + version=version, + orchestrated=orchestrated, + use_existing_logs=use_existing_logs, request_options=request_options, - omit=OMIT, ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - EvaluationRunResponse, - construct_type( - type_=EvaluationRunResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data async def add_existing_run( self, id: str, run_id: str, *, request_options: typing.Optional[RequestOptions] = None @@ -1785,34 +1223,8 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"evaluations/{jsonable_encoder(id)}/runs/{jsonable_encoder(run_id)}", - method="POST", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - typing.Optional[typing.Any], - construct_type( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = await self._raw_client.add_existing_run(id, run_id, request_options=request_options) + return response.data async def remove_run( self, id: str, run_id: str, *, request_options: typing.Optional[RequestOptions] = None @@ -1858,28 +1270,8 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"evaluations/{jsonable_encoder(id)}/runs/{jsonable_encoder(run_id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise 
ApiError(status_code=_response.status_code, body=_response_json) + response = await self._raw_client.remove_run(id, run_id, request_options=request_options) + return response.data async def update_evaluation_run( self, @@ -1938,42 +1330,10 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"evaluations/{jsonable_encoder(id)}/runs/{jsonable_encoder(run_id)}", - method="PATCH", - json={ - "control": control, - "status": status, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, + response = await self._raw_client.update_evaluation_run( + id, run_id, control=control, status=status, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - EvaluationRunResponse, - construct_type( - type_=EvaluationRunResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data async def add_logs_to_run( self, @@ -2026,41 +1386,8 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"evaluations/{jsonable_encoder(id)}/runs/{jsonable_encoder(run_id)}/logs", - method="POST", - json={ - "log_ids": log_ids, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - EvaluationRunResponse, - construct_type( - type_=EvaluationRunResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = await self._raw_client.add_logs_to_run(id, run_id, log_ids=log_ids, request_options=request_options) + return response.data async def get_stats(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> EvaluationStats: """ @@ -2101,34 +1428,8 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"evaluations/{jsonable_encoder(id)}/stats", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - EvaluationStats, - construct_type( - type_=EvaluationStats, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = await self._raw_client.get_stats(id, 
request_options=request_options) + return response.data async def get_logs( self, @@ -2185,36 +1486,7 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"evaluations/{jsonable_encoder(id)}/logs", - method="GET", - params={ - "page": page, - "size": size, - "run_id": run_id, - }, - request_options=request_options, + response = await self._raw_client.get_logs( + id, page=page, size=size, run_id=run_id, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - PaginatedDataEvaluationLogResponse, - construct_type( - type_=PaginatedDataEvaluationLogResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data diff --git a/src/humanloop/evaluations/raw_client.py b/src/humanloop/evaluations/raw_client.py new file mode 100644 index 00000000..082a30d2 --- /dev/null +++ b/src/humanloop/evaluations/raw_client.py @@ -0,0 +1,1659 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from ..core.client_wrapper import SyncClientWrapper +from .requests.create_evaluation_request_evaluators_item import CreateEvaluationRequestEvaluatorsItemParams +from ..requests.file_request import FileRequestParams +from ..core.request_options import RequestOptions +from ..core.http_response import HttpResponse +from ..types.evaluation_response import EvaluationResponse +from ..core.serialization import convert_and_respect_annotation_metadata +from ..core.unchecked_base_model import construct_type +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..types.http_validation_error import HttpValidationError +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError +from .requests.add_evaluators_request_evaluators_item import AddEvaluatorsRequestEvaluatorsItemParams +from ..core.jsonable_encoder import jsonable_encoder +from ..types.evaluation_runs_response import EvaluationRunsResponse +from .requests.create_run_request_dataset import CreateRunRequestDatasetParams +from .requests.create_run_request_version import CreateRunRequestVersionParams +from ..types.evaluation_run_response import EvaluationRunResponse +from ..types.evaluation_status import EvaluationStatus +from ..types.evaluation_stats import EvaluationStats +from ..types.paginated_data_evaluation_log_response import PaginatedDataEvaluationLogResponse +from ..core.client_wrapper import AsyncClientWrapper +from ..core.http_response import AsyncHttpResponse + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) + + +class RawEvaluationsClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def create( + self, + *, + evaluators: typing.Sequence[CreateEvaluationRequestEvaluatorsItemParams], + file: typing.Optional[FileRequestParams] = OMIT, + name: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[EvaluationResponse]: + """ + Create an Evaluation. 
+ + Create a new Evaluation by specifying the File to evaluate, and a name + for the Evaluation. + You can then add Runs to this Evaluation using the `POST /evaluations/{id}/runs` endpoint. + + Parameters + ---------- + evaluators : typing.Sequence[CreateEvaluationRequestEvaluatorsItemParams] + The Evaluators used to evaluate. + + file : typing.Optional[FileRequestParams] + The File to associate with the Evaluation. This File contains the Logs you're evaluating. + + name : typing.Optional[str] + Name of the Evaluation to help identify it. Must be unique within the associated File. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[EvaluationResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + "evaluations", + method="POST", + json={ + "file": convert_and_respect_annotation_metadata( + object_=file, annotation=FileRequestParams, direction="write" + ), + "name": name, + "evaluators": convert_and_respect_annotation_metadata( + object_=evaluators, + annotation=typing.Sequence[CreateEvaluationRequestEvaluatorsItemParams], + direction="write", + ), + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluationResponse, + construct_type( + type_=EvaluationResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def add_evaluators( + self, + id: str, + *, + evaluators: typing.Sequence[AddEvaluatorsRequestEvaluatorsItemParams], + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[EvaluationResponse]: + """ + Add Evaluators to an Evaluation. + + The Evaluators will be run on the Logs generated for the Evaluation. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + evaluators : typing.Sequence[AddEvaluatorsRequestEvaluatorsItemParams] + The Evaluators to add to this Evaluation. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
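# A minimal usage sketch for `create` as documented above, assuming the top-level
# Humanloop client exposes this Evaluations client. The wrapper methods return
# `response.data`, so the call yields an EvaluationResponse directly. The name,
# File path, and Evaluator reference are hypothetical placeholders, and the exact
# request shapes are assumptions based on the *Params types imported here.
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")
evaluation = client.evaluations.create(
    name="Answer quality eval",                      # hypothetical name
    file={"path": "Customer Support/Answer Flow"},   # hypothetical File path
    evaluators=[{"path": "Evaluators/Accuracy"}],    # hypothetical Evaluator reference
)
print(evaluation.id)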
+ + Returns + ------- + HttpResponse[EvaluationResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(id)}/evaluators", + method="POST", + json={ + "evaluators": convert_and_respect_annotation_metadata( + object_=evaluators, + annotation=typing.Sequence[AddEvaluatorsRequestEvaluatorsItemParams], + direction="write", + ), + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluationResponse, + construct_type( + type_=EvaluationResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def remove_evaluator( + self, id: str, evaluator_version_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[EvaluationResponse]: + """ + Remove an Evaluator from an Evaluation. + + The Evaluator will no longer be run on the Logs in the Evaluation. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + evaluator_version_id : str + Unique identifier for Evaluator Version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[EvaluationResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(id)}/evaluators/{jsonable_encoder(evaluator_version_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluationResponse, + construct_type( + type_=EvaluationResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[EvaluationResponse]: + """ + Get an Evaluation. + + This includes the Evaluators associated with the Evaluation and metadata about the Evaluation, + such as its name. + + To get the Runs associated with the Evaluation, use the `GET /evaluations/{id}/runs` endpoint. + To retrieve stats for the Evaluation, use the `GET /evaluations/{id}/stats` endpoint. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[EvaluationResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluationResponse, + construct_type( + type_=EvaluationResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> HttpResponse[None]: + """ + Delete an Evaluation. + + The Runs and Evaluators in the Evaluation will not be deleted. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[None] + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return HttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def list_runs_for_evaluation( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[EvaluationRunsResponse]: + """ + List all Runs for an Evaluation. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[EvaluationRunsResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(id)}/runs", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluationRunsResponse, + construct_type( + type_=EvaluationRunsResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def create_run( + self, + id: str, + *, + dataset: typing.Optional[CreateRunRequestDatasetParams] = OMIT, + version: typing.Optional[CreateRunRequestVersionParams] = OMIT, + orchestrated: typing.Optional[bool] = OMIT, + use_existing_logs: typing.Optional[bool] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[EvaluationRunResponse]: + """ + Create an Evaluation Run. + + Optionally specify the Dataset and version to be evaluated. + + Humanloop will automatically start generating Logs and running Evaluators where + `orchestrated=true`. If you are generating Logs yourself, you can set `orchestrated=false` + and then generate and submit the required Logs via the API. + + If `dataset` and `version` are provided, you can set `use_existing_logs=True` to reuse existing Logs, + avoiding generating new Logs unnecessarily. Logs that are associated with the specified Version and have `source_datapoint_id` + referencing a datapoint in the specified Dataset will be associated with the Run. + + To keep updated on the progress of the Run, you can poll the Run using + the `GET /evaluations/{id}/runs` endpoint and check its status. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + dataset : typing.Optional[CreateRunRequestDatasetParams] + Dataset to use in this Run. + + version : typing.Optional[CreateRunRequestVersionParams] + Version to use in this Run. + + orchestrated : typing.Optional[bool] + Whether the Run is orchestrated by Humanloop. If `True`, Humanloop will generate Logs for the Run; `dataset` and `version` must be provided. If `False`, a log for the Prompt/Tool should be submitted by the user via the API. + + use_existing_logs : typing.Optional[bool] + If `True`, the Run will be initialized with existing Logs associated with the Dataset and Version. If `False`, the Run will be initialized with no Logs. Can only be set to `True` when both `dataset` and `version` are provided. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
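# A usage sketch for `create_run` as described above. All IDs are hypothetical
# placeholders, and the `dataset`/`version` shapes (a `version_id` key) are
# assumptions based on the request Params types. With `orchestrated=True`,
# Humanloop generates the Logs; `use_existing_logs=True` reuses Logs already
# associated with the given Dataset and Version.
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")
run = client.evaluations.create_run(
    id="ev_567yza",                                  # hypothetical Evaluation ID
    dataset={"version_id": "dsv_123abc"},            # hypothetical Dataset version
    version={"version_id": "prv_456def"},            # hypothetical Prompt version
    orchestrated=True,
    use_existing_logs=True,
)
# Poll the Runs endpoint to track progress, as the docstring suggests.
runs = client.evaluations.list_runs_for_evaluation(id="ev_567yza")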
+ + Returns + ------- + HttpResponse[EvaluationRunResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(id)}/runs", + method="POST", + json={ + "dataset": convert_and_respect_annotation_metadata( + object_=dataset, annotation=CreateRunRequestDatasetParams, direction="write" + ), + "version": convert_and_respect_annotation_metadata( + object_=version, annotation=CreateRunRequestVersionParams, direction="write" + ), + "orchestrated": orchestrated, + "use_existing_logs": use_existing_logs, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluationRunResponse, + construct_type( + type_=EvaluationRunResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def add_existing_run( + self, id: str, run_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[typing.Optional[typing.Any]]: + """ + Add an existing Run to the specified Evaluation. + + This is useful if you want to compare the Runs in this Evaluation with an existing Run + that exists within another Evaluation. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + run_id : str + Unique identifier for Run. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[typing.Optional[typing.Any]] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(id)}/runs/{jsonable_encoder(run_id)}", + method="POST", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def remove_run( + self, id: str, run_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[None]: + """ + Remove a Run from an Evaluation. + + The Logs and Versions used in the Run will not be deleted. + If this Run is used in any other Evaluations, it will still be available in those Evaluations. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + run_id : str + Unique identifier for Run. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[None] + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(id)}/runs/{jsonable_encoder(run_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return HttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def update_evaluation_run( + self, + id: str, + run_id: str, + *, + control: typing.Optional[bool] = OMIT, + status: typing.Optional[EvaluationStatus] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[EvaluationRunResponse]: + """ + Update an Evaluation Run. + + Specify `control=true` to use this Run as the control Run for the Evaluation. + You can cancel a running/pending Run, or mark a Run that uses external or human Evaluators as completed. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + run_id : str + Unique identifier for Run. + + control : typing.Optional[bool] + If `True`, this Run will be used as the control in the Evaluation. Stats for other Runs will be compared to this Run. This will replace any existing control Run. + + status : typing.Optional[EvaluationStatus] + Used to set the Run to `cancelled` or `completed`. Can only be used if the Run is currently `pending` or `running`. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[EvaluationRunResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(id)}/runs/{jsonable_encoder(run_id)}", + method="PATCH", + json={ + "control": control, + "status": status, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluationRunResponse, + construct_type( + type_=EvaluationRunResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def add_logs_to_run( + self, + id: str, + run_id: str, + *, + log_ids: typing.Sequence[str], + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[EvaluationRunResponse]: + """ + Add the specified Logs to a Run. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + run_id : str + Unique identifier for Run. + + log_ids : typing.Sequence[str] + The IDs of the Logs to add to the Run. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
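# A sketch of the manual flow implied by `orchestrated=False`: create the Run,
# attach self-generated Logs with `add_logs_to_run`, then close it out with
# `update_evaluation_run`. IDs are hypothetical placeholders, and `.id` on the
# Run response is an assumed field.
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")
run = client.evaluations.create_run(id="ev_567yza", orchestrated=False)
client.evaluations.add_logs_to_run(
    id="ev_567yza",
    run_id=run.id,
    log_ids=["log_111aaa", "log_222bbb"],            # hypothetical Log IDs
)
client.evaluations.update_evaluation_run(
    id="ev_567yza",
    run_id=run.id,
    control=True,                                    # make this the control Run
    status="completed",                              # EvaluationStatus: "completed" or "cancelled"
)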
+ + Returns + ------- + HttpResponse[EvaluationRunResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(id)}/runs/{jsonable_encoder(run_id)}/logs", + method="POST", + json={ + "log_ids": log_ids, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluationRunResponse, + construct_type( + type_=EvaluationRunResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_stats( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[EvaluationStats]: + """ + Get Evaluation Stats. + + Retrieve aggregate stats for the specified Evaluation. This includes the number of generated Logs for each Run and the + corresponding Evaluator statistics (such as the mean and percentiles). + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[EvaluationStats] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(id)}/stats", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluationStats, + construct_type( + type_=EvaluationStats, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_logs( + self, + id: str, + *, + page: typing.Optional[int] = None, + size: typing.Optional[int] = None, + run_id: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[PaginatedDataEvaluationLogResponse]: + """ + Get the Logs associated to a specific Evaluation. + + This returns the Logs associated to all Runs within with the Evaluation. + + Parameters + ---------- + id : str + String ID of evaluation. Starts with `ev_` or `evr_`. + + page : typing.Optional[int] + Page number for pagination. + + size : typing.Optional[int] + Page size for pagination. Number of Logs to fetch. + + run_id : typing.Optional[typing.Union[str, typing.Sequence[str]]] + Filter by Run IDs. Only Logs for the specified Runs will be returned. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
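# A sketch of fetching Evaluation Logs with the pagination and Run filter
# described above. The Evaluation and Run IDs are hypothetical, and the
# `.records` attribute on the paginated response is an assumption.
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")
logs_page = client.evaluations.get_logs(
    id="ev_567yza",                                  # hypothetical Evaluation ID
    page=1,
    size=50,
    run_id=["rn_123abc"],                            # hypothetical Run ID filter
)
for log in logs_page.records:                        # assumed field on PaginatedDataEvaluationLogResponse
    print(log.id)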
+ + Returns + ------- + HttpResponse[PaginatedDataEvaluationLogResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(id)}/logs", + method="GET", + params={ + "page": page, + "size": size, + "run_id": run_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + PaginatedDataEvaluationLogResponse, + construct_type( + type_=PaginatedDataEvaluationLogResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + +class AsyncRawEvaluationsClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def create( + self, + *, + evaluators: typing.Sequence[CreateEvaluationRequestEvaluatorsItemParams], + file: typing.Optional[FileRequestParams] = OMIT, + name: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[EvaluationResponse]: + """ + Create an Evaluation. + + Create a new Evaluation by specifying the File to evaluate, and a name + for the Evaluation. + You can then add Runs to this Evaluation using the `POST /evaluations/{id}/runs` endpoint. + + Parameters + ---------- + evaluators : typing.Sequence[CreateEvaluationRequestEvaluatorsItemParams] + The Evaluators used to evaluate. + + file : typing.Optional[FileRequestParams] + The File to associate with the Evaluation. This File contains the Logs you're evaluating. + + name : typing.Optional[str] + Name of the Evaluation to help identify it. Must be unique within the associated File. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[EvaluationResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + "evaluations", + method="POST", + json={ + "file": convert_and_respect_annotation_metadata( + object_=file, annotation=FileRequestParams, direction="write" + ), + "name": name, + "evaluators": convert_and_respect_annotation_metadata( + object_=evaluators, + annotation=typing.Sequence[CreateEvaluationRequestEvaluatorsItemParams], + direction="write", + ), + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluationResponse, + construct_type( + type_=EvaluationResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def add_evaluators( + self, + id: str, + *, + evaluators: typing.Sequence[AddEvaluatorsRequestEvaluatorsItemParams], + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[EvaluationResponse]: + """ + Add Evaluators to an Evaluation. + + The Evaluators will be run on the Logs generated for the Evaluation. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + evaluators : typing.Sequence[AddEvaluatorsRequestEvaluatorsItemParams] + The Evaluators to add to this Evaluation. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[EvaluationResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(id)}/evaluators", + method="POST", + json={ + "evaluators": convert_and_respect_annotation_metadata( + object_=evaluators, + annotation=typing.Sequence[AddEvaluatorsRequestEvaluatorsItemParams], + direction="write", + ), + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluationResponse, + construct_type( + type_=EvaluationResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def remove_evaluator( + self, id: str, evaluator_version_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[EvaluationResponse]: + """ + Remove an Evaluator from an Evaluation. + + The Evaluator will no longer be run on the Logs in the Evaluation. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. 
+ + evaluator_version_id : str + Unique identifier for Evaluator Version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[EvaluationResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(id)}/evaluators/{jsonable_encoder(evaluator_version_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluationResponse, + construct_type( + type_=EvaluationResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[EvaluationResponse]: + """ + Get an Evaluation. + + This includes the Evaluators associated with the Evaluation and metadata about the Evaluation, + such as its name. + + To get the Runs associated with the Evaluation, use the `GET /evaluations/{id}/runs` endpoint. + To retrieve stats for the Evaluation, use the `GET /evaluations/{id}/stats` endpoint. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[EvaluationResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluationResponse, + construct_type( + type_=EvaluationResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def delete( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[None]: + """ + Delete an Evaluation. + + The Runs and Evaluators in the Evaluation will not be deleted. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[None] + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return AsyncHttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def list_runs_for_evaluation( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[EvaluationRunsResponse]: + """ + List all Runs for an Evaluation. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[EvaluationRunsResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(id)}/runs", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluationRunsResponse, + construct_type( + type_=EvaluationRunsResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def create_run( + self, + id: str, + *, + dataset: typing.Optional[CreateRunRequestDatasetParams] = OMIT, + version: typing.Optional[CreateRunRequestVersionParams] = OMIT, + orchestrated: typing.Optional[bool] = OMIT, + use_existing_logs: typing.Optional[bool] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[EvaluationRunResponse]: + """ + Create an Evaluation Run. + + Optionally specify the Dataset and version to be evaluated. + + Humanloop will automatically start generating Logs and running Evaluators where + `orchestrated=true`. If you are generating Logs yourself, you can set `orchestrated=false` + and then generate and submit the required Logs via the API. + + If `dataset` and `version` are provided, you can set `use_existing_logs=True` to reuse existing Logs, + avoiding generating new Logs unnecessarily. Logs that are associated with the specified Version and have `source_datapoint_id` + referencing a datapoint in the specified Dataset will be associated with the Run. + + To keep updated on the progress of the Run, you can poll the Run using + the `GET /evaluations/{id}/runs` endpoint and check its status. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + dataset : typing.Optional[CreateRunRequestDatasetParams] + Dataset to use in this Run. + + version : typing.Optional[CreateRunRequestVersionParams] + Version to use in this Run. 
+ + orchestrated : typing.Optional[bool] + Whether the Run is orchestrated by Humanloop. If `True`, Humanloop will generate Logs for the Run; `dataset` and `version` must be provided. If `False`, a log for the Prompt/Tool should be submitted by the user via the API. + + use_existing_logs : typing.Optional[bool] + If `True`, the Run will be initialized with existing Logs associated with the Dataset and Version. If `False`, the Run will be initialized with no Logs. Can only be set to `True` when both `dataset` and `version` are provided. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[EvaluationRunResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(id)}/runs", + method="POST", + json={ + "dataset": convert_and_respect_annotation_metadata( + object_=dataset, annotation=CreateRunRequestDatasetParams, direction="write" + ), + "version": convert_and_respect_annotation_metadata( + object_=version, annotation=CreateRunRequestVersionParams, direction="write" + ), + "orchestrated": orchestrated, + "use_existing_logs": use_existing_logs, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluationRunResponse, + construct_type( + type_=EvaluationRunResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def add_existing_run( + self, id: str, run_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[typing.Optional[typing.Any]]: + """ + Add an existing Run to the specified Evaluation. + + This is useful if you want to compare the Runs in this Evaluation with an existing Run + that exists within another Evaluation. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + run_id : str + Unique identifier for Run. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[typing.Optional[typing.Any]] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(id)}/runs/{jsonable_encoder(run_id)}", + method="POST", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def remove_run( + self, id: str, run_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[None]: + """ + Remove a Run from an Evaluation. + + The Logs and Versions used in the Run will not be deleted. + If this Run is used in any other Evaluations, it will still be available in those Evaluations. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + run_id : str + Unique identifier for Run. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[None] + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(id)}/runs/{jsonable_encoder(run_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return AsyncHttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def update_evaluation_run( + self, + id: str, + run_id: str, + *, + control: typing.Optional[bool] = OMIT, + status: typing.Optional[EvaluationStatus] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[EvaluationRunResponse]: + """ + Update an Evaluation Run. + + Specify `control=true` to use this Run as the control Run for the Evaluation. + You can cancel a running/pending Run, or mark a Run that uses external or human Evaluators as completed. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + run_id : str + Unique identifier for Run. + + control : typing.Optional[bool] + If `True`, this Run will be used as the control in the Evaluation. Stats for other Runs will be compared to this Run. This will replace any existing control Run. + + status : typing.Optional[EvaluationStatus] + Used to set the Run to `cancelled` or `completed`. Can only be used if the Run is currently `pending` or `running`. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[EvaluationRunResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(id)}/runs/{jsonable_encoder(run_id)}", + method="PATCH", + json={ + "control": control, + "status": status, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluationRunResponse, + construct_type( + type_=EvaluationRunResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def add_logs_to_run( + self, + id: str, + run_id: str, + *, + log_ids: typing.Sequence[str], + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[EvaluationRunResponse]: + """ + Add the specified Logs to a Run. + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + run_id : str + Unique identifier for Run. + + log_ids : typing.Sequence[str] + The IDs of the Logs to add to the Run. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[EvaluationRunResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(id)}/runs/{jsonable_encoder(run_id)}/logs", + method="POST", + json={ + "log_ids": log_ids, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluationRunResponse, + construct_type( + type_=EvaluationRunResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_stats( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[EvaluationStats]: + """ + Get Evaluation Stats. + + Retrieve aggregate stats for the specified Evaluation. This includes the number of generated Logs for each Run and the + corresponding Evaluator statistics (such as the mean and percentiles). + + Parameters + ---------- + id : str + Unique identifier for Evaluation. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[EvaluationStats] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(id)}/stats", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluationStats, + construct_type( + type_=EvaluationStats, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get_logs( + self, + id: str, + *, + page: typing.Optional[int] = None, + size: typing.Optional[int] = None, + run_id: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[PaginatedDataEvaluationLogResponse]: + """ + Get the Logs associated to a specific Evaluation. + + This returns the Logs associated to all Runs within with the Evaluation. + + Parameters + ---------- + id : str + String ID of evaluation. Starts with `ev_` or `evr_`. + + page : typing.Optional[int] + Page number for pagination. + + size : typing.Optional[int] + Page size for pagination. Number of Logs to fetch. + + run_id : typing.Optional[typing.Union[str, typing.Sequence[str]]] + Filter by Run IDs. Only Logs for the specified Runs will be returned. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[PaginatedDataEvaluationLogResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluations/{jsonable_encoder(id)}/logs", + method="GET", + params={ + "page": page, + "size": size, + "run_id": run_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + PaginatedDataEvaluationLogResponse, + construct_type( + type_=PaginatedDataEvaluationLogResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/humanloop/evaluators/client.py b/src/humanloop/evaluators/client.py index c97648e9..2a573e61 100644 --- a/src/humanloop/evaluators/client.py +++ b/src/humanloop/evaluators/client.py @@ -2,6 +2,7 @@ import typing from ..core.client_wrapper import SyncClientWrapper +from .raw_client import RawEvaluatorsClient import datetime as dt from ..types.log_status import LogStatus from ..requests.chat_message import ChatMessageParams @@ -9,20 +10,17 @@ from .requests.create_evaluator_log_request_spec import CreateEvaluatorLogRequestSpecParams from ..core.request_options import RequestOptions from ..types.create_evaluator_log_response import CreateEvaluatorLogResponse -from ..core.serialization import convert_and_respect_annotation_metadata -from ..core.unchecked_base_model import construct_type -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.http_validation_error import HttpValidationError -from json.decoder import JSONDecodeError -from ..core.api_error import ApiError from ..types.project_sort_by import ProjectSortBy from ..types.sort_order import SortOrder from ..core.pagination import SyncPager from ..types.evaluator_response import EvaluatorResponse from ..types.paginated_data_evaluator_response import PaginatedDataEvaluatorResponse +from ..core.unchecked_base_model import construct_type +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..types.http_validation_error import HttpValidationError +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError from .requests.evaluator_request_spec import EvaluatorRequestSpecParams -from ..core.jsonable_encoder import jsonable_encoder -from ..types.version_status import VersionStatus from ..types.list_evaluators import ListEvaluators from ..types.file_environment_response import FileEnvironmentResponse from ..requests.evaluator_activation_deactivation_request_activate_item import ( @@ -32,6 +30,7 @@ EvaluatorActivationDeactivationRequestDeactivateItemParams, ) from ..core.client_wrapper import AsyncClientWrapper +from .raw_client import AsyncRawEvaluatorsClient from ..core.pagination import AsyncPager # this is used as the default value for optional parameters @@ -40,7 +39,18 @@ class EvaluatorsClient: def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper + self._raw_client = RawEvaluatorsClient(client_wrapper=client_wrapper) + + @property + def with_raw_response(self) -> 
RawEvaluatorsClient: + """ + Retrieves a raw implementation of this client that returns raw responses. + + Returns + ------- + RawEvaluatorsClient + """ + return self._raw_client def log( self, @@ -184,76 +194,38 @@ def log( parent_id="parent_id", ) """ - _response = self._client_wrapper.httpx_client.request( - "evaluators/log", - method="POST", - params={ - "version_id": version_id, - "environment": environment, - }, - json={ - "path": path, - "id": id, - "start_time": start_time, - "end_time": end_time, - "output": output, - "created_at": created_at, - "error": error, - "provider_latency": provider_latency, - "stdout": stdout, - "provider_request": provider_request, - "provider_response": provider_response, - "inputs": inputs, - "source": source, - "metadata": metadata, - "log_status": log_status, - "parent_id": parent_id, - "source_datapoint_id": source_datapoint_id, - "trace_parent_id": trace_parent_id, - "user": user, - "environment": create_evaluator_log_request_environment, - "save": save, - "log_id": log_id, - "output_message": convert_and_respect_annotation_metadata( - object_=output_message, annotation=ChatMessageParams, direction="write" - ), - "judgment": convert_and_respect_annotation_metadata( - object_=judgment, annotation=CreateEvaluatorLogRequestJudgmentParams, direction="write" - ), - "marked_completed": marked_completed, - "spec": convert_and_respect_annotation_metadata( - object_=spec, annotation=CreateEvaluatorLogRequestSpecParams, direction="write" - ), - }, - headers={ - "content-type": "application/json", - }, + response = self._raw_client.log( + parent_id=parent_id, + version_id=version_id, + environment=environment, + path=path, + id=id, + start_time=start_time, + end_time=end_time, + output=output, + created_at=created_at, + error=error, + provider_latency=provider_latency, + stdout=stdout, + provider_request=provider_request, + provider_response=provider_response, + inputs=inputs, + source=source, + metadata=metadata, + log_status=log_status, + source_datapoint_id=source_datapoint_id, + trace_parent_id=trace_parent_id, + user=user, + create_evaluator_log_request_environment=create_evaluator_log_request_environment, + save=save, + log_id=log_id, + output_message=output_message, + judgment=judgment, + marked_completed=marked_completed, + spec=spec, request_options=request_options, - omit=OMIT, ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - CreateEvaluatorLogResponse, - construct_type( - type_=CreateEvaluatorLogResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data def list( self, @@ -314,7 +286,7 @@ def list( yield page """ page = page if page is not None else 1 - _response = self._client_wrapper.httpx_client.request( + _response = self._raw_client._client_wrapper.httpx_client.request( "evaluators", method="GET", params={ @@ -369,7 +341,8 @@ def upsert( spec: EvaluatorRequestSpecParams, path: typing.Optional[str] = OMIT, id: typing.Optional[str] = OMIT, - commit_message: typing.Optional[str] = OMIT, + version_name: typing.Optional[str] = OMIT, + version_description: typing.Optional[str] = OMIT, 
request_options: typing.Optional[RequestOptions] = None, ) -> EvaluatorResponse: """ @@ -377,9 +350,9 @@ def upsert( Evaluators are identified by the `ID` or their `path`. The spec provided determines the version of the Evaluator. - If you provide a commit message, then the new version will be committed; - otherwise it will be uncommitted. If you try to commit an already committed version, - an exception will be raised. + You can provide `version_name` and `version_description` to identify and describe your versions. + Version names must be unique within an Evaluator - attempting to create a version with a name + that already exists will result in a 409 Conflict error. Parameters ---------- @@ -391,8 +364,11 @@ def upsert( id : typing.Optional[str] ID for an existing Evaluator. - commit_message : typing.Optional[str] - Message describing the changes made. + version_name : typing.Optional[str] + Unique name for the Evaluator version. Version names must be unique for a given Evaluator. + + version_description : typing.Optional[str] + Description of the version, e.g., the changes made in this version. request_options : typing.Optional[RequestOptions] Request-specific configuration. @@ -417,49 +393,17 @@ def upsert( "evaluator_type": "python", "code": "def evaluate(answer, target):\n return 0.5", }, - commit_message="Initial commit", ) """ - _response = self._client_wrapper.httpx_client.request( - "evaluators", - method="POST", - json={ - "path": path, - "id": id, - "commit_message": commit_message, - "spec": convert_and_respect_annotation_metadata( - object_=spec, annotation=EvaluatorRequestSpecParams, direction="write" - ), - }, - headers={ - "content-type": "application/json", - }, + response = self._raw_client.upsert( + spec=spec, + path=path, + id=id, + version_name=version_name, + version_description=version_description, request_options=request_options, - omit=OMIT, ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - EvaluatorResponse, - construct_type( - type_=EvaluatorResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data def get( self, @@ -505,38 +449,10 @@ def get( id="ev_890bcd", ) """ - _response = self._client_wrapper.httpx_client.request( - f"evaluators/{jsonable_encoder(id)}", - method="GET", - params={ - "version_id": version_id, - "environment": environment, - }, - request_options=request_options, + response = self._raw_client.get( + id, version_id=version_id, environment=environment, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - EvaluatorResponse, - construct_type( - type_=EvaluatorResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + 
return response.data def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: """ @@ -565,28 +481,8 @@ def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = id="ev_890bcd", ) """ - _response = self._client_wrapper.httpx_client.request( - f"evaluators/{jsonable_encoder(id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = self._raw_client.delete(id, request_options=request_options) + return response.data def move( self, @@ -630,48 +526,13 @@ def move( path="new directory/new name", ) """ - _response = self._client_wrapper.httpx_client.request( - f"evaluators/{jsonable_encoder(id)}", - method="PATCH", - json={ - "path": path, - "name": name, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - EvaluatorResponse, - construct_type( - type_=EvaluatorResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = self._raw_client.move(id, path=path, name=name, request_options=request_options) + return response.data def list_versions( self, id: str, *, - status: typing.Optional[VersionStatus] = None, evaluator_aggregates: typing.Optional[bool] = None, request_options: typing.Optional[RequestOptions] = None, ) -> ListEvaluators: @@ -683,9 +544,6 @@ def list_versions( id : str Unique identifier for the Evaluator. - status : typing.Optional[VersionStatus] - Filter versions by status: 'uncommitted', 'committed'. If no status is provided, all versions are returned. 
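# Illustrative sketch (assumed usage, not generated code): with the `status`
# filter gone, list_versions now returns every version of the Evaluator. The
# `.records` attribute on ListEvaluators is an assumption based on the other
# paginated models in this SDK.
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")
all_versions = client.evaluators.list_versions(id="ev_890bcd")
for version in all_versions.records:  # assumed field on ListEvaluators
    print(version)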
- evaluator_aggregates : typing.Optional[bool] Whether to include Evaluator aggregate results for the versions in the response @@ -708,65 +566,31 @@ def list_versions( id="ev_890bcd", ) """ - _response = self._client_wrapper.httpx_client.request( - f"evaluators/{jsonable_encoder(id)}/versions", - method="GET", - params={ - "status": status, - "evaluator_aggregates": evaluator_aggregates, - }, - request_options=request_options, + response = self._raw_client.list_versions( + id, evaluator_aggregates=evaluator_aggregates, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - ListEvaluators, - construct_type( - type_=ListEvaluators, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data - def commit( - self, id: str, version_id: str, *, commit_message: str, request_options: typing.Optional[RequestOptions] = None - ) -> EvaluatorResponse: + def delete_evaluator_version( + self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> None: """ - Commit a version of the Evaluator with a commit message. - - If the version is already committed, an exception will be raised. + Delete a version of the Evaluator. Parameters ---------- id : str - Unique identifier for Prompt. + Unique identifier for Evaluator. version_id : str Unique identifier for the specific version of the Evaluator. - commit_message : str - Message describing the changes made. - request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - EvaluatorResponse - Successful Response + None Examples -------- @@ -775,50 +599,25 @@ def commit( client = Humanloop( api_key="YOUR_API_KEY", ) - client.evaluators.commit( - id="ev_890bcd", - version_id="evv_012def", - commit_message="Initial commit", + client.evaluators.delete_evaluator_version( + id="id", + version_id="version_id", ) """ - _response = self._client_wrapper.httpx_client.request( - f"evaluators/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}/commit", - method="POST", - json={ - "commit_message": commit_message, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - EvaluatorResponse, - construct_type( - type_=EvaluatorResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = self._raw_client.delete_evaluator_version(id, version_id, request_options=request_options) + return response.data - def delete_evaluator_version( - self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> None: + def update_evaluator_version( + self, + id: str, + version_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> EvaluatorResponse: """ - Delete a version of the Evaluator. + Update the name or description of the Evaluator version. Parameters ---------- @@ -828,12 +627,19 @@ def delete_evaluator_version( version_id : str Unique identifier for the specific version of the Evaluator. + name : typing.Optional[str] + Name of the version. + + description : typing.Optional[str] + Description of the version. + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
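# Illustrative sketch (assumed usage, not generated code): with the `commit`
# endpoint removed, unwanted versions are simply deleted via the new
# delete_evaluator_version method shown above. IDs are placeholders.
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")
client.evaluators.delete_evaluator_version(
    id="ev_890bcd",
    version_id="evv_012def",
)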
Returns ------- - None + EvaluatorResponse + Successful Response Examples -------- @@ -842,33 +648,15 @@ def delete_evaluator_version( client = Humanloop( api_key="YOUR_API_KEY", ) - client.evaluators.delete_evaluator_version( + client.evaluators.update_evaluator_version( id="id", version_id="version_id", ) """ - _response = self._client_wrapper.httpx_client.request( - f"evaluators/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", - method="DELETE", - request_options=request_options, + response = self._raw_client.update_evaluator_version( + id, version_id, name=name, description=description, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data def set_deployment( self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None @@ -911,37 +699,10 @@ def set_deployment( version_id="evv_012def", ) """ - _response = self._client_wrapper.httpx_client.request( - f"evaluators/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", - method="POST", - params={ - "version_id": version_id, - }, - request_options=request_options, + response = self._raw_client.set_deployment( + id, environment_id, version_id=version_id, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - EvaluatorResponse, - construct_type( - type_=EvaluatorResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data def remove_deployment( self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None @@ -979,28 +740,8 @@ def remove_deployment( environment_id="staging", ) """ - _response = self._client_wrapper.httpx_client.request( - f"evaluators/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = self._raw_client.remove_deployment(id, environment_id, request_options=request_options) + return response.data def list_environments( self, id: str, *, request_options: typing.Optional[RequestOptions] = None @@ -1032,34 +773,8 @@ def list_environments( id="ev_890bcd", ) """ - _response = 
self._client_wrapper.httpx_client.request( - f"evaluators/{jsonable_encoder(id)}/environments", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - typing.List[FileEnvironmentResponse], - construct_type( - type_=typing.List[FileEnvironmentResponse], # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = self._raw_client.list_environments(id, request_options=request_options) + return response.data def update_monitoring( self, @@ -1104,52 +819,26 @@ def update_monitoring( id="id", ) """ - _response = self._client_wrapper.httpx_client.request( - f"evaluators/{jsonable_encoder(id)}/evaluators", - method="POST", - json={ - "activate": convert_and_respect_annotation_metadata( - object_=activate, - annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams], - direction="write", - ), - "deactivate": convert_and_respect_annotation_metadata( - object_=deactivate, - annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams], - direction="write", - ), - }, - request_options=request_options, - omit=OMIT, + response = self._raw_client.update_monitoring( + id, activate=activate, deactivate=deactivate, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - EvaluatorResponse, - construct_type( - type_=EvaluatorResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data class AsyncEvaluatorsClient: def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper + self._raw_client = AsyncRawEvaluatorsClient(client_wrapper=client_wrapper) + + @property + def with_raw_response(self) -> AsyncRawEvaluatorsClient: + """ + Retrieves a raw implementation of this client that returns raw responses. 
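# Illustrative sketch (assumed usage, not generated code): the wrapper client
# returns parsed models, while `with_raw_response` exposes the raw-client
# methods that wrap the same model in an HttpResponse. Shown here on the sync
# client; the async client mirrors it. `.id` on EvaluatorResponse is assumed.
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

evaluator = client.evaluators.get(id="ev_890bcd")                # EvaluatorResponse
raw = client.evaluators.with_raw_response.get(id="ev_890bcd")    # HttpResponse[EvaluatorResponse]
assert raw.data.id == evaluator.id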
+ + Returns + ------- + AsyncRawEvaluatorsClient + """ + return self._raw_client async def log( self, @@ -1301,76 +990,38 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - "evaluators/log", - method="POST", - params={ - "version_id": version_id, - "environment": environment, - }, - json={ - "path": path, - "id": id, - "start_time": start_time, - "end_time": end_time, - "output": output, - "created_at": created_at, - "error": error, - "provider_latency": provider_latency, - "stdout": stdout, - "provider_request": provider_request, - "provider_response": provider_response, - "inputs": inputs, - "source": source, - "metadata": metadata, - "log_status": log_status, - "parent_id": parent_id, - "source_datapoint_id": source_datapoint_id, - "trace_parent_id": trace_parent_id, - "user": user, - "environment": create_evaluator_log_request_environment, - "save": save, - "log_id": log_id, - "output_message": convert_and_respect_annotation_metadata( - object_=output_message, annotation=ChatMessageParams, direction="write" - ), - "judgment": convert_and_respect_annotation_metadata( - object_=judgment, annotation=CreateEvaluatorLogRequestJudgmentParams, direction="write" - ), - "marked_completed": marked_completed, - "spec": convert_and_respect_annotation_metadata( - object_=spec, annotation=CreateEvaluatorLogRequestSpecParams, direction="write" - ), - }, - headers={ - "content-type": "application/json", - }, + response = await self._raw_client.log( + parent_id=parent_id, + version_id=version_id, + environment=environment, + path=path, + id=id, + start_time=start_time, + end_time=end_time, + output=output, + created_at=created_at, + error=error, + provider_latency=provider_latency, + stdout=stdout, + provider_request=provider_request, + provider_response=provider_response, + inputs=inputs, + source=source, + metadata=metadata, + log_status=log_status, + source_datapoint_id=source_datapoint_id, + trace_parent_id=trace_parent_id, + user=user, + create_evaluator_log_request_environment=create_evaluator_log_request_environment, + save=save, + log_id=log_id, + output_message=output_message, + judgment=judgment, + marked_completed=marked_completed, + spec=spec, request_options=request_options, - omit=OMIT, ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - CreateEvaluatorLogResponse, - construct_type( - type_=CreateEvaluatorLogResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data async def list( self, @@ -1439,7 +1090,7 @@ async def main() -> None: asyncio.run(main()) """ page = page if page is not None else 1 - _response = await self._client_wrapper.httpx_client.request( + _response = await self._raw_client._client_wrapper.httpx_client.request( "evaluators", method="GET", params={ @@ -1494,7 +1145,8 @@ async def upsert( spec: EvaluatorRequestSpecParams, path: typing.Optional[str] = OMIT, id: typing.Optional[str] = OMIT, - commit_message: typing.Optional[str] = OMIT, + version_name: typing.Optional[str] = OMIT, + version_description: typing.Optional[str] = OMIT, request_options: 
typing.Optional[RequestOptions] = None, ) -> EvaluatorResponse: """ @@ -1502,9 +1154,9 @@ async def upsert( Evaluators are identified by the `ID` or their `path`. The spec provided determines the version of the Evaluator. - If you provide a commit message, then the new version will be committed; - otherwise it will be uncommitted. If you try to commit an already committed version, - an exception will be raised. + You can provide `version_name` and `version_description` to identify and describe your versions. + Version names must be unique within an Evaluator - attempting to create a version with a name + that already exists will result in a 409 Conflict error. Parameters ---------- @@ -1516,8 +1168,11 @@ async def upsert( id : typing.Optional[str] ID for an existing Evaluator. - commit_message : typing.Optional[str] - Message describing the changes made. + version_name : typing.Optional[str] + Unique name for the Evaluator version. Version names must be unique for a given Evaluator. + + version_description : typing.Optional[str] + Description of the version, e.g., the changes made in this version. request_options : typing.Optional[RequestOptions] Request-specific configuration. @@ -1547,52 +1202,20 @@ async def main() -> None: "evaluator_type": "python", "code": "def evaluate(answer, target):\n return 0.5", }, - commit_message="Initial commit", ) asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - "evaluators", - method="POST", - json={ - "path": path, - "id": id, - "commit_message": commit_message, - "spec": convert_and_respect_annotation_metadata( - object_=spec, annotation=EvaluatorRequestSpecParams, direction="write" - ), - }, - headers={ - "content-type": "application/json", - }, + response = await self._raw_client.upsert( + spec=spec, + path=path, + id=id, + version_name=version_name, + version_description=version_description, request_options=request_options, - omit=OMIT, ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - EvaluatorResponse, - construct_type( - type_=EvaluatorResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data async def get( self, @@ -1646,38 +1269,10 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"evaluators/{jsonable_encoder(id)}", - method="GET", - params={ - "version_id": version_id, - "environment": environment, - }, - request_options=request_options, + response = await self._raw_client.get( + id, version_id=version_id, environment=environment, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - EvaluatorResponse, - construct_type( - type_=EvaluatorResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, 
body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data async def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: """ @@ -1714,28 +1309,8 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"evaluators/{jsonable_encoder(id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = await self._raw_client.delete(id, request_options=request_options) + return response.data async def move( self, @@ -1787,48 +1362,13 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"evaluators/{jsonable_encoder(id)}", - method="PATCH", - json={ - "path": path, - "name": name, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - EvaluatorResponse, - construct_type( - type_=EvaluatorResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = await self._raw_client.move(id, path=path, name=name, request_options=request_options) + return response.data async def list_versions( self, id: str, *, - status: typing.Optional[VersionStatus] = None, evaluator_aggregates: typing.Optional[bool] = None, request_options: typing.Optional[RequestOptions] = None, ) -> ListEvaluators: @@ -1840,9 +1380,6 @@ async def list_versions( id : str Unique identifier for the Evaluator. - status : typing.Optional[VersionStatus] - Filter versions by status: 'uncommitted', 'committed'. If no status is provided, all versions are returned. 
- evaluator_aggregates : typing.Optional[bool] Whether to include Evaluator aggregate results for the versions in the response @@ -1873,65 +1410,31 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"evaluators/{jsonable_encoder(id)}/versions", - method="GET", - params={ - "status": status, - "evaluator_aggregates": evaluator_aggregates, - }, - request_options=request_options, + response = await self._raw_client.list_versions( + id, evaluator_aggregates=evaluator_aggregates, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - ListEvaluators, - construct_type( - type_=ListEvaluators, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data - async def commit( - self, id: str, version_id: str, *, commit_message: str, request_options: typing.Optional[RequestOptions] = None - ) -> EvaluatorResponse: + async def delete_evaluator_version( + self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> None: """ - Commit a version of the Evaluator with a commit message. - - If the version is already committed, an exception will be raised. + Delete a version of the Evaluator. Parameters ---------- id : str - Unique identifier for Prompt. + Unique identifier for Evaluator. version_id : str Unique identifier for the specific version of the Evaluator. - commit_message : str - Message describing the changes made. - request_options : typing.Optional[RequestOptions] Request-specific configuration. 
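# Illustrative sketch (assumed usage, not generated code): the async variants
# above are awaited in the usual way. `AsyncHumanloop` as the async entry
# point is an assumption based on the rest of this SDK.
import asyncio

from humanloop import AsyncHumanloop

client = AsyncHumanloop(api_key="YOUR_API_KEY")


async def main() -> None:
    versions = await client.evaluators.list_versions(id="ev_890bcd")
    print(versions)


asyncio.run(main())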
Returns ------- - EvaluatorResponse - Successful Response + None Examples -------- @@ -1945,53 +1448,28 @@ async def commit( async def main() -> None: - await client.evaluators.commit( - id="ev_890bcd", - version_id="evv_012def", - commit_message="Initial commit", + await client.evaluators.delete_evaluator_version( + id="id", + version_id="version_id", ) asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"evaluators/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}/commit", - method="POST", - json={ - "commit_message": commit_message, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - EvaluatorResponse, - construct_type( - type_=EvaluatorResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = await self._raw_client.delete_evaluator_version(id, version_id, request_options=request_options) + return response.data - async def delete_evaluator_version( - self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> None: + async def update_evaluator_version( + self, + id: str, + version_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> EvaluatorResponse: """ - Delete a version of the Evaluator. + Update the name or description of the Evaluator version. Parameters ---------- @@ -2001,12 +1479,19 @@ async def delete_evaluator_version( version_id : str Unique identifier for the specific version of the Evaluator. + name : typing.Optional[str] + Name of the version. + + description : typing.Optional[str] + Description of the version. + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - None + EvaluatorResponse + Successful Response Examples -------- @@ -2020,7 +1505,7 @@ async def delete_evaluator_version( async def main() -> None: - await client.evaluators.delete_evaluator_version( + await client.evaluators.update_evaluator_version( id="id", version_id="version_id", ) @@ -2028,28 +1513,10 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"evaluators/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", - method="DELETE", - request_options=request_options, + response = await self._raw_client.update_evaluator_version( + id, version_id, name=name, description=description, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data async def set_deployment( self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None @@ -2100,37 +1567,10 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"evaluators/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", - method="POST", - params={ - "version_id": version_id, - }, - request_options=request_options, + response = await self._raw_client.set_deployment( + id, environment_id, version_id=version_id, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - EvaluatorResponse, - construct_type( - type_=EvaluatorResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data async def remove_deployment( self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None @@ -2176,28 +1616,8 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"evaluators/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = await self._raw_client.remove_deployment(id, environment_id, request_options=request_options) + return response.data async def list_environments( self, id: str, *, request_options: typing.Optional[RequestOptions] = 
None @@ -2237,34 +1657,8 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"evaluators/{jsonable_encoder(id)}/environments", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - typing.List[FileEnvironmentResponse], - construct_type( - type_=typing.List[FileEnvironmentResponse], # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = await self._raw_client.list_environments(id, request_options=request_options) + return response.data async def update_monitoring( self, @@ -2317,44 +1711,7 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"evaluators/{jsonable_encoder(id)}/evaluators", - method="POST", - json={ - "activate": convert_and_respect_annotation_metadata( - object_=activate, - annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams], - direction="write", - ), - "deactivate": convert_and_respect_annotation_metadata( - object_=deactivate, - annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams], - direction="write", - ), - }, - request_options=request_options, - omit=OMIT, + response = await self._raw_client.update_monitoring( + id, activate=activate, deactivate=deactivate, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - EvaluatorResponse, - construct_type( - type_=EvaluatorResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data diff --git a/src/humanloop/evaluators/raw_client.py b/src/humanloop/evaluators/raw_client.py new file mode 100644 index 00000000..5344f43f --- /dev/null +++ b/src/humanloop/evaluators/raw_client.py @@ -0,0 +1,1780 @@ +# This file was auto-generated by Fern from our API Definition. 
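# Illustrative sketch (hypothetical names, not generated code): the raw_client
# module added here follows a raw/wrapper split in which raw methods return an
# HttpResponse[T] carrying the parsed body in `.data`, and the public client
# unwraps it. A self-contained miniature of that shape:
import typing
from dataclasses import dataclass

T = typing.TypeVar("T")


@dataclass
class SketchHttpResponse(typing.Generic[T]):
    status_code: int
    data: T


class SketchRawClient:
    def get(self, id: str) -> SketchHttpResponse[dict]:
        # Stand-in for the real httpx request + construct_type parsing.
        return SketchHttpResponse(status_code=200, data={"id": id})


class SketchClient:
    def __init__(self) -> None:
        self._raw_client = SketchRawClient()

    @property
    def with_raw_response(self) -> SketchRawClient:
        return self._raw_client

    def get(self, id: str) -> dict:
        return self._raw_client.get(id).data


print(SketchClient().get("ev_123"))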
+ +import typing +from ..core.client_wrapper import SyncClientWrapper +import datetime as dt +from ..types.log_status import LogStatus +from ..requests.chat_message import ChatMessageParams +from .requests.create_evaluator_log_request_judgment import CreateEvaluatorLogRequestJudgmentParams +from .requests.create_evaluator_log_request_spec import CreateEvaluatorLogRequestSpecParams +from ..core.request_options import RequestOptions +from ..core.http_response import HttpResponse +from ..types.create_evaluator_log_response import CreateEvaluatorLogResponse +from ..core.serialization import convert_and_respect_annotation_metadata +from ..core.unchecked_base_model import construct_type +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..types.http_validation_error import HttpValidationError +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError +from .requests.evaluator_request_spec import EvaluatorRequestSpecParams +from ..types.evaluator_response import EvaluatorResponse +from ..core.jsonable_encoder import jsonable_encoder +from ..types.list_evaluators import ListEvaluators +from ..types.file_environment_response import FileEnvironmentResponse +from ..requests.evaluator_activation_deactivation_request_activate_item import ( + EvaluatorActivationDeactivationRequestActivateItemParams, +) +from ..requests.evaluator_activation_deactivation_request_deactivate_item import ( + EvaluatorActivationDeactivationRequestDeactivateItemParams, +) +from ..core.client_wrapper import AsyncClientWrapper +from ..core.http_response import AsyncHttpResponse + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) + + +class RawEvaluatorsClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def log( + self, + *, + parent_id: str, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + output: typing.Optional[str] = OMIT, + created_at: typing.Optional[dt.datetime] = OMIT, + error: typing.Optional[str] = OMIT, + provider_latency: typing.Optional[float] = OMIT, + stdout: typing.Optional[str] = OMIT, + provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + source_datapoint_id: typing.Optional[str] = OMIT, + trace_parent_id: typing.Optional[str] = OMIT, + user: typing.Optional[str] = OMIT, + create_evaluator_log_request_environment: typing.Optional[str] = OMIT, + save: typing.Optional[bool] = OMIT, + log_id: typing.Optional[str] = OMIT, + output_message: typing.Optional[ChatMessageParams] = OMIT, + judgment: typing.Optional[CreateEvaluatorLogRequestJudgmentParams] = OMIT, + marked_completed: typing.Optional[bool] = OMIT, + spec: typing.Optional[CreateEvaluatorLogRequestSpecParams] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[CreateEvaluatorLogResponse]: + """ + Submit Evaluator judgment for an existing Log. + + Creates a new Log. 
The evaluated Log will be set as the parent of the created Log. + + Parameters + ---------- + parent_id : str + Identifier of the evaluated Log. The newly created Log will have this one set as parent. + + version_id : typing.Optional[str] + ID of the Evaluator version to log against. + + environment : typing.Optional[str] + Name of the Environment identifying a deployed version to log to. + + path : typing.Optional[str] + Path of the Evaluator, including the name. This locates the Evaluator in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Evaluator. + + start_time : typing.Optional[dt.datetime] + When the logged event started. + + end_time : typing.Optional[dt.datetime] + When the logged event ended. + + output : typing.Optional[str] + Generated output from the LLM. Only populated for LLM Evaluator Logs. + + created_at : typing.Optional[dt.datetime] + User defined timestamp for when the log was created. + + error : typing.Optional[str] + Error message if the log is an error. + + provider_latency : typing.Optional[float] + Duration of the logged event in seconds. + + stdout : typing.Optional[str] + Captured log and debug statements. + + provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Raw request sent to provider. Only populated for LLM Evaluator Logs. + + provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Raw response received the provider. Only populated for LLM Evaluator Logs. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the prompt template. + + source : typing.Optional[str] + Identifies where the model was called from. + + metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Any additional metadata to record. + + log_status : typing.Optional[LogStatus] + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + + source_datapoint_id : typing.Optional[str] + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + + trace_parent_id : typing.Optional[str] + The ID of the parent Log to nest this Log under in a Trace. + + user : typing.Optional[str] + End-user ID related to the Log. + + create_evaluator_log_request_environment : typing.Optional[str] + The name of the Environment the Log is associated to. + + save : typing.Optional[bool] + Whether the request/response payloads will be stored on Humanloop. + + log_id : typing.Optional[str] + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + + output_message : typing.Optional[ChatMessageParams] + The message returned by the LLM. Only populated for LLM Evaluator Logs. + + judgment : typing.Optional[CreateEvaluatorLogRequestJudgmentParams] + Evaluator assessment of the Log. + + marked_completed : typing.Optional[bool] + Whether the Log has been manually marked as completed by a user. 
+ + spec : typing.Optional[CreateEvaluatorLogRequestSpecParams] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[CreateEvaluatorLogResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + "evaluators/log", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + }, + json={ + "path": path, + "id": id, + "start_time": start_time, + "end_time": end_time, + "output": output, + "created_at": created_at, + "error": error, + "provider_latency": provider_latency, + "stdout": stdout, + "provider_request": provider_request, + "provider_response": provider_response, + "inputs": inputs, + "source": source, + "metadata": metadata, + "log_status": log_status, + "parent_id": parent_id, + "source_datapoint_id": source_datapoint_id, + "trace_parent_id": trace_parent_id, + "user": user, + "environment": create_evaluator_log_request_environment, + "save": save, + "log_id": log_id, + "output_message": convert_and_respect_annotation_metadata( + object_=output_message, annotation=ChatMessageParams, direction="write" + ), + "judgment": convert_and_respect_annotation_metadata( + object_=judgment, annotation=CreateEvaluatorLogRequestJudgmentParams, direction="write" + ), + "marked_completed": marked_completed, + "spec": convert_and_respect_annotation_metadata( + object_=spec, annotation=CreateEvaluatorLogRequestSpecParams, direction="write" + ), + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + CreateEvaluatorLogResponse, + construct_type( + type_=CreateEvaluatorLogResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def upsert( + self, + *, + spec: EvaluatorRequestSpecParams, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + version_name: typing.Optional[str] = OMIT, + version_description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[EvaluatorResponse]: + """ + Create an Evaluator or update it with a new version if it already exists. + + Evaluators are identified by the `ID` or their `path`. The spec provided determines the version of the Evaluator. + + You can provide `version_name` and `version_description` to identify and describe your versions. + Version names must be unique within an Evaluator - attempting to create a version with a name + that already exists will result in a 409 Conflict error. + + Parameters + ---------- + spec : EvaluatorRequestSpecParams + + path : typing.Optional[str] + Path of the Evaluator, including the name. This locates the Evaluator in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Evaluator. + + version_name : typing.Optional[str] + Unique name for the Evaluator version. Version names must be unique for a given Evaluator. 
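# Illustrative sketch (assumed usage, not generated code): duplicate
# `version_name` values are rejected with a 409 Conflict per the docstring
# above. Catching the generic ApiError and checking its status code is an
# assumption; the SDK may raise a more specific error class for conflicts.
from humanloop import Humanloop
from humanloop.core.api_error import ApiError

client = Humanloop(api_key="YOUR_API_KEY")

try:
    client.evaluators.upsert(
        path="Shared Evaluators/Accuracy",  # illustrative path
        spec={
            "arguments_type": "target_required",
            "return_type": "number",
            "evaluator_type": "python",
            "code": "def evaluate(answer, target):\n    return 0.5",
        },
        version_name="v1",  # must be unique within the Evaluator
        version_description="Baseline scoring logic",
    )
except ApiError as exc:
    if exc.status_code == 409:
        print("A version named 'v1' already exists for this Evaluator")
    else:
        raise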
+ + version_description : typing.Optional[str] + Description of the version, e.g., the changes made in this version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[EvaluatorResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + "evaluators", + method="POST", + json={ + "path": path, + "id": id, + "version_name": version_name, + "version_description": version_description, + "spec": convert_and_respect_annotation_metadata( + object_=spec, annotation=EvaluatorRequestSpecParams, direction="write" + ), + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluatorResponse, + construct_type( + type_=EvaluatorResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[EvaluatorResponse]: + """ + Retrieve the Evaluator with the given ID. + + By default, the deployed version of the Evaluator is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Evaluator. + + Parameters + ---------- + id : str + Unique identifier for Evaluator. + + version_id : typing.Optional[str] + A specific Version ID of the Evaluator to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[EvaluatorResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluators/{jsonable_encoder(id)}", + method="GET", + params={ + "version_id": version_id, + "environment": environment, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluatorResponse, + construct_type( + type_=EvaluatorResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> HttpResponse[None]: + """ + Delete the Evaluator with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Evaluator. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[None] + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluators/{jsonable_encoder(id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return HttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def move( + self, + id: str, + *, + path: typing.Optional[str] = OMIT, + name: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[EvaluatorResponse]: + """ + Move the Evaluator to a different path or change the name. + + Parameters + ---------- + id : str + Unique identifier for Evaluator. + + path : typing.Optional[str] + Path of the Evaluator including the Evaluator name, which is used as a unique identifier. + + name : typing.Optional[str] + Name of the Evaluator, which is used as a unique identifier. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[EvaluatorResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluators/{jsonable_encoder(id)}", + method="PATCH", + json={ + "path": path, + "name": name, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluatorResponse, + construct_type( + type_=EvaluatorResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def list_versions( + self, + id: str, + *, + evaluator_aggregates: typing.Optional[bool] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[ListEvaluators]: + """ + Get a list of all the versions of an Evaluator. + + Parameters + ---------- + id : str + Unique identifier for the Evaluator. + + evaluator_aggregates : typing.Optional[bool] + Whether to include Evaluator aggregate results for the versions in the response + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[ListEvaluators] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluators/{jsonable_encoder(id)}/versions", + method="GET", + params={ + "evaluator_aggregates": evaluator_aggregates, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + ListEvaluators, + construct_type( + type_=ListEvaluators, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def delete_evaluator_version( + self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[None]: + """ + Delete a version of the Evaluator. + + Parameters + ---------- + id : str + Unique identifier for Evaluator. + + version_id : str + Unique identifier for the specific version of the Evaluator. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[None] + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluators/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return HttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def update_evaluator_version( + self, + id: str, + version_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[EvaluatorResponse]: + """ + Update the name or description of the Evaluator version. + + Parameters + ---------- + id : str + Unique identifier for Evaluator. + + version_id : str + Unique identifier for the specific version of the Evaluator. + + name : typing.Optional[str] + Name of the version. + + description : typing.Optional[str] + Description of the version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
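# Illustrative sketch (assumed usage, not generated code): with commit messages
# gone, an existing version is renamed or re-described through the
# update_evaluator_version endpoint defined above. IDs are placeholders.
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")
client.evaluators.update_evaluator_version(
    id="ev_890bcd",
    version_id="evv_012def",
    name="baseline",
    description="Scores answers against the target, defaulting to 0.5.",
)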
+ + Returns + ------- + HttpResponse[EvaluatorResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluators/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", + method="PATCH", + json={ + "name": name, + "description": description, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluatorResponse, + construct_type( + type_=EvaluatorResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def set_deployment( + self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[EvaluatorResponse]: + """ + Deploy Evaluator to an Environment. + + Set the deployed version for the specified Environment. This Evaluator + will be used for calls made to the Evaluator in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Evaluator. + + environment_id : str + Unique identifier for the Environment to deploy the Version to. + + version_id : str + Unique identifier for the specific version of the Evaluator. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[EvaluatorResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluators/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="POST", + params={ + "version_id": version_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluatorResponse, + construct_type( + type_=EvaluatorResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def remove_deployment( + self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[None]: + """ + Remove deployed Evaluator from the Environment. + + Remove the deployed version for the specified Environment. This Evaluator + will no longer be used for calls made to the Evaluator in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Evaluator. + + environment_id : str + Unique identifier for the Environment to remove the deployment from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
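# Illustrative sketch (assumed usage, not generated code): deploying a specific
# Evaluator version to an Environment, mirroring the example values used for
# this endpoint elsewhere in the diff.
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")
client.evaluators.set_deployment(
    id="ev_890bcd",
    environment_id="staging",
    version_id="evv_012def",
)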
+ + Returns + ------- + HttpResponse[None] + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluators/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return HttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def list_environments( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[typing.List[FileEnvironmentResponse]]: + """ + List all Environments and their deployed versions for the Evaluator. + + Parameters + ---------- + id : str + Unique identifier for Evaluator. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[typing.List[FileEnvironmentResponse]] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluators/{jsonable_encoder(id)}/environments", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + typing.List[FileEnvironmentResponse], + construct_type( + type_=typing.List[FileEnvironmentResponse], # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def update_monitoring( + self, + id: str, + *, + activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, + deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[EvaluatorResponse]: + """ + Activate and deactivate Evaluators for monitoring the Evaluator. + + An activated Evaluator will automatically be run on all new Logs + within the Evaluator for monitoring purposes. + + Parameters + ---------- + id : str + + activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] + Evaluators to activate for Monitoring. These will be automatically run on new Logs. + + deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] + Evaluators to deactivate. These will not be run on new Logs. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[EvaluatorResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"evaluators/{jsonable_encoder(id)}/evaluators", + method="POST", + json={ + "activate": convert_and_respect_annotation_metadata( + object_=activate, + annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams], + direction="write", + ), + "deactivate": convert_and_respect_annotation_metadata( + object_=deactivate, + annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams], + direction="write", + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluatorResponse, + construct_type( + type_=EvaluatorResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + +class AsyncRawEvaluatorsClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def log( + self, + *, + parent_id: str, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + output: typing.Optional[str] = OMIT, + created_at: typing.Optional[dt.datetime] = OMIT, + error: typing.Optional[str] = OMIT, + provider_latency: typing.Optional[float] = OMIT, + stdout: typing.Optional[str] = OMIT, + provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + source_datapoint_id: typing.Optional[str] = OMIT, + trace_parent_id: typing.Optional[str] = OMIT, + user: typing.Optional[str] = OMIT, + create_evaluator_log_request_environment: typing.Optional[str] = OMIT, + save: typing.Optional[bool] = OMIT, + log_id: typing.Optional[str] = OMIT, + output_message: typing.Optional[ChatMessageParams] = OMIT, + judgment: typing.Optional[CreateEvaluatorLogRequestJudgmentParams] = OMIT, + marked_completed: typing.Optional[bool] = OMIT, + spec: typing.Optional[CreateEvaluatorLogRequestSpecParams] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[CreateEvaluatorLogResponse]: + """ + Submit Evaluator judgment for an existing Log. + + Creates a new Log. The evaluated Log will be set as the parent of the created Log. + + Parameters + ---------- + parent_id : str + Identifier of the evaluated Log. The newly created Log will have this one set as parent. + + version_id : typing.Optional[str] + ID of the Evaluator version to log against. + + environment : typing.Optional[str] + Name of the Environment identifying a deployed version to log to. 
+
+        path : typing.Optional[str]
+            Path of the Evaluator, including the name. This locates the Evaluator in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Evaluator.
+
+        start_time : typing.Optional[dt.datetime]
+            When the logged event started.
+
+        end_time : typing.Optional[dt.datetime]
+            When the logged event ended.
+
+        output : typing.Optional[str]
+            Generated output from the LLM. Only populated for LLM Evaluator Logs.
+
+        created_at : typing.Optional[dt.datetime]
+            User defined timestamp for when the log was created.
+
+        error : typing.Optional[str]
+            Error message if the log is an error.
+
+        provider_latency : typing.Optional[float]
+            Duration of the logged event in seconds.
+
+        stdout : typing.Optional[str]
+            Captured log and debug statements.
+
+        provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw request sent to provider. Only populated for LLM Evaluator Logs.
+
+        provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw response received from the provider. Only populated for LLM Evaluator Logs.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the prompt template.
+
+        source : typing.Optional[str]
+            Identifies where the model was called from.
+
+        metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Any additional metadata to record.
+
+        log_status : typing.Optional[LogStatus]
+            Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior.
+
+        source_datapoint_id : typing.Optional[str]
+            Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+        trace_parent_id : typing.Optional[str]
+            The ID of the parent Log to nest this Log under in a Trace.
+
+        user : typing.Optional[str]
+            End-user ID related to the Log.
+
+        create_evaluator_log_request_environment : typing.Optional[str]
+            The name of the Environment the Log is associated to.
+
+        save : typing.Optional[bool]
+            Whether the request/response payloads will be stored on Humanloop.
+
+        log_id : typing.Optional[str]
+            This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+        output_message : typing.Optional[ChatMessageParams]
+            The message returned by the LLM. Only populated for LLM Evaluator Logs.
+
+        judgment : typing.Optional[CreateEvaluatorLogRequestJudgmentParams]
+            Evaluator assessment of the Log.
+
+        marked_completed : typing.Optional[bool]
+            Whether the Log has been manually marked as completed by a user.
+
+        spec : typing.Optional[CreateEvaluatorLogRequestSpecParams]
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
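+
+        Example (illustrative sketch: assumes an `AsyncHumanloop` client named
+        `client`; the parent Log ID and judgment value are hypothetical)::
+
+            raw = await client.evaluators.with_raw_response.log(
+                parent_id="log_abc123",
+                judgment=True,
+            )
+            evaluator_log = raw.data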
+ + Returns + ------- + AsyncHttpResponse[CreateEvaluatorLogResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + "evaluators/log", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + }, + json={ + "path": path, + "id": id, + "start_time": start_time, + "end_time": end_time, + "output": output, + "created_at": created_at, + "error": error, + "provider_latency": provider_latency, + "stdout": stdout, + "provider_request": provider_request, + "provider_response": provider_response, + "inputs": inputs, + "source": source, + "metadata": metadata, + "log_status": log_status, + "parent_id": parent_id, + "source_datapoint_id": source_datapoint_id, + "trace_parent_id": trace_parent_id, + "user": user, + "environment": create_evaluator_log_request_environment, + "save": save, + "log_id": log_id, + "output_message": convert_and_respect_annotation_metadata( + object_=output_message, annotation=ChatMessageParams, direction="write" + ), + "judgment": convert_and_respect_annotation_metadata( + object_=judgment, annotation=CreateEvaluatorLogRequestJudgmentParams, direction="write" + ), + "marked_completed": marked_completed, + "spec": convert_and_respect_annotation_metadata( + object_=spec, annotation=CreateEvaluatorLogRequestSpecParams, direction="write" + ), + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + CreateEvaluatorLogResponse, + construct_type( + type_=CreateEvaluatorLogResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def upsert( + self, + *, + spec: EvaluatorRequestSpecParams, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + version_name: typing.Optional[str] = OMIT, + version_description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[EvaluatorResponse]: + """ + Create an Evaluator or update it with a new version if it already exists. + + Evaluators are identified by the `ID` or their `path`. The spec provided determines the version of the Evaluator. + + You can provide `version_name` and `version_description` to identify and describe your versions. + Version names must be unique within an Evaluator - attempting to create a version with a name + that already exists will result in a 409 Conflict error. + + Parameters + ---------- + spec : EvaluatorRequestSpecParams + + path : typing.Optional[str] + Path of the Evaluator, including the name. This locates the Evaluator in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Evaluator. + + version_name : typing.Optional[str] + Unique name for the Evaluator version. Version names must be unique for a given Evaluator. + + version_description : typing.Optional[str] + Description of the version, e.g., the changes made in this version. 
+ + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[EvaluatorResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + "evaluators", + method="POST", + json={ + "path": path, + "id": id, + "version_name": version_name, + "version_description": version_description, + "spec": convert_and_respect_annotation_metadata( + object_=spec, annotation=EvaluatorRequestSpecParams, direction="write" + ), + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluatorResponse, + construct_type( + type_=EvaluatorResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[EvaluatorResponse]: + """ + Retrieve the Evaluator with the given ID. + + By default, the deployed version of the Evaluator is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Evaluator. + + Parameters + ---------- + id : str + Unique identifier for Evaluator. + + version_id : typing.Optional[str] + A specific Version ID of the Evaluator to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[EvaluatorResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluators/{jsonable_encoder(id)}", + method="GET", + params={ + "version_id": version_id, + "environment": environment, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluatorResponse, + construct_type( + type_=EvaluatorResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def delete( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[None]: + """ + Delete the Evaluator with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Evaluator. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
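+
+        Example (illustrative sketch; the Evaluator ID is hypothetical)::
+
+            await client.evaluators.with_raw_response.delete(id="ev_890bcd")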
+ + Returns + ------- + AsyncHttpResponse[None] + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluators/{jsonable_encoder(id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return AsyncHttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def move( + self, + id: str, + *, + path: typing.Optional[str] = OMIT, + name: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[EvaluatorResponse]: + """ + Move the Evaluator to a different path or change the name. + + Parameters + ---------- + id : str + Unique identifier for Evaluator. + + path : typing.Optional[str] + Path of the Evaluator including the Evaluator name, which is used as a unique identifier. + + name : typing.Optional[str] + Name of the Evaluator, which is used as a unique identifier. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[EvaluatorResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluators/{jsonable_encoder(id)}", + method="PATCH", + json={ + "path": path, + "name": name, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluatorResponse, + construct_type( + type_=EvaluatorResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def list_versions( + self, + id: str, + *, + evaluator_aggregates: typing.Optional[bool] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[ListEvaluators]: + """ + Get a list of all the versions of an Evaluator. + + Parameters + ---------- + id : str + Unique identifier for the Evaluator. + + evaluator_aggregates : typing.Optional[bool] + Whether to include Evaluator aggregate results for the versions in the response + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
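+
+        Example (illustrative sketch: the Evaluator ID is hypothetical and the
+        `records` attribute is assumed from the `ListEvaluators` response model)::
+
+            raw = await client.evaluators.with_raw_response.list_versions(
+                id="ev_890bcd",
+                evaluator_aggregates=True,
+            )
+            for version in raw.data.records:
+                print(version.version_id)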
+ + Returns + ------- + AsyncHttpResponse[ListEvaluators] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluators/{jsonable_encoder(id)}/versions", + method="GET", + params={ + "evaluator_aggregates": evaluator_aggregates, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + ListEvaluators, + construct_type( + type_=ListEvaluators, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def delete_evaluator_version( + self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[None]: + """ + Delete a version of the Evaluator. + + Parameters + ---------- + id : str + Unique identifier for Evaluator. + + version_id : str + Unique identifier for the specific version of the Evaluator. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[None] + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluators/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return AsyncHttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def update_evaluator_version( + self, + id: str, + version_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[EvaluatorResponse]: + """ + Update the name or description of the Evaluator version. + + Parameters + ---------- + id : str + Unique identifier for Evaluator. + + version_id : str + Unique identifier for the specific version of the Evaluator. + + name : typing.Optional[str] + Name of the version. + + description : typing.Optional[str] + Description of the version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[EvaluatorResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluators/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", + method="PATCH", + json={ + "name": name, + "description": description, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluatorResponse, + construct_type( + type_=EvaluatorResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def set_deployment( + self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[EvaluatorResponse]: + """ + Deploy Evaluator to an Environment. + + Set the deployed version for the specified Environment. This Evaluator + will be used for calls made to the Evaluator in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Evaluator. + + environment_id : str + Unique identifier for the Environment to deploy the Version to. + + version_id : str + Unique identifier for the specific version of the Evaluator. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[EvaluatorResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluators/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="POST", + params={ + "version_id": version_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluatorResponse, + construct_type( + type_=EvaluatorResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def remove_deployment( + self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[None]: + """ + Remove deployed Evaluator from the Environment. + + Remove the deployed version for the specified Environment. This Evaluator + will no longer be used for calls made to the Evaluator in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Evaluator. + + environment_id : str + Unique identifier for the Environment to remove the deployment from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
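+
+        Example (illustrative sketch; both identifiers are hypothetical)::
+
+            await client.evaluators.with_raw_response.remove_deployment(
+                id="ev_890bcd",
+                environment_id="env_123abc",
+            )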
+ + Returns + ------- + AsyncHttpResponse[None] + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluators/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return AsyncHttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def list_environments( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[typing.List[FileEnvironmentResponse]]: + """ + List all Environments and their deployed versions for the Evaluator. + + Parameters + ---------- + id : str + Unique identifier for Evaluator. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[typing.List[FileEnvironmentResponse]] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluators/{jsonable_encoder(id)}/environments", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + typing.List[FileEnvironmentResponse], + construct_type( + type_=typing.List[FileEnvironmentResponse], # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def update_monitoring( + self, + id: str, + *, + activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, + deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[EvaluatorResponse]: + """ + Activate and deactivate Evaluators for monitoring the Evaluator. + + An activated Evaluator will automatically be run on all new Logs + within the Evaluator for monitoring purposes. + + Parameters + ---------- + id : str + + activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] + Evaluators to activate for Monitoring. These will be automatically run on new Logs. + + deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] + Evaluators to deactivate. These will not be run on new Logs. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[EvaluatorResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"evaluators/{jsonable_encoder(id)}/evaluators", + method="POST", + json={ + "activate": convert_and_respect_annotation_metadata( + object_=activate, + annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams], + direction="write", + ), + "deactivate": convert_and_respect_annotation_metadata( + object_=deactivate, + annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams], + direction="write", + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + EvaluatorResponse, + construct_type( + type_=EvaluatorResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/humanloop/files/client.py b/src/humanloop/files/client.py index 126d775b..c07358d0 100644 --- a/src/humanloop/files/client.py +++ b/src/humanloop/files/client.py @@ -2,6 +2,7 @@ import typing from ..core.client_wrapper import SyncClientWrapper +from .raw_client import RawFilesClient from ..types.file_type import FileType from ..types.project_sort_by import ProjectSortBy from ..types.sort_order import SortOrder @@ -9,13 +10,9 @@ from ..types.paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response import ( PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse, ) -from ..core.unchecked_base_model import construct_type -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.http_validation_error import HttpValidationError -from json.decoder import JSONDecodeError -from ..core.api_error import ApiError from .types.retrieve_by_path_files_retrieve_by_path_post_response import RetrieveByPathFilesRetrieveByPathPostResponse from ..core.client_wrapper import AsyncClientWrapper +from .raw_client import AsyncRawFilesClient # this is used as the default value for optional parameters OMIT = typing.cast(typing.Any, ...) @@ -23,7 +20,18 @@ class FilesClient: def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper + self._raw_client = RawFilesClient(client_wrapper=client_wrapper) + + @property + def with_raw_response(self) -> RawFilesClient: + """ + Retrieves a raw implementation of this client that returns raw responses. 
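+
+        Example (illustrative sketch: assumes a configured `Humanloop` client named
+        `client`; the page size is arbitrary)::
+
+            raw = client.files.with_raw_response.list_files(size=10)
+            files_page = raw.data  # same parsed model that list_files() returns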
+ + Returns + ------- + RawFilesClient + """ + return self._raw_client def list_files( self, @@ -84,44 +92,18 @@ def list_files( ) client.files.list_files() """ - _response = self._client_wrapper.httpx_client.request( - "files", - method="GET", - params={ - "page": page, - "size": size, - "name": name, - "template": template, - "type": type, - "environment": environment, - "sort_by": sort_by, - "order": order, - }, + response = self._raw_client.list_files( + page=page, + size=size, + name=name, + template=template, + type=type, + environment=environment, + sort_by=sort_by, + order=order, request_options=request_options, ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse, - construct_type( - type_=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data def retrieve_by_path( self, @@ -160,49 +142,26 @@ def retrieve_by_path( path="path", ) """ - _response = self._client_wrapper.httpx_client.request( - "files/retrieve-by-path", - method="POST", - params={ - "environment": environment, - }, - json={ - "path": path, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, + response = self._raw_client.retrieve_by_path( + path=path, environment=environment, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - RetrieveByPathFilesRetrieveByPathPostResponse, - construct_type( - type_=RetrieveByPathFilesRetrieveByPathPostResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data class AsyncFilesClient: def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper + self._raw_client = AsyncRawFilesClient(client_wrapper=client_wrapper) + + @property + def with_raw_response(self) -> AsyncRawFilesClient: + """ + Retrieves a raw implementation of this client that returns raw responses. 
+ + Returns + ------- + AsyncRawFilesClient + """ + return self._raw_client async def list_files( self, @@ -271,44 +230,18 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - "files", - method="GET", - params={ - "page": page, - "size": size, - "name": name, - "template": template, - "type": type, - "environment": environment, - "sort_by": sort_by, - "order": order, - }, + response = await self._raw_client.list_files( + page=page, + size=size, + name=name, + template=template, + type=type, + environment=environment, + sort_by=sort_by, + order=order, request_options=request_options, ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse, - construct_type( - type_=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data async def retrieve_by_path( self, @@ -355,41 +288,7 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - "files/retrieve-by-path", - method="POST", - params={ - "environment": environment, - }, - json={ - "path": path, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, + response = await self._raw_client.retrieve_by_path( + path=path, environment=environment, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - RetrieveByPathFilesRetrieveByPathPostResponse, - construct_type( - type_=RetrieveByPathFilesRetrieveByPathPostResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data diff --git a/src/humanloop/files/raw_client.py b/src/humanloop/files/raw_client.py new file mode 100644 index 00000000..19f52cf2 --- /dev/null +++ b/src/humanloop/files/raw_client.py @@ -0,0 +1,345 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing +from ..core.client_wrapper import SyncClientWrapper +from ..types.file_type import FileType +from ..types.project_sort_by import ProjectSortBy +from ..types.sort_order import SortOrder +from ..core.request_options import RequestOptions +from ..core.http_response import HttpResponse +from ..types.paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response import ( + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse, +) +from ..core.unchecked_base_model import construct_type +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..types.http_validation_error import HttpValidationError +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError +from .types.retrieve_by_path_files_retrieve_by_path_post_response import RetrieveByPathFilesRetrieveByPathPostResponse +from ..core.client_wrapper import AsyncClientWrapper +from ..core.http_response import AsyncHttpResponse + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) + + +class RawFilesClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def list_files( + self, + *, + page: typing.Optional[int] = None, + size: typing.Optional[int] = None, + name: typing.Optional[str] = None, + template: typing.Optional[bool] = None, + type: typing.Optional[typing.Union[FileType, typing.Sequence[FileType]]] = None, + environment: typing.Optional[str] = None, + sort_by: typing.Optional[ProjectSortBy] = None, + order: typing.Optional[SortOrder] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse]: + """ + Get a paginated list of files. + + Parameters + ---------- + page : typing.Optional[int] + Page offset for pagination. + + size : typing.Optional[int] + Page size for pagination. Number of files to fetch. + + name : typing.Optional[str] + Case-insensitive filter for file name. + + template : typing.Optional[bool] + Filter to include only template files. + + type : typing.Optional[typing.Union[FileType, typing.Sequence[FileType]]] + List of file types to filter for. + + environment : typing.Optional[str] + Case-sensitive filter for files with a deployment in the specified environment. Requires the environment name. + + sort_by : typing.Optional[ProjectSortBy] + Field to sort files by + + order : typing.Optional[SortOrder] + Direction to sort by. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + "files", + method="GET", + params={ + "page": page, + "size": size, + "name": name, + "template": template, + "type": type, + "environment": environment, + "sort_by": sort_by, + "order": order, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse, + construct_type( + type_=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def retrieve_by_path( + self, + *, + path: str, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[RetrieveByPathFilesRetrieveByPathPostResponse]: + """ + Retrieve a File by path. + + Parameters + ---------- + path : str + Path of the File to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[RetrieveByPathFilesRetrieveByPathPostResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + "files/retrieve-by-path", + method="POST", + params={ + "environment": environment, + }, + json={ + "path": path, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + RetrieveByPathFilesRetrieveByPathPostResponse, + construct_type( + type_=RetrieveByPathFilesRetrieveByPathPostResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + +class AsyncRawFilesClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def list_files( + self, + *, + page: typing.Optional[int] = None, + size: typing.Optional[int] = None, + name: typing.Optional[str] = None, + template: typing.Optional[bool] = None, + type: typing.Optional[typing.Union[FileType, typing.Sequence[FileType]]] = None, + environment: typing.Optional[str] = None, + sort_by: typing.Optional[ProjectSortBy] = None, + order: typing.Optional[SortOrder] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> 
AsyncHttpResponse[PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse]: + """ + Get a paginated list of files. + + Parameters + ---------- + page : typing.Optional[int] + Page offset for pagination. + + size : typing.Optional[int] + Page size for pagination. Number of files to fetch. + + name : typing.Optional[str] + Case-insensitive filter for file name. + + template : typing.Optional[bool] + Filter to include only template files. + + type : typing.Optional[typing.Union[FileType, typing.Sequence[FileType]]] + List of file types to filter for. + + environment : typing.Optional[str] + Case-sensitive filter for files with a deployment in the specified environment. Requires the environment name. + + sort_by : typing.Optional[ProjectSortBy] + Field to sort files by + + order : typing.Optional[SortOrder] + Direction to sort by. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + "files", + method="GET", + params={ + "page": page, + "size": size, + "name": name, + "template": template, + "type": type, + "environment": environment, + "sort_by": sort_by, + "order": order, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse, + construct_type( + type_=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def retrieve_by_path( + self, + *, + path: str, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[RetrieveByPathFilesRetrieveByPathPostResponse]: + """ + Retrieve a File by path. + + Parameters + ---------- + path : str + Path of the File to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
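+
+        Example (illustrative sketch; the path and Environment name are hypothetical)::
+
+            raw = await client.files.with_raw_response.retrieve_by_path(
+                path="projects/support-bot",
+                environment="production",
+            )
+            file = raw.data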
+ + Returns + ------- + AsyncHttpResponse[RetrieveByPathFilesRetrieveByPathPostResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + "files/retrieve-by-path", + method="POST", + params={ + "environment": environment, + }, + json={ + "path": path, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + RetrieveByPathFilesRetrieveByPathPostResponse, + construct_type( + type_=RetrieveByPathFilesRetrieveByPathPostResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/humanloop/flows/client.py b/src/humanloop/flows/client.py index 69f678a4..f351f5ae 100644 --- a/src/humanloop/flows/client.py +++ b/src/humanloop/flows/client.py @@ -2,26 +2,24 @@ import typing from ..core.client_wrapper import SyncClientWrapper +from .raw_client import RawFlowsClient from ..requests.chat_message import ChatMessageParams import datetime as dt from ..types.log_status import LogStatus from ..requests.flow_kernel_request import FlowKernelRequestParams from ..core.request_options import RequestOptions from ..types.create_flow_log_response import CreateFlowLogResponse -from ..core.serialization import convert_and_respect_annotation_metadata -from ..core.unchecked_base_model import construct_type -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.http_validation_error import HttpValidationError -from json.decoder import JSONDecodeError -from ..core.api_error import ApiError from ..types.flow_log_response import FlowLogResponse -from ..core.jsonable_encoder import jsonable_encoder from ..types.flow_response import FlowResponse from ..types.project_sort_by import ProjectSortBy from ..types.sort_order import SortOrder from ..core.pagination import SyncPager from ..types.paginated_data_flow_response import PaginatedDataFlowResponse -from ..types.version_status import VersionStatus +from ..core.unchecked_base_model import construct_type +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..types.http_validation_error import HttpValidationError +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError from ..types.list_flows import ListFlows from ..types.file_environment_response import FileEnvironmentResponse from ..requests.evaluator_activation_deactivation_request_activate_item import ( @@ -31,6 +29,7 @@ EvaluatorActivationDeactivationRequestDeactivateItemParams, ) from ..core.client_wrapper import AsyncClientWrapper +from .raw_client import AsyncRawFlowsClient from ..core.pagination import AsyncPager # this is used as the default value for optional parameters @@ -39,7 +38,18 @@ class FlowsClient: def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper + self._raw_client = RawFlowsClient(client_wrapper=client_wrapper) + + @property + def with_raw_response(self) -> RawFlowsClient: + """ + Retrieves a raw implementation of this 
client that returns raw responses. + + Returns + ------- + RawFlowsClient + """ + return self._raw_client def log( self, @@ -211,75 +221,37 @@ def log( ), ) """ - _response = self._client_wrapper.httpx_client.request( - "flows/log", - method="POST", - params={ - "version_id": version_id, - "environment": environment, - }, - json={ - "messages": convert_and_respect_annotation_metadata( - object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" - ), - "output_message": convert_and_respect_annotation_metadata( - object_=output_message, annotation=ChatMessageParams, direction="write" - ), - "run_id": run_id, - "path": path, - "id": id, - "start_time": start_time, - "end_time": end_time, - "output": output, - "created_at": created_at, - "error": error, - "provider_latency": provider_latency, - "stdout": stdout, - "provider_request": provider_request, - "provider_response": provider_response, - "inputs": inputs, - "source": source, - "metadata": metadata, - "log_status": log_status, - "source_datapoint_id": source_datapoint_id, - "trace_parent_id": trace_parent_id, - "user": user, - "environment": flow_log_request_environment, - "save": save, - "log_id": log_id, - "flow": convert_and_respect_annotation_metadata( - object_=flow, annotation=FlowKernelRequestParams, direction="write" - ), - }, - headers={ - "content-type": "application/json", - }, + response = self._raw_client.log( + version_id=version_id, + environment=environment, + messages=messages, + output_message=output_message, + run_id=run_id, + path=path, + id=id, + start_time=start_time, + end_time=end_time, + output=output, + created_at=created_at, + error=error, + provider_latency=provider_latency, + stdout=stdout, + provider_request=provider_request, + provider_response=provider_response, + inputs=inputs, + source=source, + metadata=metadata, + log_status=log_status, + source_datapoint_id=source_datapoint_id, + trace_parent_id=trace_parent_id, + user=user, + flow_log_request_environment=flow_log_request_environment, + save=save, + log_id=log_id, + flow=flow, request_options=request_options, - omit=OMIT, ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - CreateFlowLogResponse, - construct_type( - type_=CreateFlowLogResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data def update_log( self, @@ -348,50 +320,17 @@ def update_log( log_status="complete", ) """ - _response = self._client_wrapper.httpx_client.request( - f"flows/logs/{jsonable_encoder(log_id)}", - method="PATCH", - json={ - "messages": convert_and_respect_annotation_metadata( - object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" - ), - "output_message": convert_and_respect_annotation_metadata( - object_=output_message, annotation=ChatMessageParams, direction="write" - ), - "inputs": inputs, - "output": output, - "error": error, - "log_status": log_status, - }, - headers={ - "content-type": "application/json", - }, + response = self._raw_client.update_log( + log_id, + messages=messages, + output_message=output_message, + inputs=inputs, + output=output, + 
error=error, + log_status=log_status, request_options=request_options, - omit=OMIT, ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - FlowLogResponse, - construct_type( - type_=FlowLogResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data def get( self, @@ -437,38 +376,10 @@ def get( id="fl_6o701g4jmcanPVHxdqD0O", ) """ - _response = self._client_wrapper.httpx_client.request( - f"flows/{jsonable_encoder(id)}", - method="GET", - params={ - "version_id": version_id, - "environment": environment, - }, - request_options=request_options, + response = self._raw_client.get( + id, version_id=version_id, environment=environment, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - FlowResponse, - construct_type( - type_=FlowResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: """ @@ -497,28 +408,8 @@ def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = id="fl_6o701g4jmcanPVHxdqD0O", ) """ - _response = self._client_wrapper.httpx_client.request( - f"flows/{jsonable_encoder(id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = self._raw_client.delete(id, request_options=request_options) + return response.data def move( self, @@ -566,43 +457,10 @@ def move( path="new directory/new name", ) """ - _response = self._client_wrapper.httpx_client.request( - f"flows/{jsonable_encoder(id)}", - method="PATCH", - json={ - "path": path, - "name": name, - "directory_id": directory_id, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, + response = self._raw_client.move( + id, path=path, name=name, directory_id=directory_id, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - FlowResponse, - construct_type( - type_=FlowResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: 
ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data def list( self, @@ -663,7 +521,7 @@ def list( yield page """ page = page if page is not None else 1 - _response = self._client_wrapper.httpx_client.request( + _response = self._raw_client._client_wrapper.httpx_client.request( "flows", method="GET", params={ @@ -718,7 +576,8 @@ def upsert( attributes: typing.Dict[str, typing.Optional[typing.Any]], path: typing.Optional[str] = OMIT, id: typing.Optional[str] = OMIT, - commit_message: typing.Optional[str] = OMIT, + version_name: typing.Optional[str] = OMIT, + version_description: typing.Optional[str] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> FlowResponse: """ @@ -726,9 +585,9 @@ def upsert( Flows can also be identified by the `ID` or their `path`. - If you provide a commit message, then the new version will be committed; - otherwise it will be uncommitted. If you try to commit an already committed version, - an exception will be raised. + You can provide `version_name` and `version_description` to identify and describe your versions. + Version names must be unique within a Flow - attempting to create a version with a name + that already exists will result in a 409 Conflict error. Parameters ---------- @@ -741,8 +600,11 @@ def upsert( id : typing.Optional[str] ID for an existing Flow. - commit_message : typing.Optional[str] - Message describing the changes made. + version_name : typing.Optional[str] + Unique name for the Flow version. Version names must be unique for a given Flow. + + version_description : typing.Optional[str] + Description of the version, e.g., the changes made in this version. request_options : typing.Optional[RequestOptions] Request-specific configuration. @@ -772,54 +634,23 @@ def upsert( "description": "Retrieval tool for MedQA.", "source_code": "def retrieval_tool(question: str) -> str:\n pass\n", }, - "commit_message": "Initial commit", }, ) """ - _response = self._client_wrapper.httpx_client.request( - "flows", - method="POST", - json={ - "path": path, - "id": id, - "attributes": attributes, - "commit_message": commit_message, - }, - headers={ - "content-type": "application/json", - }, + response = self._raw_client.upsert( + attributes=attributes, + path=path, + id=id, + version_name=version_name, + version_description=version_description, request_options=request_options, - omit=OMIT, ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - FlowResponse, - construct_type( - type_=FlowResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data def list_versions( self, id: str, *, - status: typing.Optional[VersionStatus] = None, evaluator_aggregates: typing.Optional[bool] = None, request_options: typing.Optional[RequestOptions] = None, ) -> ListFlows: @@ -831,9 +662,6 @@ def list_versions( id : str Unique identifier for Flow. 
- status : typing.Optional[VersionStatus] - Filter versions by status: 'uncommitted', 'committed'. If no status is provided, all versions are returned. - evaluator_aggregates : typing.Optional[bool] Whether to include Evaluator aggregate results for the versions in the response @@ -854,49 +682,18 @@ def list_versions( ) client.flows.list_versions( id="fl_6o701g4jmcanPVHxdqD0O", - status="committed", ) """ - _response = self._client_wrapper.httpx_client.request( - f"flows/{jsonable_encoder(id)}/versions", - method="GET", - params={ - "status": status, - "evaluator_aggregates": evaluator_aggregates, - }, - request_options=request_options, + response = self._raw_client.list_versions( + id, evaluator_aggregates=evaluator_aggregates, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - ListFlows, - construct_type( - type_=ListFlows, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data - def commit( - self, id: str, version_id: str, *, commit_message: str, request_options: typing.Optional[RequestOptions] = None - ) -> FlowResponse: + def delete_flow_version( + self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> None: """ - Commit a version of the Flow with a commit message. - - If the version is already committed, an exception will be raised. + Delete a version of the Flow. Parameters ---------- @@ -906,16 +703,12 @@ def commit( version_id : str Unique identifier for the specific version of the Flow. - commit_message : str - Message describing the changes made. - request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - FlowResponse - Successful Response + None Examples -------- @@ -924,50 +717,25 @@ def commit( client = Humanloop( api_key="YOUR_API_KEY", ) - client.flows.commit( - id="fl_6o701g4jmcanPVHxdqD0O", - version_id="flv_6o701g4jmcanPVHxdqD0O", - commit_message="RAG lookup tool bug fixing", + client.flows.delete_flow_version( + id="id", + version_id="version_id", ) """ - _response = self._client_wrapper.httpx_client.request( - f"flows/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}/commit", - method="POST", - json={ - "commit_message": commit_message, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - FlowResponse, - construct_type( - type_=FlowResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = self._raw_client.delete_flow_version(id, version_id, request_options=request_options) + return response.data - def delete_flow_version( - self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> None: + def update_flow_version( + self, + id: str, + version_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> FlowResponse: """ - Delete a version of the Flow. + Update the name or description of the Flow version. Parameters ---------- @@ -977,12 +745,19 @@ def delete_flow_version( version_id : str Unique identifier for the specific version of the Flow. + name : typing.Optional[str] + Name of the version. + + description : typing.Optional[str] + Description of the version. + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - None + FlowResponse + Successful Response Examples -------- @@ -991,33 +766,15 @@ def delete_flow_version( client = Humanloop( api_key="YOUR_API_KEY", ) - client.flows.delete_flow_version( + client.flows.update_flow_version( id="id", version_id="version_id", ) """ - _response = self._client_wrapper.httpx_client.request( - f"flows/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", - method="DELETE", - request_options=request_options, + response = self._raw_client.update_flow_version( + id, version_id, name=name, description=description, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data def set_deployment( self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None @@ -1060,37 +817,10 @@ def set_deployment( version_id="flv_6o701g4jmcanPVHxdqD0O", ) """ - _response = self._client_wrapper.httpx_client.request( - f"flows/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", - method="POST", - params={ - "version_id": version_id, - }, - request_options=request_options, + response = self._raw_client.set_deployment( + id, environment_id, version_id=version_id, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - FlowResponse, - construct_type( - type_=FlowResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data def remove_deployment( self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None @@ -1128,28 +858,8 @@ def remove_deployment( environment_id="staging", ) """ - _response = self._client_wrapper.httpx_client.request( - f"flows/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = self._raw_client.remove_deployment(id, environment_id, request_options=request_options) + return response.data def list_environments( self, id: str, *, request_options: typing.Optional[RequestOptions] = None @@ -1181,34 +891,8 @@ def list_environments( id="fl_6o701g4jmcanPVHxdqD0O", ) """ - _response = self._client_wrapper.httpx_client.request( - 
f"flows/{jsonable_encoder(id)}/environments", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - typing.List[FileEnvironmentResponse], - construct_type( - type_=typing.List[FileEnvironmentResponse], # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = self._raw_client.list_environments(id, request_options=request_options) + return response.data def update_monitoring( self, @@ -1254,52 +938,26 @@ def update_monitoring( activate=[{"evaluator_version_id": "evv_1abc4308abd"}], ) """ - _response = self._client_wrapper.httpx_client.request( - f"flows/{jsonable_encoder(id)}/evaluators", - method="POST", - json={ - "activate": convert_and_respect_annotation_metadata( - object_=activate, - annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams], - direction="write", - ), - "deactivate": convert_and_respect_annotation_metadata( - object_=deactivate, - annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams], - direction="write", - ), - }, - request_options=request_options, - omit=OMIT, + response = self._raw_client.update_monitoring( + id, activate=activate, deactivate=deactivate, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - FlowResponse, - construct_type( - type_=FlowResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data class AsyncFlowsClient: def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper + self._raw_client = AsyncRawFlowsClient(client_wrapper=client_wrapper) + + @property + def with_raw_response(self) -> AsyncRawFlowsClient: + """ + Retrieves a raw implementation of this client that returns raw responses. 
+ + Returns + ------- + AsyncRawFlowsClient + """ + return self._raw_client async def log( self, @@ -1478,75 +1136,37 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - "flows/log", - method="POST", - params={ - "version_id": version_id, - "environment": environment, - }, - json={ - "messages": convert_and_respect_annotation_metadata( - object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" - ), - "output_message": convert_and_respect_annotation_metadata( - object_=output_message, annotation=ChatMessageParams, direction="write" - ), - "run_id": run_id, - "path": path, - "id": id, - "start_time": start_time, - "end_time": end_time, - "output": output, - "created_at": created_at, - "error": error, - "provider_latency": provider_latency, - "stdout": stdout, - "provider_request": provider_request, - "provider_response": provider_response, - "inputs": inputs, - "source": source, - "metadata": metadata, - "log_status": log_status, - "source_datapoint_id": source_datapoint_id, - "trace_parent_id": trace_parent_id, - "user": user, - "environment": flow_log_request_environment, - "save": save, - "log_id": log_id, - "flow": convert_and_respect_annotation_metadata( - object_=flow, annotation=FlowKernelRequestParams, direction="write" - ), - }, - headers={ - "content-type": "application/json", - }, + response = await self._raw_client.log( + version_id=version_id, + environment=environment, + messages=messages, + output_message=output_message, + run_id=run_id, + path=path, + id=id, + start_time=start_time, + end_time=end_time, + output=output, + created_at=created_at, + error=error, + provider_latency=provider_latency, + stdout=stdout, + provider_request=provider_request, + provider_response=provider_response, + inputs=inputs, + source=source, + metadata=metadata, + log_status=log_status, + source_datapoint_id=source_datapoint_id, + trace_parent_id=trace_parent_id, + user=user, + flow_log_request_environment=flow_log_request_environment, + save=save, + log_id=log_id, + flow=flow, request_options=request_options, - omit=OMIT, ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - CreateFlowLogResponse, - construct_type( - type_=CreateFlowLogResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data async def update_log( self, @@ -1623,50 +1243,17 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"flows/logs/{jsonable_encoder(log_id)}", - method="PATCH", - json={ - "messages": convert_and_respect_annotation_metadata( - object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" - ), - "output_message": convert_and_respect_annotation_metadata( - object_=output_message, annotation=ChatMessageParams, direction="write" - ), - "inputs": inputs, - "output": output, - "error": error, - "log_status": log_status, - }, - headers={ - "content-type": "application/json", - }, + response = await self._raw_client.update_log( + log_id, + messages=messages, + 
output_message=output_message, + inputs=inputs, + output=output, + error=error, + log_status=log_status, request_options=request_options, - omit=OMIT, ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - FlowLogResponse, - construct_type( - type_=FlowLogResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data async def get( self, @@ -1720,38 +1307,10 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"flows/{jsonable_encoder(id)}", - method="GET", - params={ - "version_id": version_id, - "environment": environment, - }, - request_options=request_options, + response = await self._raw_client.get( + id, version_id=version_id, environment=environment, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - FlowResponse, - construct_type( - type_=FlowResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data async def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: """ @@ -1788,28 +1347,8 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"flows/{jsonable_encoder(id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = await self._raw_client.delete(id, request_options=request_options) + return response.data async def move( self, @@ -1865,43 +1404,10 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"flows/{jsonable_encoder(id)}", - method="PATCH", - json={ - "path": path, - "name": name, - "directory_id": directory_id, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, + response = await self._raw_client.move( + id, path=path, name=name, directory_id=directory_id, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - FlowResponse, - construct_type( - type_=FlowResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - 
HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data async def list( self, @@ -1970,7 +1476,7 @@ async def main() -> None: asyncio.run(main()) """ page = page if page is not None else 1 - _response = await self._client_wrapper.httpx_client.request( + _response = await self._raw_client._client_wrapper.httpx_client.request( "flows", method="GET", params={ @@ -2025,7 +1531,8 @@ async def upsert( attributes: typing.Dict[str, typing.Optional[typing.Any]], path: typing.Optional[str] = OMIT, id: typing.Optional[str] = OMIT, - commit_message: typing.Optional[str] = OMIT, + version_name: typing.Optional[str] = OMIT, + version_description: typing.Optional[str] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> FlowResponse: """ @@ -2033,9 +1540,9 @@ async def upsert( Flows can also be identified by the `ID` or their `path`. - If you provide a commit message, then the new version will be committed; - otherwise it will be uncommitted. If you try to commit an already committed version, - an exception will be raised. + You can provide `version_name` and `version_description` to identify and describe your versions. + Version names must be unique within a Flow - attempting to create a version with a name + that already exists will result in a 409 Conflict error. Parameters ---------- @@ -2048,8 +1555,11 @@ async def upsert( id : typing.Optional[str] ID for an existing Flow. - commit_message : typing.Optional[str] - Message describing the changes made. + version_name : typing.Optional[str] + Unique name for the Flow version. Version names must be unique for a given Flow. + + version_description : typing.Optional[str] + Description of the version, e.g., the changes made in this version. request_options : typing.Optional[RequestOptions] Request-specific configuration. 
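A minimal usage sketch of the new versioning parameters (illustrative only, not generated SDK code; the Flow path and attributes below are hypothetical). `version_name` and `version_description` replace `commit_message`, and reusing an existing version name surfaces as a 409 Conflict, which the generated client raises as an `ApiError`:

from humanloop import Humanloop
from humanloop.core.api_error import ApiError

client = Humanloop(api_key="YOUR_API_KEY")

try:
    client.flows.upsert(
        path="Personal Projects/MedQA",  # hypothetical Flow path
        attributes={"tool": {"name": "retrieval_tool_v3"}},  # hypothetical version attributes
        version_name="retrieval-v3",  # must be unique within this Flow
        version_description="Swapped in the v3 retrieval tool.",
    )
except ApiError as e:
    if e.status_code == 409:
        print("A version named 'retrieval-v3' already exists for this Flow.")
    else:
        raise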
@@ -2084,57 +1594,26 @@ async def main() -> None: "description": "Retrieval tool for MedQA.", "source_code": "def retrieval_tool(question: str) -> str:\n pass\n", }, - "commit_message": "Initial commit", }, ) asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - "flows", - method="POST", - json={ - "path": path, - "id": id, - "attributes": attributes, - "commit_message": commit_message, - }, - headers={ - "content-type": "application/json", - }, + response = await self._raw_client.upsert( + attributes=attributes, + path=path, + id=id, + version_name=version_name, + version_description=version_description, request_options=request_options, - omit=OMIT, ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - FlowResponse, - construct_type( - type_=FlowResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data async def list_versions( self, id: str, *, - status: typing.Optional[VersionStatus] = None, evaluator_aggregates: typing.Optional[bool] = None, request_options: typing.Optional[RequestOptions] = None, ) -> ListFlows: @@ -2146,9 +1625,6 @@ async def list_versions( id : str Unique identifier for Flow. - status : typing.Optional[VersionStatus] - Filter versions by status: 'uncommitted', 'committed'. If no status is provided, all versions are returned. - evaluator_aggregates : typing.Optional[bool] Whether to include Evaluator aggregate results for the versions in the response @@ -2174,52 +1650,21 @@ async def list_versions( async def main() -> None: await client.flows.list_versions( id="fl_6o701g4jmcanPVHxdqD0O", - status="committed", ) asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"flows/{jsonable_encoder(id)}/versions", - method="GET", - params={ - "status": status, - "evaluator_aggregates": evaluator_aggregates, - }, - request_options=request_options, + response = await self._raw_client.list_versions( + id, evaluator_aggregates=evaluator_aggregates, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - ListFlows, - construct_type( - type_=ListFlows, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data - async def commit( - self, id: str, version_id: str, *, commit_message: str, request_options: typing.Optional[RequestOptions] = None - ) -> FlowResponse: + async def delete_flow_version( + self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> None: """ - Commit a version of the Flow with a commit message. - - If the version is already committed, an exception will be raised. + Delete a version of the Flow. 
Parameters ---------- @@ -2229,16 +1674,12 @@ async def commit( version_id : str Unique identifier for the specific version of the Flow. - commit_message : str - Message describing the changes made. - request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - FlowResponse - Successful Response + None Examples -------- @@ -2252,53 +1693,28 @@ async def commit( async def main() -> None: - await client.flows.commit( - id="fl_6o701g4jmcanPVHxdqD0O", - version_id="flv_6o701g4jmcanPVHxdqD0O", - commit_message="RAG lookup tool bug fixing", + await client.flows.delete_flow_version( + id="id", + version_id="version_id", ) asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"flows/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}/commit", - method="POST", - json={ - "commit_message": commit_message, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - FlowResponse, - construct_type( - type_=FlowResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = await self._raw_client.delete_flow_version(id, version_id, request_options=request_options) + return response.data - async def delete_flow_version( - self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> None: + async def update_flow_version( + self, + id: str, + version_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> FlowResponse: """ - Delete a version of the Flow. + Update the name or description of the Flow version. Parameters ---------- @@ -2308,12 +1724,19 @@ async def delete_flow_version( version_id : str Unique identifier for the specific version of the Flow. + name : typing.Optional[str] + Name of the version. + + description : typing.Optional[str] + Description of the version. + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - None + FlowResponse + Successful Response Examples -------- @@ -2327,7 +1750,7 @@ async def delete_flow_version( async def main() -> None: - await client.flows.delete_flow_version( + await client.flows.update_flow_version( id="id", version_id="version_id", ) @@ -2335,28 +1758,10 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"flows/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", - method="DELETE", - request_options=request_options, + response = await self._raw_client.update_flow_version( + id, version_id, name=name, description=description, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data async def set_deployment( self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None @@ -2407,37 +1812,10 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"flows/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", - method="POST", - params={ - "version_id": version_id, - }, - request_options=request_options, + response = await self._raw_client.set_deployment( + id, environment_id, version_id=version_id, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - FlowResponse, - construct_type( - type_=FlowResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data async def remove_deployment( self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None @@ -2483,28 +1861,8 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"flows/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = await self._raw_client.remove_deployment(id, environment_id, request_options=request_options) + return response.data async def list_environments( self, id: str, *, request_options: typing.Optional[RequestOptions] = None @@ -2544,34 +1902,8 @@ async def main() -> None: 
asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"flows/{jsonable_encoder(id)}/environments", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - typing.List[FileEnvironmentResponse], - construct_type( - type_=typing.List[FileEnvironmentResponse], # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = await self._raw_client.list_environments(id, request_options=request_options) + return response.data async def update_monitoring( self, @@ -2625,44 +1957,7 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"flows/{jsonable_encoder(id)}/evaluators", - method="POST", - json={ - "activate": convert_and_respect_annotation_metadata( - object_=activate, - annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams], - direction="write", - ), - "deactivate": convert_and_respect_annotation_metadata( - object_=deactivate, - annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams], - direction="write", - ), - }, - request_options=request_options, - omit=OMIT, + response = await self._raw_client.update_monitoring( + id, activate=activate, deactivate=deactivate, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - FlowResponse, - construct_type( - type_=FlowResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data diff --git a/src/humanloop/flows/raw_client.py b/src/humanloop/flows/raw_client.py new file mode 100644 index 00000000..962f7eba --- /dev/null +++ b/src/humanloop/flows/raw_client.py @@ -0,0 +1,1981 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing +from ..core.client_wrapper import SyncClientWrapper +from ..requests.chat_message import ChatMessageParams +import datetime as dt +from ..types.log_status import LogStatus +from ..requests.flow_kernel_request import FlowKernelRequestParams +from ..core.request_options import RequestOptions +from ..core.http_response import HttpResponse +from ..types.create_flow_log_response import CreateFlowLogResponse +from ..core.serialization import convert_and_respect_annotation_metadata +from ..core.unchecked_base_model import construct_type +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..types.http_validation_error import HttpValidationError +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError +from ..types.flow_log_response import FlowLogResponse +from ..core.jsonable_encoder import jsonable_encoder +from ..types.flow_response import FlowResponse +from ..types.list_flows import ListFlows +from ..types.file_environment_response import FileEnvironmentResponse +from ..requests.evaluator_activation_deactivation_request_activate_item import ( + EvaluatorActivationDeactivationRequestActivateItemParams, +) +from ..requests.evaluator_activation_deactivation_request_deactivate_item import ( + EvaluatorActivationDeactivationRequestDeactivateItemParams, +) +from ..core.client_wrapper import AsyncClientWrapper +from ..core.http_response import AsyncHttpResponse + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) + + +class RawFlowsClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def log( + self, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, + output_message: typing.Optional[ChatMessageParams] = OMIT, + run_id: typing.Optional[str] = OMIT, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + output: typing.Optional[str] = OMIT, + created_at: typing.Optional[dt.datetime] = OMIT, + error: typing.Optional[str] = OMIT, + provider_latency: typing.Optional[float] = OMIT, + stdout: typing.Optional[str] = OMIT, + provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + source_datapoint_id: typing.Optional[str] = OMIT, + trace_parent_id: typing.Optional[str] = OMIT, + user: typing.Optional[str] = OMIT, + flow_log_request_environment: typing.Optional[str] = OMIT, + save: typing.Optional[bool] = OMIT, + log_id: typing.Optional[str] = OMIT, + flow: typing.Optional[FlowKernelRequestParams] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[CreateFlowLogResponse]: + """ + Log to a Flow. + + You can use query parameters `version_id`, or `environment`, to target + an existing version of the Flow. Otherwise, the default deployed version will be chosen. + + If you create the Flow Log with a `log_status` of `incomplete`, you should later update it to `complete` + in order to trigger Evaluators. 
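As a companion to the description above, a minimal sketch of the incomplete-to-complete Flow Log lifecycle (illustrative only; the Flow path and inputs are invented, and reading the new Log's ID off the create response as `log.id` is an assumption):

from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Open the trace as `incomplete` so child Logs can still be added to it.
log = client.flows.log(
    path="Personal Projects/MedQA",  # hypothetical Flow path
    inputs={"question": "Example question"},
    log_status="incomplete",
)

# ... log child Prompt/Tool calls with trace_parent_id=log.id ...

# Mark the trace `complete`; Monitoring Evaluators only run on complete Flow Logs.
client.flows.update_log(
    log_id=log.id,  # assumption: the create response exposes the Log ID as `id`
    output="Example answer",
    log_status="complete",
)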
+ + Parameters + ---------- + version_id : typing.Optional[str] + A specific Version ID of the Flow to log to. + + environment : typing.Optional[str] + Name of the Environment identifying a deployed version to log to. + + messages : typing.Optional[typing.Sequence[ChatMessageParams]] + List of chat messages that were used as an input to the Flow. + + output_message : typing.Optional[ChatMessageParams] + The output message returned by this Flow. + + run_id : typing.Optional[str] + Unique identifier for the Run to associate the Log to. + + path : typing.Optional[str] + Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Flow. + + start_time : typing.Optional[dt.datetime] + The start time of the Trace. Will be updated if a child Log with an earlier start time is added. + + end_time : typing.Optional[dt.datetime] + The end time of the Trace. Will be updated if a child Log with a later end time is added. + + output : typing.Optional[str] + Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + + created_at : typing.Optional[dt.datetime] + User defined timestamp for when the log was created. + + error : typing.Optional[str] + Error message if the log is an error. + + provider_latency : typing.Optional[float] + Duration of the logged event in seconds. + + stdout : typing.Optional[str] + Captured log and debug statements. + + provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Raw request sent to provider. + + provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Raw response received the provider. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the prompt template. + + source : typing.Optional[str] + Identifies where the model was called from. + + metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Any additional metadata to record. + + log_status : typing.Optional[LogStatus] + Status of the Flow Log. When a Log is updated from `incomplete` to `complete`, it becomes available to Monitoring Evaluators. Flow Logs cannot have an unspecified status: they must start as `incomplete` to allow children to be added. Provide `complete` if you don't intend to add children to the trace. + + source_datapoint_id : typing.Optional[str] + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + + trace_parent_id : typing.Optional[str] + The ID of the parent Log to nest this Log under in a Trace. + + user : typing.Optional[str] + End-user ID related to the Log. + + flow_log_request_environment : typing.Optional[str] + The name of the Environment the Log is associated to. + + save : typing.Optional[bool] + Whether the request/response payloads will be stored on Humanloop. + + log_id : typing.Optional[str] + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + + flow : typing.Optional[FlowKernelRequestParams] + Flow used to generate the Trace. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[CreateFlowLogResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + "flows/log", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + }, + json={ + "messages": convert_and_respect_annotation_metadata( + object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" + ), + "output_message": convert_and_respect_annotation_metadata( + object_=output_message, annotation=ChatMessageParams, direction="write" + ), + "run_id": run_id, + "path": path, + "id": id, + "start_time": start_time, + "end_time": end_time, + "output": output, + "created_at": created_at, + "error": error, + "provider_latency": provider_latency, + "stdout": stdout, + "provider_request": provider_request, + "provider_response": provider_response, + "inputs": inputs, + "source": source, + "metadata": metadata, + "log_status": log_status, + "source_datapoint_id": source_datapoint_id, + "trace_parent_id": trace_parent_id, + "user": user, + "environment": flow_log_request_environment, + "save": save, + "log_id": log_id, + "flow": convert_and_respect_annotation_metadata( + object_=flow, annotation=FlowKernelRequestParams, direction="write" + ), + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + CreateFlowLogResponse, + construct_type( + type_=CreateFlowLogResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def update_log( + self, + log_id: str, + *, + messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, + output_message: typing.Optional[ChatMessageParams] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + output: typing.Optional[str] = OMIT, + error: typing.Optional[str] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[FlowLogResponse]: + """ + Update the status, inputs, output of a Flow Log. + + Marking a Flow Log as complete will trigger any monitoring Evaluators to run. + Inputs and output (or error) must be provided in order to mark it as complete. + + The end_time log attribute will be set to match the time the log is marked as complete. + + Parameters + ---------- + log_id : str + Unique identifier of the Flow Log. + + messages : typing.Optional[typing.Sequence[ChatMessageParams]] + List of chat messages that were used as an input to the Flow. + + output_message : typing.Optional[ChatMessageParams] + The output message returned by this Flow. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the Flow Log. + + output : typing.Optional[str] + The output of the Flow Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`. + + error : typing.Optional[str] + The error message of the Flow Log. Provide None to unset existing `error` value. 
Provide either this, `output_message` or `output`. + + log_status : typing.Optional[LogStatus] + Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[FlowLogResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"flows/logs/{jsonable_encoder(log_id)}", + method="PATCH", + json={ + "messages": convert_and_respect_annotation_metadata( + object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" + ), + "output_message": convert_and_respect_annotation_metadata( + object_=output_message, annotation=ChatMessageParams, direction="write" + ), + "inputs": inputs, + "output": output, + "error": error, + "log_status": log_status, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + FlowLogResponse, + construct_type( + type_=FlowLogResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[FlowResponse]: + """ + Retrieve the Flow with the given ID. + + By default, the deployed version of the Flow is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Flow. + + Parameters + ---------- + id : str + Unique identifier for Flow. + + version_id : typing.Optional[str] + A specific Version ID of the Flow to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[FlowResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"flows/{jsonable_encoder(id)}", + method="GET", + params={ + "version_id": version_id, + "environment": environment, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + FlowResponse, + construct_type( + type_=FlowResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> HttpResponse[None]: + """ + Delete the Flow with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Flow. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[None] + """ + _response = self._client_wrapper.httpx_client.request( + f"flows/{jsonable_encoder(id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return HttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def move( + self, + id: str, + *, + path: typing.Optional[str] = OMIT, + name: typing.Optional[str] = OMIT, + directory_id: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[FlowResponse]: + """ + Move the Flow to a different path or change the name. + + Parameters + ---------- + id : str + Unique identifier for Flow. + + path : typing.Optional[str] + Path of the Flow including the Flow name, which is used as a unique identifier. + + name : typing.Optional[str] + Name of the Flow. + + directory_id : typing.Optional[str] + Unique identifier for the Directory to move Flow to. Starts with `dir_`. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[FlowResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"flows/{jsonable_encoder(id)}", + method="PATCH", + json={ + "path": path, + "name": name, + "directory_id": directory_id, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + FlowResponse, + construct_type( + type_=FlowResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def upsert( + self, + *, + attributes: typing.Dict[str, typing.Optional[typing.Any]], + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + version_name: typing.Optional[str] = OMIT, + version_description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[FlowResponse]: + """ + Create or update a Flow. + + Flows can also be identified by the `ID` or their `path`. + + You can provide `version_name` and `version_description` to identify and describe your versions. + Version names must be unique within a Flow - attempting to create a version with a name + that already exists will result in a 409 Conflict error. + + Parameters + ---------- + attributes : typing.Dict[str, typing.Optional[typing.Any]] + A key-value object identifying the Flow Version. + + path : typing.Optional[str] + Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Flow. + + version_name : typing.Optional[str] + Unique name for the Flow version. Version names must be unique for a given Flow. + + version_description : typing.Optional[str] + Description of the version, e.g., the changes made in this version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[FlowResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + "flows", + method="POST", + json={ + "path": path, + "id": id, + "attributes": attributes, + "version_name": version_name, + "version_description": version_description, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + FlowResponse, + construct_type( + type_=FlowResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def list_versions( + self, + id: str, + *, + evaluator_aggregates: typing.Optional[bool] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[ListFlows]: + """ + Get a list of all the versions of a Flow. + + Parameters + ---------- + id : str + Unique identifier for Flow. + + evaluator_aggregates : typing.Optional[bool] + Whether to include Evaluator aggregate results for the versions in the response + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[ListFlows] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"flows/{jsonable_encoder(id)}/versions", + method="GET", + params={ + "evaluator_aggregates": evaluator_aggregates, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + ListFlows, + construct_type( + type_=ListFlows, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def delete_flow_version( + self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[None]: + """ + Delete a version of the Flow. + + Parameters + ---------- + id : str + Unique identifier for Flow. + + version_id : str + Unique identifier for the specific version of the Flow. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[None] + """ + _response = self._client_wrapper.httpx_client.request( + f"flows/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return HttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def update_flow_version( + self, + id: str, + version_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[FlowResponse]: + """ + Update the name or description of the Flow version. + + Parameters + ---------- + id : str + Unique identifier for Flow. + + version_id : str + Unique identifier for the specific version of the Flow. + + name : typing.Optional[str] + Name of the version. + + description : typing.Optional[str] + Description of the version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[FlowResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"flows/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", + method="PATCH", + json={ + "name": name, + "description": description, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + FlowResponse, + construct_type( + type_=FlowResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def set_deployment( + self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[FlowResponse]: + """ + Deploy Flow to an Environment. + + Set the deployed version for the specified Environment. This Flow + will be used for calls made to the Flow in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Flow. + + environment_id : str + Unique identifier for the Environment to deploy the Version to. + + version_id : str + Unique identifier for the specific version of the Flow. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[FlowResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"flows/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="POST", + params={ + "version_id": version_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + FlowResponse, + construct_type( + type_=FlowResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def remove_deployment( + self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[None]: + """ + Remove deployed Flow from the Environment. + + Remove the deployed version for the specified Environment. This Flow + will no longer be used for calls made to the Flow in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Flow. + + environment_id : str + Unique identifier for the Environment to remove the deployment from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[None] + """ + _response = self._client_wrapper.httpx_client.request( + f"flows/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return HttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def list_environments( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[typing.List[FileEnvironmentResponse]]: + """ + List all Environments and their deployed versions for the Flow. + + Parameters + ---------- + id : str + Unique identifier for Flow. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[typing.List[FileEnvironmentResponse]] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"flows/{jsonable_encoder(id)}/environments", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + typing.List[FileEnvironmentResponse], + construct_type( + type_=typing.List[FileEnvironmentResponse], # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def update_monitoring( + self, + id: str, + *, + activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, + deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[FlowResponse]: + """ + Activate and deactivate Evaluators for monitoring the Flow. + + An activated Evaluator will automatically be run on all new "completed" Logs + within the Flow for monitoring purposes. + + Parameters + ---------- + id : str + + activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] + Evaluators to activate for Monitoring. These will be automatically run on new Logs. + + deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] + Evaluators to deactivate. These will not be run on new Logs. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[FlowResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"flows/{jsonable_encoder(id)}/evaluators", + method="POST", + json={ + "activate": convert_and_respect_annotation_metadata( + object_=activate, + annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams], + direction="write", + ), + "deactivate": convert_and_respect_annotation_metadata( + object_=deactivate, + annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams], + direction="write", + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + FlowResponse, + construct_type( + type_=FlowResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + +class AsyncRawFlowsClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def log( + self, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, + output_message: typing.Optional[ChatMessageParams] = OMIT, + run_id: typing.Optional[str] = OMIT, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + output: typing.Optional[str] = OMIT, + created_at: typing.Optional[dt.datetime] = OMIT, + error: typing.Optional[str] = OMIT, + provider_latency: typing.Optional[float] = OMIT, + stdout: typing.Optional[str] = OMIT, + provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + source_datapoint_id: typing.Optional[str] = OMIT, + trace_parent_id: typing.Optional[str] = OMIT, + user: typing.Optional[str] = OMIT, + flow_log_request_environment: typing.Optional[str] = OMIT, + save: typing.Optional[bool] = OMIT, + log_id: typing.Optional[str] = OMIT, + flow: typing.Optional[FlowKernelRequestParams] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[CreateFlowLogResponse]: + """ + Log to a Flow. + + You can use query parameters `version_id`, or `environment`, to target + an existing version of the Flow. Otherwise, the default deployed version will be chosen. + + If you create the Flow Log with a `log_status` of `incomplete`, you should later update it to `complete` + in order to trigger Evaluators. + + Parameters + ---------- + version_id : typing.Optional[str] + A specific Version ID of the Flow to log to. + + environment : typing.Optional[str] + Name of the Environment identifying a deployed version to log to. 
+
+        messages : typing.Optional[typing.Sequence[ChatMessageParams]]
+            List of chat messages that were used as an input to the Flow.
+
+        output_message : typing.Optional[ChatMessageParams]
+            The output message returned by this Flow.
+
+        run_id : typing.Optional[str]
+            Unique identifier for the Run to associate the Log to.
+
+        path : typing.Optional[str]
+            Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Flow.
+
+        start_time : typing.Optional[dt.datetime]
+            The start time of the Trace. Will be updated if a child Log with an earlier start time is added.
+
+        end_time : typing.Optional[dt.datetime]
+            The end time of the Trace. Will be updated if a child Log with a later end time is added.
+
+        output : typing.Optional[str]
+            Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
+
+        created_at : typing.Optional[dt.datetime]
+            User defined timestamp for when the log was created.
+
+        error : typing.Optional[str]
+            Error message if the log is an error.
+
+        provider_latency : typing.Optional[float]
+            Duration of the logged event in seconds.
+
+        stdout : typing.Optional[str]
+            Captured log and debug statements.
+
+        provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw request sent to provider.
+
+        provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Raw response received from the provider.
+
+        inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            The inputs passed to the prompt template.
+
+        source : typing.Optional[str]
+            Identifies where the model was called from.
+
+        metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+            Any additional metadata to record.
+
+        log_status : typing.Optional[LogStatus]
+            Status of the Flow Log. When a Log is updated from `incomplete` to `complete`, it becomes available to Monitoring Evaluators. Flow Logs cannot have an unspecified status: they must start as `incomplete` to allow children to be added. Provide `complete` if you don't intend to add children to the trace.
+
+        source_datapoint_id : typing.Optional[str]
+            Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
+
+        trace_parent_id : typing.Optional[str]
+            The ID of the parent Log to nest this Log under in a Trace.
+
+        user : typing.Optional[str]
+            End-user ID related to the Log.
+
+        flow_log_request_environment : typing.Optional[str]
+            The name of the Environment the Log is associated to.
+
+        save : typing.Optional[bool]
+            Whether the request/response payloads will be stored on Humanloop.
+
+        log_id : typing.Optional[str]
+            This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
+
+        flow : typing.Optional[FlowKernelRequestParams]
+            Flow used to generate the Trace.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+ + Returns + ------- + AsyncHttpResponse[CreateFlowLogResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + "flows/log", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + }, + json={ + "messages": convert_and_respect_annotation_metadata( + object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" + ), + "output_message": convert_and_respect_annotation_metadata( + object_=output_message, annotation=ChatMessageParams, direction="write" + ), + "run_id": run_id, + "path": path, + "id": id, + "start_time": start_time, + "end_time": end_time, + "output": output, + "created_at": created_at, + "error": error, + "provider_latency": provider_latency, + "stdout": stdout, + "provider_request": provider_request, + "provider_response": provider_response, + "inputs": inputs, + "source": source, + "metadata": metadata, + "log_status": log_status, + "source_datapoint_id": source_datapoint_id, + "trace_parent_id": trace_parent_id, + "user": user, + "environment": flow_log_request_environment, + "save": save, + "log_id": log_id, + "flow": convert_and_respect_annotation_metadata( + object_=flow, annotation=FlowKernelRequestParams, direction="write" + ), + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + CreateFlowLogResponse, + construct_type( + type_=CreateFlowLogResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def update_log( + self, + log_id: str, + *, + messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, + output_message: typing.Optional[ChatMessageParams] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + output: typing.Optional[str] = OMIT, + error: typing.Optional[str] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[FlowLogResponse]: + """ + Update the status, inputs, output of a Flow Log. + + Marking a Flow Log as complete will trigger any monitoring Evaluators to run. + Inputs and output (or error) must be provided in order to mark it as complete. + + The end_time log attribute will be set to match the time the log is marked as complete. + + Parameters + ---------- + log_id : str + Unique identifier of the Flow Log. + + messages : typing.Optional[typing.Sequence[ChatMessageParams]] + List of chat messages that were used as an input to the Flow. + + output_message : typing.Optional[ChatMessageParams] + The output message returned by this Flow. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the Flow Log. + + output : typing.Optional[str] + The output of the Flow Log. Provide None to unset existing `output` value. Provide either this, `output_message` or `error`. + + error : typing.Optional[str] + The error message of the Flow Log. 
Provide None to unset existing `error` value. Provide either this, `output_message` or `output`. + + log_status : typing.Optional[LogStatus] + Status of the Flow Log. When a Flow Log is updated to `complete`, no more Logs can be added to it. Monitoring Evaluators will only run on `complete` Flow Logs. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[FlowLogResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"flows/logs/{jsonable_encoder(log_id)}", + method="PATCH", + json={ + "messages": convert_and_respect_annotation_metadata( + object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" + ), + "output_message": convert_and_respect_annotation_metadata( + object_=output_message, annotation=ChatMessageParams, direction="write" + ), + "inputs": inputs, + "output": output, + "error": error, + "log_status": log_status, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + FlowLogResponse, + construct_type( + type_=FlowLogResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[FlowResponse]: + """ + Retrieve the Flow with the given ID. + + By default, the deployed version of the Flow is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Flow. + + Parameters + ---------- + id : str + Unique identifier for Flow. + + version_id : typing.Optional[str] + A specific Version ID of the Flow to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[FlowResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"flows/{jsonable_encoder(id)}", + method="GET", + params={ + "version_id": version_id, + "environment": environment, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + FlowResponse, + construct_type( + type_=FlowResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def delete( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[None]: + """ + Delete the Flow with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Flow. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[None] + """ + _response = await self._client_wrapper.httpx_client.request( + f"flows/{jsonable_encoder(id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return AsyncHttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def move( + self, + id: str, + *, + path: typing.Optional[str] = OMIT, + name: typing.Optional[str] = OMIT, + directory_id: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[FlowResponse]: + """ + Move the Flow to a different path or change the name. + + Parameters + ---------- + id : str + Unique identifier for Flow. + + path : typing.Optional[str] + Path of the Flow including the Flow name, which is used as a unique identifier. + + name : typing.Optional[str] + Name of the Flow. + + directory_id : typing.Optional[str] + Unique identifier for the Directory to move Flow to. Starts with `dir_`. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+
+        Returns
+        -------
+        AsyncHttpResponse[FlowResponse]
+            Successful Response
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            f"flows/{jsonable_encoder(id)}",
+            method="PATCH",
+            json={
+                "path": path,
+                "name": name,
+                "directory_id": directory_id,
+            },
+            headers={
+                "content-type": "application/json",
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                _data = typing.cast(
+                    FlowResponse,
+                    construct_type(
+                        type_=FlowResponse,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+                return AsyncHttpResponse(response=_response, data=_data)
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def upsert(
+        self,
+        *,
+        attributes: typing.Dict[str, typing.Optional[typing.Any]],
+        path: typing.Optional[str] = OMIT,
+        id: typing.Optional[str] = OMIT,
+        version_name: typing.Optional[str] = OMIT,
+        version_description: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> AsyncHttpResponse[FlowResponse]:
+        """
+        Create or update a Flow.
+
+        Flows can also be identified by the `ID` or their `path`.
+
+        You can provide `version_name` and `version_description` to identify and describe your versions.
+        Version names must be unique within a Flow - attempting to create a version with a name
+        that already exists will result in a 409 Conflict error.
+
+        Parameters
+        ----------
+        attributes : typing.Dict[str, typing.Optional[typing.Any]]
+            A key-value object identifying the Flow Version.
+
+        path : typing.Optional[str]
+            Path of the Flow, including the name. This locates the Flow in the Humanloop filesystem and is used as a unique identifier. For example: `folder/name` or just `name`.
+
+        id : typing.Optional[str]
+            ID for an existing Flow.
+
+        version_name : typing.Optional[str]
+            Unique name for the Flow version. Version names must be unique for a given Flow.
+
+        version_description : typing.Optional[str]
+            Description of the version, e.g., the changes made in this version.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+ + Returns + ------- + AsyncHttpResponse[FlowResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + "flows", + method="POST", + json={ + "path": path, + "id": id, + "attributes": attributes, + "version_name": version_name, + "version_description": version_description, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + FlowResponse, + construct_type( + type_=FlowResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def list_versions( + self, + id: str, + *, + evaluator_aggregates: typing.Optional[bool] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[ListFlows]: + """ + Get a list of all the versions of a Flow. + + Parameters + ---------- + id : str + Unique identifier for Flow. + + evaluator_aggregates : typing.Optional[bool] + Whether to include Evaluator aggregate results for the versions in the response + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[ListFlows] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"flows/{jsonable_encoder(id)}/versions", + method="GET", + params={ + "evaluator_aggregates": evaluator_aggregates, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + ListFlows, + construct_type( + type_=ListFlows, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def delete_flow_version( + self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[None]: + """ + Delete a version of the Flow. + + Parameters + ---------- + id : str + Unique identifier for Flow. + + version_id : str + Unique identifier for the specific version of the Flow. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[None] + """ + _response = await self._client_wrapper.httpx_client.request( + f"flows/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return AsyncHttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def update_flow_version( + self, + id: str, + version_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[FlowResponse]: + """ + Update the name or description of the Flow version. + + Parameters + ---------- + id : str + Unique identifier for Flow. + + version_id : str + Unique identifier for the specific version of the Flow. + + name : typing.Optional[str] + Name of the version. + + description : typing.Optional[str] + Description of the version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[FlowResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"flows/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", + method="PATCH", + json={ + "name": name, + "description": description, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + FlowResponse, + construct_type( + type_=FlowResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def set_deployment( + self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[FlowResponse]: + """ + Deploy Flow to an Environment. + + Set the deployed version for the specified Environment. This Flow + will be used for calls made to the Flow in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Flow. + + environment_id : str + Unique identifier for the Environment to deploy the Version to. + + version_id : str + Unique identifier for the specific version of the Flow. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[FlowResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"flows/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="POST", + params={ + "version_id": version_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + FlowResponse, + construct_type( + type_=FlowResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def remove_deployment( + self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[None]: + """ + Remove deployed Flow from the Environment. + + Remove the deployed version for the specified Environment. This Flow + will no longer be used for calls made to the Flow in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Flow. + + environment_id : str + Unique identifier for the Environment to remove the deployment from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[None] + """ + _response = await self._client_wrapper.httpx_client.request( + f"flows/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return AsyncHttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def list_environments( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[typing.List[FileEnvironmentResponse]]: + """ + List all Environments and their deployed versions for the Flow. + + Parameters + ---------- + id : str + Unique identifier for Flow. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[typing.List[FileEnvironmentResponse]] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"flows/{jsonable_encoder(id)}/environments", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + typing.List[FileEnvironmentResponse], + construct_type( + type_=typing.List[FileEnvironmentResponse], # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def update_monitoring( + self, + id: str, + *, + activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, + deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[FlowResponse]: + """ + Activate and deactivate Evaluators for monitoring the Flow. + + An activated Evaluator will automatically be run on all new "completed" Logs + within the Flow for monitoring purposes. + + Parameters + ---------- + id : str + + activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] + Evaluators to activate for Monitoring. These will be automatically run on new Logs. + + deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] + Evaluators to deactivate. These will not be run on new Logs. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[FlowResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"flows/{jsonable_encoder(id)}/evaluators", + method="POST", + json={ + "activate": convert_and_respect_annotation_metadata( + object_=activate, + annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams], + direction="write", + ), + "deactivate": convert_and_respect_annotation_metadata( + object_=deactivate, + annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams], + direction="write", + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + FlowResponse, + construct_type( + type_=FlowResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/humanloop/logs/client.py b/src/humanloop/logs/client.py index f1b0e1e6..17007c1b 100644 --- a/src/humanloop/logs/client.py +++ b/src/humanloop/logs/client.py @@ -1,6 +1,7 @@ # This file was auto-generated by Fern from our API Definition. from ..core.client_wrapper import SyncClientWrapper +from .raw_client import RawLogsClient import typing from ..types.version_status import VersionStatus import datetime as dt @@ -14,14 +15,25 @@ from ..types.http_validation_error import HttpValidationError from json.decoder import JSONDecodeError from ..core.api_error import ApiError -from ..core.jsonable_encoder import jsonable_encoder from ..core.client_wrapper import AsyncClientWrapper +from .raw_client import AsyncRawLogsClient from ..core.pagination import AsyncPager class LogsClient: def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper + self._raw_client = RawLogsClient(client_wrapper=client_wrapper) + + @property + def with_raw_response(self) -> RawLogsClient: + """ + Retrieves a raw implementation of this client that returns raw responses. 
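+
+        For example (an illustrative sketch, assuming the top-level client exposes this
+        client as `client.logs`; the Log ID is the placeholder used elsewhere in these docs):
+        `client.logs.with_raw_response.get("prv_Wu6zx1lAWJRqOyL8nWuZk")` returns an
+        `HttpResponse[LogResponse]` whose `.data` attribute holds the parsed `LogResponse`.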
+ + Returns + ------- + RawLogsClient + """ + return self._raw_client def list( self, @@ -115,7 +127,7 @@ def list( yield page """ page = page if page is not None else 1 - _response = self._client_wrapper.httpx_client.request( + _response = self._raw_client._client_wrapper.httpx_client.request( "logs", method="GET", params={ @@ -212,31 +224,8 @@ def delete( id="prv_Wu6zx1lAWJRqOyL8nWuZk", ) """ - _response = self._client_wrapper.httpx_client.request( - "logs", - method="DELETE", - params={ - "id": id, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = self._raw_client.delete(id=id, request_options=request_options) + return response.data def get(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> LogResponse: """ @@ -266,39 +255,24 @@ def get(self, id: str, *, request_options: typing.Optional[RequestOptions] = Non id="prv_Wu6zx1lAWJRqOyL8nWuZk", ) """ - _response = self._client_wrapper.httpx_client.request( - f"logs/{jsonable_encoder(id)}", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - LogResponse, - construct_type( - type_=LogResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = self._raw_client.get(id, request_options=request_options) + return response.data class AsyncLogsClient: def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper + self._raw_client = AsyncRawLogsClient(client_wrapper=client_wrapper) + + @property + def with_raw_response(self) -> AsyncRawLogsClient: + """ + Retrieves a raw implementation of this client that returns raw responses. 
+ + Returns + ------- + AsyncRawLogsClient + """ + return self._raw_client async def list( self, @@ -400,7 +374,7 @@ async def main() -> None: asyncio.run(main()) """ page = page if page is not None else 1 - _response = await self._client_wrapper.httpx_client.request( + _response = await self._raw_client._client_wrapper.httpx_client.request( "logs", method="GET", params={ @@ -505,31 +479,8 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - "logs", - method="DELETE", - params={ - "id": id, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = await self._raw_client.delete(id=id, request_options=request_options) + return response.data async def get(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> LogResponse: """ @@ -567,31 +518,5 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"logs/{jsonable_encoder(id)}", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - LogResponse, - construct_type( - type_=LogResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = await self._raw_client.get(id, request_options=request_options) + return response.data diff --git a/src/humanloop/logs/raw_client.py b/src/humanloop/logs/raw_client.py new file mode 100644 index 00000000..3859278b --- /dev/null +++ b/src/humanloop/logs/raw_client.py @@ -0,0 +1,215 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.client_wrapper import SyncClientWrapper +import typing +from ..core.request_options import RequestOptions +from ..core.http_response import HttpResponse +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..types.http_validation_error import HttpValidationError +from ..core.unchecked_base_model import construct_type +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError +from ..types.log_response import LogResponse +from ..core.jsonable_encoder import jsonable_encoder +from ..core.client_wrapper import AsyncClientWrapper +from ..core.http_response import AsyncHttpResponse + + +class RawLogsClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def delete( + self, + *, + id: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[None]: + """ + Delete Logs with the given IDs. 
+ + Parameters + ---------- + id : typing.Optional[typing.Union[str, typing.Sequence[str]]] + Unique identifiers for the Logs to delete. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[None] + """ + _response = self._client_wrapper.httpx_client.request( + "logs", + method="DELETE", + params={ + "id": id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return HttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> HttpResponse[LogResponse]: + """ + Retrieve the Log with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Log. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[LogResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"logs/{jsonable_encoder(id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + LogResponse, + construct_type( + type_=LogResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + +class AsyncRawLogsClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def delete( + self, + *, + id: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[None]: + """ + Delete Logs with the given IDs. + + Parameters + ---------- + id : typing.Optional[typing.Union[str, typing.Sequence[str]]] + Unique identifiers for the Logs to delete. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[None] + """ + _response = await self._client_wrapper.httpx_client.request( + "logs", + method="DELETE", + params={ + "id": id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return AsyncHttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[LogResponse]: + """ + Retrieve the Log with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Log. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[LogResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"logs/{jsonable_encoder(id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + LogResponse, + construct_type( + type_=LogResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/humanloop/prompt_utils.py b/src/humanloop/prompt_utils.py new file mode 100644 index 00000000..1747e286 --- /dev/null +++ b/src/humanloop/prompt_utils.py @@ -0,0 +1,158 @@ +import copy +from typing import Any, Dict, List, Optional, TypeVar, Sequence +import logging + +import re + +from .requests.chat_message import ChatMessageParams +from .prompts.requests.prompt_request_template import ( + PromptRequestTemplateParams, +) + + +logger = logging.getLogger(__name__) + + +class PromptVariablesNotFoundError(ValueError): + """Raised when inputs do not satisfy prompt variables.""" + + missing_variables: List[str] + """Missing variables""" + + def __init__(self, missing_variables: List[str]) -> None: + self.missing_variables = missing_variables + super().__init__(f"Prompt requires inputs for the following " f"variables: {self.missing_variables}") + + +def populate_prompt_template( + template: str, + inputs: Optional[Dict[str, Any]], +) -> str: + """Interpolate a string template with kwargs, where template variables + are specified using double curly bracket syntax: {{variable}}. + + args: + template: str - string template where template variables are specified + using double curly bracket syntax: {{variable}}. 
+
+        inputs - represent the key, value string pairs to inject into the template
+            variables, where key corresponds to the template variable name and
+            value to the variable value to inject
+
+    return:
+        The interpolated template string
+
+    raises:
+        PromptVariablesNotFoundError - if any variables are missing from inputs
+    """
+    template_variables: List[str] = re.findall(
+        # Matching variables: `{{ variable_2 }}`
+        r"{{\s?([a-zA-Z_\d\.\[\]]+)\s?}}",
+        template,
+    ) + re.findall(
+        # Matching tools: `{{ tool_2("all characters$#@$!") }}`
+        # https://regexr.com/7nvrf
+        r"\{\{\s?([a-zA-Z_\-\d]+\([a-zA-Z_\-\d,\s\"]+\))\s?\}\}",
+        template,
+    )
+
+    # populate the template variables, tracking if any are missing
+    prompt = template
+    missing_vars = []
+
+    if inputs is None:
+        inputs = {}
+
+    # e.g. var: input_name, sig(input_name), sig(other_name), sig("string")
+    for var in template_variables:
+        text: Optional[str] = None
+
+        if var in inputs:
+            text = inputs[var]
+
+        if text is None:
+            missing_vars.append(var)
+        else:
+            if not isinstance(text, str):
+                logger.info(f"Converting input value for variable '{var}' to string for prompt template: " f"{text}")
+                text = str(text)
+            replacement = sanitize_prompt(prompt=text) if text else text
+            prompt = re.sub(
+                r"{{\s?" + re.escape(var) + r"\s?}}",
+                replacement,
+                prompt,
+            )
+
+    if missing_vars:
+        missing_vars.sort()
+        raise PromptVariablesNotFoundError(
+            missing_variables=missing_vars,
+        )
+
+    return prompt
+
+
+def sanitize_prompt(prompt: str) -> str:
+    # Escape backslashes so they are not interpreted as escape sequences in the re.sub replacement string.
+    return prompt.replace("\\", "\\\\")
+
+
+def populate_chat_template(
+    chat_template: Sequence[ChatMessageParams],
+    inputs: Optional[Dict[str, str]] = None,
+) -> List[ChatMessageParams]:
+    """Interpolate a chat template with kwargs, where template variables
+    are specified using double curly bracket syntax: {{variable}}.
+    """
+    messages = []
+    message: ChatMessageParams
+    for message in chat_template:
+        if "content" not in message:
+            messages.append(message)
+            continue
+
+        message_content = copy.deepcopy(message["content"])
+        if isinstance(message_content, str):
+            message_content = populate_prompt_template(
+                template=message_content,
+                inputs=inputs,
+            )
+        elif isinstance(message_content, list):
+            for j, content_item in enumerate(message_content):
+                if content_item["type"] == "text":
+                    content_item_text = content_item["text"]
+                    content_item_text = populate_prompt_template(
+                        template=content_item_text,
+                        inputs=inputs,
+                    )
+                    content_item["text"] = content_item_text
+        messages.append(
+            ChatMessageParams(
+                role=message["role"],
+                content=message_content,
+            )
+        )
+    return messages
+
+
+T = TypeVar("T", bound=PromptRequestTemplateParams)
+
+
+def populate_template(template: T, inputs: Dict[str, str]) -> T:
+    """Populate a Prompt's template with the given inputs.
+
+    Humanloop supports insertion of variables of the form `{{variable}}` in
+    Prompt templates.
+    E.g. If you provide the template `Hello {{name}}` and the input
+    `{"name": "Alice"}`, the populated template will be `Hello Alice`.
+
+    This function supports both completion and chat models. For completion
+    models, provide template as a string. For chat models, provide template
+    as a list of messages.
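+
+    Example (illustrative, reusing the `Hello {{name}}` template and `Alice` input
+    described above):
+
+        populate_template("Hello {{name}}", {"name": "Alice"})
+        # -> "Hello Alice"
+
+        populate_template(
+            [{"role": "user", "content": "Hello {{name}}"}],
+            {"name": "Alice"},
+        )
+        # -> [{"role": "user", "content": "Hello Alice"}]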
+ """ + if isinstance(template, str): + return populate_prompt_template( + template=template, + inputs=inputs, + ) + return populate_chat_template( + chat_template=template, + inputs=inputs, + ) diff --git a/src/humanloop/prompts/client.py b/src/humanloop/prompts/client.py index fe2ce046..e546b2a1 100644 --- a/src/humanloop/prompts/client.py +++ b/src/humanloop/prompts/client.py @@ -2,6 +2,7 @@ import typing from ..core.client_wrapper import SyncClientWrapper +from .raw_client import RawPromptsClient from ..requests.chat_message import ChatMessageParams from .requests.prompt_log_request_tool_choice import PromptLogRequestToolChoiceParams from ..requests.prompt_kernel_request import PromptKernelRequestParams @@ -9,20 +10,11 @@ from ..types.log_status import LogStatus from ..core.request_options import RequestOptions from ..types.create_prompt_log_response import CreatePromptLogResponse -from ..core.serialization import convert_and_respect_annotation_metadata -from ..core.unchecked_base_model import construct_type -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.http_validation_error import HttpValidationError -from json.decoder import JSONDecodeError -from ..core.api_error import ApiError from .requests.prompt_log_update_request_tool_choice import PromptLogUpdateRequestToolChoiceParams from ..types.log_response import LogResponse -from ..core.jsonable_encoder import jsonable_encoder from .requests.prompts_call_stream_request_tool_choice import PromptsCallStreamRequestToolChoiceParams from ..requests.provider_api_keys import ProviderApiKeysParams from ..types.prompt_call_stream_response import PromptCallStreamResponse -import httpx_sse -import json from .requests.prompts_call_request_tool_choice import PromptsCallRequestToolChoiceParams from ..types.prompt_call_response import PromptCallResponse from ..types.project_sort_by import ProjectSortBy @@ -30,6 +22,11 @@ from ..core.pagination import SyncPager from ..types.prompt_response import PromptResponse from ..types.paginated_data_prompt_response import PaginatedDataPromptResponse +from ..core.unchecked_base_model import construct_type +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..types.http_validation_error import HttpValidationError +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError from ..types.model_endpoints import ModelEndpoints from .requests.prompt_request_template import PromptRequestTemplateParams from ..types.template_language import TemplateLanguage @@ -39,7 +36,6 @@ from ..types.reasoning_effort import ReasoningEffort from ..requests.tool_function import ToolFunctionParams from ..types.populate_template_response import PopulateTemplateResponse -from ..types.version_status import VersionStatus from ..types.list_prompts import ListPrompts from ..types.file_environment_response import FileEnvironmentResponse from ..requests.evaluator_activation_deactivation_request_activate_item import ( @@ -49,6 +45,7 @@ EvaluatorActivationDeactivationRequestDeactivateItemParams, ) from ..core.client_wrapper import AsyncClientWrapper +from .raw_client import AsyncRawPromptsClient from ..core.pagination import AsyncPager # this is used as the default value for optional parameters @@ -57,7 +54,18 @@ class PromptsClient: def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper + self._raw_client = RawPromptsClient(client_wrapper=client_wrapper) + + @property + def with_raw_response(self) -> RawPromptsClient: 
+ """ + Retrieves a raw implementation of this client that returns raw responses. + + Returns + ------- + RawPromptsClient + """ + return self._raw_client def log( self, @@ -262,84 +270,44 @@ def log( finish_reason="stop", ) """ - _response = self._client_wrapper.httpx_client.request( - "prompts/log", - method="POST", - params={ - "version_id": version_id, - "environment": environment, - }, - json={ - "run_id": run_id, - "path": path, - "id": id, - "output_message": convert_and_respect_annotation_metadata( - object_=output_message, annotation=ChatMessageParams, direction="write" - ), - "prompt_tokens": prompt_tokens, - "reasoning_tokens": reasoning_tokens, - "output_tokens": output_tokens, - "prompt_cost": prompt_cost, - "output_cost": output_cost, - "finish_reason": finish_reason, - "messages": convert_and_respect_annotation_metadata( - object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" - ), - "tool_choice": convert_and_respect_annotation_metadata( - object_=tool_choice, annotation=PromptLogRequestToolChoiceParams, direction="write" - ), - "prompt": convert_and_respect_annotation_metadata( - object_=prompt, annotation=PromptKernelRequestParams, direction="write" - ), - "start_time": start_time, - "end_time": end_time, - "output": output, - "created_at": created_at, - "error": error, - "provider_latency": provider_latency, - "stdout": stdout, - "provider_request": provider_request, - "provider_response": provider_response, - "inputs": inputs, - "source": source, - "metadata": metadata, - "log_status": log_status, - "source_datapoint_id": source_datapoint_id, - "trace_parent_id": trace_parent_id, - "user": user, - "environment": prompt_log_request_environment, - "save": save, - "log_id": log_id, - }, - headers={ - "content-type": "application/json", - }, + response = self._raw_client.log( + version_id=version_id, + environment=environment, + run_id=run_id, + path=path, + id=id, + output_message=output_message, + prompt_tokens=prompt_tokens, + reasoning_tokens=reasoning_tokens, + output_tokens=output_tokens, + prompt_cost=prompt_cost, + output_cost=output_cost, + finish_reason=finish_reason, + messages=messages, + tool_choice=tool_choice, + prompt=prompt, + start_time=start_time, + end_time=end_time, + output=output, + created_at=created_at, + error=error, + provider_latency=provider_latency, + stdout=stdout, + provider_request=provider_request, + provider_response=provider_response, + inputs=inputs, + source=source, + metadata=metadata, + log_status=log_status, + source_datapoint_id=source_datapoint_id, + trace_parent_id=trace_parent_id, + user=user, + prompt_log_request_environment=prompt_log_request_environment, + save=save, + log_id=log_id, request_options=request_options, - omit=OMIT, ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - CreatePromptLogResponse, - construct_type( - type_=CreatePromptLogResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data def update_log( self, @@ -473,68 +441,34 @@ def update_log( log_id="log_id", ) """ - _response = self._client_wrapper.httpx_client.request( - 
f"prompts/{jsonable_encoder(id)}/log/{jsonable_encoder(log_id)}", - method="PATCH", - json={ - "output_message": convert_and_respect_annotation_metadata( - object_=output_message, annotation=ChatMessageParams, direction="write" - ), - "prompt_tokens": prompt_tokens, - "reasoning_tokens": reasoning_tokens, - "output_tokens": output_tokens, - "prompt_cost": prompt_cost, - "output_cost": output_cost, - "finish_reason": finish_reason, - "messages": convert_and_respect_annotation_metadata( - object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" - ), - "tool_choice": convert_and_respect_annotation_metadata( - object_=tool_choice, annotation=PromptLogUpdateRequestToolChoiceParams, direction="write" - ), - "output": output, - "created_at": created_at, - "error": error, - "provider_latency": provider_latency, - "stdout": stdout, - "provider_request": provider_request, - "provider_response": provider_response, - "inputs": inputs, - "source": source, - "metadata": metadata, - "start_time": start_time, - "end_time": end_time, - "log_status": log_status, - }, - headers={ - "content-type": "application/json", - }, + response = self._raw_client.update_log( + id, + log_id, + output_message=output_message, + prompt_tokens=prompt_tokens, + reasoning_tokens=reasoning_tokens, + output_tokens=output_tokens, + prompt_cost=prompt_cost, + output_cost=output_cost, + finish_reason=finish_reason, + messages=messages, + tool_choice=tool_choice, + output=output, + created_at=created_at, + error=error, + provider_latency=provider_latency, + stdout=stdout, + provider_request=provider_request, + provider_response=provider_response, + inputs=inputs, + source=source, + metadata=metadata, + start_time=start_time, + end_time=end_time, + log_status=log_status, request_options=request_options, - omit=OMIT, ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - LogResponse, - construct_type( - type_=LogResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data def call_stream( self, @@ -676,82 +610,34 @@ def call_stream( for chunk in response: yield chunk """ - with self._client_wrapper.httpx_client.stream( - "prompts/call", - method="POST", - params={ - "version_id": version_id, - "environment": environment, - }, - json={ - "path": path, - "id": id, - "messages": convert_and_respect_annotation_metadata( - object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" - ), - "tool_choice": convert_and_respect_annotation_metadata( - object_=tool_choice, annotation=PromptsCallStreamRequestToolChoiceParams, direction="write" - ), - "prompt": convert_and_respect_annotation_metadata( - object_=prompt, annotation=PromptKernelRequestParams, direction="write" - ), - "inputs": inputs, - "source": source, - "metadata": metadata, - "start_time": start_time, - "end_time": end_time, - "log_status": log_status, - "source_datapoint_id": source_datapoint_id, - "trace_parent_id": trace_parent_id, - "user": user, - "environment": prompts_call_stream_request_environment, - "save": save, - "log_id": log_id, - "provider_api_keys": 
convert_and_respect_annotation_metadata( - object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write" - ), - "num_samples": num_samples, - "return_inputs": return_inputs, - "logprobs": logprobs, - "suffix": suffix, - "stream": True, - }, - headers={ - "content-type": "application/json", - }, + with self._raw_client.call_stream( + version_id=version_id, + environment=environment, + path=path, + id=id, + messages=messages, + tool_choice=tool_choice, + prompt=prompt, + inputs=inputs, + source=source, + metadata=metadata, + start_time=start_time, + end_time=end_time, + log_status=log_status, + source_datapoint_id=source_datapoint_id, + trace_parent_id=trace_parent_id, + user=user, + prompts_call_stream_request_environment=prompts_call_stream_request_environment, + save=save, + log_id=log_id, + provider_api_keys=provider_api_keys, + num_samples=num_samples, + return_inputs=return_inputs, + logprobs=logprobs, + suffix=suffix, request_options=request_options, - omit=OMIT, - ) as _response: - try: - if 200 <= _response.status_code < 300: - _event_source = httpx_sse.EventSource(_response) - for _sse in _event_source.iter_sse(): - try: - yield typing.cast( - PromptCallStreamResponse, - construct_type( - type_=PromptCallStreamResponse, # type: ignore - object_=json.loads(_sse.data), - ), - ) - except: - pass - return - _response.read() - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + ) as r: + yield from r.data def call( self, @@ -920,75 +806,34 @@ def call( messages=[{"role": "user", "content": "latest apple"}], ) """ - _response = self._client_wrapper.httpx_client.request( - "prompts/call", - method="POST", - params={ - "version_id": version_id, - "environment": environment, - }, - json={ - "path": path, - "id": id, - "messages": convert_and_respect_annotation_metadata( - object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" - ), - "tool_choice": convert_and_respect_annotation_metadata( - object_=tool_choice, annotation=PromptsCallRequestToolChoiceParams, direction="write" - ), - "prompt": convert_and_respect_annotation_metadata( - object_=prompt, annotation=PromptKernelRequestParams, direction="write" - ), - "inputs": inputs, - "source": source, - "metadata": metadata, - "start_time": start_time, - "end_time": end_time, - "log_status": log_status, - "source_datapoint_id": source_datapoint_id, - "trace_parent_id": trace_parent_id, - "user": user, - "environment": prompts_call_request_environment, - "save": save, - "log_id": log_id, - "provider_api_keys": convert_and_respect_annotation_metadata( - object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write" - ), - "num_samples": num_samples, - "return_inputs": return_inputs, - "logprobs": logprobs, - "suffix": suffix, - "stream": False, - }, - headers={ - "content-type": "application/json", - }, + response = self._raw_client.call( + version_id=version_id, + environment=environment, + path=path, + id=id, + messages=messages, + tool_choice=tool_choice, + prompt=prompt, + inputs=inputs, + source=source, + metadata=metadata, + start_time=start_time, + end_time=end_time, + log_status=log_status, + 
source_datapoint_id=source_datapoint_id, + trace_parent_id=trace_parent_id, + user=user, + prompts_call_request_environment=prompts_call_request_environment, + save=save, + log_id=log_id, + provider_api_keys=provider_api_keys, + num_samples=num_samples, + return_inputs=return_inputs, + logprobs=logprobs, + suffix=suffix, request_options=request_options, - omit=OMIT, ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - PromptCallResponse, - construct_type( - type_=PromptCallResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data def list( self, @@ -1049,7 +894,7 @@ def list( yield page """ page = page if page is not None else 1 - _response = self._client_wrapper.httpx_client.request( + _response = self._raw_client._client_wrapper.httpx_client.request( "prompts", method="GET", params={ @@ -1121,7 +966,8 @@ def upsert( tools: typing.Optional[typing.Sequence[ToolFunctionParams]] = OMIT, linked_tools: typing.Optional[typing.Sequence[str]] = OMIT, attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - commit_message: typing.Optional[str] = OMIT, + version_name: typing.Optional[str] = OMIT, + version_description: typing.Optional[str] = OMIT, description: typing.Optional[str] = OMIT, tags: typing.Optional[typing.Sequence[str]] = OMIT, readme: typing.Optional[str] = OMIT, @@ -1132,9 +978,9 @@ def upsert( Prompts are identified by the `ID` or their `path`. The parameters (i.e. the prompt template, temperature, model etc.) determine the versions of the Prompt. - If you provide a commit message, then the new version will be committed; - otherwise it will be uncommitted. If you try to commit an already committed version, - an exception will be raised. + You can provide `version_name` and `version_description` to identify and describe your versions. + Version names must be unique within a Prompt - attempting to create a version with a name + that already exists will result in a 409 Conflict error. Parameters ---------- @@ -1203,8 +1049,11 @@ def upsert( attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used. - commit_message : typing.Optional[str] - Message describing the changes made. + version_name : typing.Optional[str] + Unique name for the Prompt version. Version names must be unique for a given Prompt. + + version_description : typing.Optional[str] + Description of the version, e.g., the changes made in this version. description : typing.Optional[str] Description of the Prompt. 
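The new `version_name` / `version_description` parameters on `upsert` replace the old `commit_message`. Purely as an illustration (not part of this diff), a migrated upsert call might look like the sketch below; the path, model, and version values are placeholders, and per the docstring above, reusing a `version_name` that already exists on the Prompt is documented to produce a 409 Conflict.

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Placeholder values; other generation parameters (template, max_tokens, ...) omitted for brevity.
client.prompts.upsert(
    path="Personal Projects/Coding Assistant",
    model="gpt-4o",
    provider="openai",
    temperature=0.7,
    version_name="assistant-v1",  # must be unique for this Prompt; a duplicate name is documented to return 409 Conflict
    version_description="Named, described version replacing the old commit_message workflow.",
)
```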
@@ -1243,75 +1092,37 @@ def upsert( provider="openai", max_tokens=-1, temperature=0.7, - commit_message="Initial commit", ) """ - _response = self._client_wrapper.httpx_client.request( - "prompts", - method="POST", - json={ - "path": path, - "id": id, - "model": model, - "endpoint": endpoint, - "template": convert_and_respect_annotation_metadata( - object_=template, annotation=PromptRequestTemplateParams, direction="write" - ), - "template_language": template_language, - "provider": provider, - "max_tokens": max_tokens, - "temperature": temperature, - "top_p": top_p, - "stop": convert_and_respect_annotation_metadata( - object_=stop, annotation=PromptRequestStopParams, direction="write" - ), - "presence_penalty": presence_penalty, - "frequency_penalty": frequency_penalty, - "other": other, - "seed": seed, - "response_format": convert_and_respect_annotation_metadata( - object_=response_format, annotation=ResponseFormatParams, direction="write" - ), - "reasoning_effort": reasoning_effort, - "tools": convert_and_respect_annotation_metadata( - object_=tools, annotation=typing.Sequence[ToolFunctionParams], direction="write" - ), - "linked_tools": linked_tools, - "attributes": attributes, - "commit_message": commit_message, - "description": description, - "tags": tags, - "readme": readme, - }, - headers={ - "content-type": "application/json", - }, + response = self._raw_client.upsert( + model=model, + path=path, + id=id, + endpoint=endpoint, + template=template, + template_language=template_language, + provider=provider, + max_tokens=max_tokens, + temperature=temperature, + top_p=top_p, + stop=stop, + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + other=other, + seed=seed, + response_format=response_format, + reasoning_effort=reasoning_effort, + tools=tools, + linked_tools=linked_tools, + attributes=attributes, + version_name=version_name, + version_description=version_description, + description=description, + tags=tags, + readme=readme, request_options=request_options, - omit=OMIT, ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - PromptResponse, - construct_type( - type_=PromptResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data def get( self, @@ -1357,38 +1168,10 @@ def get( id="pr_30gco7dx6JDq4200GVOHa", ) """ - _response = self._client_wrapper.httpx_client.request( - f"prompts/{jsonable_encoder(id)}", - method="GET", - params={ - "version_id": version_id, - "environment": environment, - }, - request_options=request_options, + response = self._raw_client.get( + id, version_id=version_id, environment=environment, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - PromptResponse, - construct_type( - type_=PromptResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise 
ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: """ @@ -1417,28 +1200,8 @@ def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = id="pr_30gco7dx6JDq4200GVOHa", ) """ - _response = self._client_wrapper.httpx_client.request( - f"prompts/{jsonable_encoder(id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = self._raw_client.delete(id, request_options=request_options) + return response.data def move( self, @@ -1482,44 +1245,10 @@ def move( path="new directory/new name", ) """ - _response = self._client_wrapper.httpx_client.request( - f"prompts/{jsonable_encoder(id)}", - method="PATCH", - json={ - "path": path, - "name": name, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - PromptResponse, - construct_type( - type_=PromptResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = self._raw_client.move(id, path=path, name=name, request_options=request_options) + return response.data - def populate_template( + def populate( self, id: str, *, @@ -1562,51 +1291,20 @@ def populate_template( client = Humanloop( api_key="YOUR_API_KEY", ) - client.prompts.populate_template( + client.prompts.populate( id="id", request={"key": "value"}, ) """ - _response = self._client_wrapper.httpx_client.request( - f"prompts/{jsonable_encoder(id)}/populate", - method="POST", - params={ - "version_id": version_id, - "environment": environment, - }, - json=request, - request_options=request_options, - omit=OMIT, + response = self._raw_client.populate( + id, request=request, version_id=version_id, environment=environment, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - PopulateTemplateResponse, - construct_type( - type_=PopulateTemplateResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data def list_versions( self, id: str, *, - status: 
typing.Optional[VersionStatus] = None, evaluator_aggregates: typing.Optional[bool] = None, request_options: typing.Optional[RequestOptions] = None, ) -> ListPrompts: @@ -1618,9 +1316,6 @@ def list_versions( id : str Unique identifier for Prompt. - status : typing.Optional[VersionStatus] - Filter versions by status: 'uncommitted', 'committed'. If no status is provided, all versions are returned. - evaluator_aggregates : typing.Optional[bool] Whether to include Evaluator aggregate results for the versions in the response @@ -1641,49 +1336,18 @@ def list_versions( ) client.prompts.list_versions( id="pr_30gco7dx6JDq4200GVOHa", - status="committed", ) """ - _response = self._client_wrapper.httpx_client.request( - f"prompts/{jsonable_encoder(id)}/versions", - method="GET", - params={ - "status": status, - "evaluator_aggregates": evaluator_aggregates, - }, - request_options=request_options, + response = self._raw_client.list_versions( + id, evaluator_aggregates=evaluator_aggregates, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - ListPrompts, - construct_type( - type_=ListPrompts, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data - def commit( - self, id: str, version_id: str, *, commit_message: str, request_options: typing.Optional[RequestOptions] = None - ) -> PromptResponse: + def delete_prompt_version( + self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> None: """ - Commit a version of the Prompt with a commit message. - - If the version is already committed, an exception will be raised. + Delete a version of the Prompt. Parameters ---------- @@ -1693,16 +1357,12 @@ def commit( version_id : str Unique identifier for the specific version of the Prompt. - commit_message : str - Message describing the changes made. - request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - PromptResponse - Successful Response + None Examples -------- @@ -1711,50 +1371,25 @@ def commit( client = Humanloop( api_key="YOUR_API_KEY", ) - client.prompts.commit( - id="pr_30gco7dx6JDq4200GVOHa", - version_id="prv_F34aba5f3asp0", - commit_message="Reiterated point about not discussing sentience", + client.prompts.delete_prompt_version( + id="id", + version_id="version_id", ) """ - _response = self._client_wrapper.httpx_client.request( - f"prompts/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}/commit", - method="POST", - json={ - "commit_message": commit_message, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - PromptResponse, - construct_type( - type_=PromptResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = self._raw_client.delete_prompt_version(id, version_id, request_options=request_options) + return response.data - def delete_prompt_version( - self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> None: + def patch_prompt_version( + self, + id: str, + version_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> PromptResponse: """ - Delete a version of the Prompt. + Update the name or description of the Prompt version. Parameters ---------- @@ -1764,12 +1399,19 @@ def delete_prompt_version( version_id : str Unique identifier for the specific version of the Prompt. + name : typing.Optional[str] + Name of the version. + + description : typing.Optional[str] + Description of the version. + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - None + PromptResponse + Successful Response Examples -------- @@ -1778,33 +1420,15 @@ def delete_prompt_version( client = Humanloop( api_key="YOUR_API_KEY", ) - client.prompts.delete_prompt_version( + client.prompts.patch_prompt_version( id="id", version_id="version_id", ) """ - _response = self._client_wrapper.httpx_client.request( - f"prompts/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", - method="DELETE", - request_options=request_options, + response = self._raw_client.patch_prompt_version( + id, version_id, name=name, description=description, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data def set_deployment( self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None @@ -1847,37 +1471,10 @@ def set_deployment( version_id="version_id", ) """ - _response = self._client_wrapper.httpx_client.request( - f"prompts/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", - method="POST", - params={ - "version_id": version_id, - }, - request_options=request_options, + response = self._raw_client.set_deployment( + id, environment_id, version_id=version_id, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - PromptResponse, - construct_type( - type_=PromptResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data def remove_deployment( self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None @@ -1915,28 +1512,8 @@ def remove_deployment( environment_id="environment_id", ) """ - _response = self._client_wrapper.httpx_client.request( - f"prompts/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = self._raw_client.remove_deployment(id, environment_id, request_options=request_options) + return response.data def list_environments( self, id: str, *, request_options: typing.Optional[RequestOptions] = None @@ -1968,34 +1545,8 @@ def list_environments( id="pr_30gco7dx6JDq4200GVOHa", ) """ - _response = self._client_wrapper.httpx_client.request( 
- f"prompts/{jsonable_encoder(id)}/environments", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - typing.List[FileEnvironmentResponse], - construct_type( - type_=typing.List[FileEnvironmentResponse], # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = self._raw_client.list_environments(id, request_options=request_options) + return response.data def update_monitoring( self, @@ -2041,52 +1592,26 @@ def update_monitoring( activate=[{"evaluator_version_id": "evv_1abc4308abd"}], ) """ - _response = self._client_wrapper.httpx_client.request( - f"prompts/{jsonable_encoder(id)}/evaluators", - method="POST", - json={ - "activate": convert_and_respect_annotation_metadata( - object_=activate, - annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams], - direction="write", - ), - "deactivate": convert_and_respect_annotation_metadata( - object_=deactivate, - annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams], - direction="write", - ), - }, - request_options=request_options, - omit=OMIT, + response = self._raw_client.update_monitoring( + id, activate=activate, deactivate=deactivate, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - PromptResponse, - construct_type( - type_=PromptResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data class AsyncPromptsClient: def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper + self._raw_client = AsyncRawPromptsClient(client_wrapper=client_wrapper) + + @property + def with_raw_response(self) -> AsyncRawPromptsClient: + """ + Retrieves a raw implementation of this client that returns raw responses. 
+ + Returns + ------- + AsyncRawPromptsClient + """ + return self._raw_client async def log( self, @@ -2300,84 +1825,44 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - "prompts/log", - method="POST", - params={ - "version_id": version_id, - "environment": environment, - }, - json={ - "run_id": run_id, - "path": path, - "id": id, - "output_message": convert_and_respect_annotation_metadata( - object_=output_message, annotation=ChatMessageParams, direction="write" - ), - "prompt_tokens": prompt_tokens, - "reasoning_tokens": reasoning_tokens, - "output_tokens": output_tokens, - "prompt_cost": prompt_cost, - "output_cost": output_cost, - "finish_reason": finish_reason, - "messages": convert_and_respect_annotation_metadata( - object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" - ), - "tool_choice": convert_and_respect_annotation_metadata( - object_=tool_choice, annotation=PromptLogRequestToolChoiceParams, direction="write" - ), - "prompt": convert_and_respect_annotation_metadata( - object_=prompt, annotation=PromptKernelRequestParams, direction="write" - ), - "start_time": start_time, - "end_time": end_time, - "output": output, - "created_at": created_at, - "error": error, - "provider_latency": provider_latency, - "stdout": stdout, - "provider_request": provider_request, - "provider_response": provider_response, - "inputs": inputs, - "source": source, - "metadata": metadata, - "log_status": log_status, - "source_datapoint_id": source_datapoint_id, - "trace_parent_id": trace_parent_id, - "user": user, - "environment": prompt_log_request_environment, - "save": save, - "log_id": log_id, - }, - headers={ - "content-type": "application/json", - }, + response = await self._raw_client.log( + version_id=version_id, + environment=environment, + run_id=run_id, + path=path, + id=id, + output_message=output_message, + prompt_tokens=prompt_tokens, + reasoning_tokens=reasoning_tokens, + output_tokens=output_tokens, + prompt_cost=prompt_cost, + output_cost=output_cost, + finish_reason=finish_reason, + messages=messages, + tool_choice=tool_choice, + prompt=prompt, + start_time=start_time, + end_time=end_time, + output=output, + created_at=created_at, + error=error, + provider_latency=provider_latency, + stdout=stdout, + provider_request=provider_request, + provider_response=provider_response, + inputs=inputs, + source=source, + metadata=metadata, + log_status=log_status, + source_datapoint_id=source_datapoint_id, + trace_parent_id=trace_parent_id, + user=user, + prompt_log_request_environment=prompt_log_request_environment, + save=save, + log_id=log_id, request_options=request_options, - omit=OMIT, ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - CreatePromptLogResponse, - construct_type( - type_=CreatePromptLogResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data async def update_log( self, @@ -2519,68 +2004,34 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - 
f"prompts/{jsonable_encoder(id)}/log/{jsonable_encoder(log_id)}", - method="PATCH", - json={ - "output_message": convert_and_respect_annotation_metadata( - object_=output_message, annotation=ChatMessageParams, direction="write" - ), - "prompt_tokens": prompt_tokens, - "reasoning_tokens": reasoning_tokens, - "output_tokens": output_tokens, - "prompt_cost": prompt_cost, - "output_cost": output_cost, - "finish_reason": finish_reason, - "messages": convert_and_respect_annotation_metadata( - object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" - ), - "tool_choice": convert_and_respect_annotation_metadata( - object_=tool_choice, annotation=PromptLogUpdateRequestToolChoiceParams, direction="write" - ), - "output": output, - "created_at": created_at, - "error": error, - "provider_latency": provider_latency, - "stdout": stdout, - "provider_request": provider_request, - "provider_response": provider_response, - "inputs": inputs, - "source": source, - "metadata": metadata, - "start_time": start_time, - "end_time": end_time, - "log_status": log_status, - }, - headers={ - "content-type": "application/json", - }, + response = await self._raw_client.update_log( + id, + log_id, + output_message=output_message, + prompt_tokens=prompt_tokens, + reasoning_tokens=reasoning_tokens, + output_tokens=output_tokens, + prompt_cost=prompt_cost, + output_cost=output_cost, + finish_reason=finish_reason, + messages=messages, + tool_choice=tool_choice, + output=output, + created_at=created_at, + error=error, + provider_latency=provider_latency, + stdout=stdout, + provider_request=provider_request, + provider_response=provider_response, + inputs=inputs, + source=source, + metadata=metadata, + start_time=start_time, + end_time=end_time, + log_status=log_status, request_options=request_options, - omit=OMIT, ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - LogResponse, - construct_type( - type_=LogResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data async def call_stream( self, @@ -2730,82 +2181,35 @@ async def main() -> None: asyncio.run(main()) """ - async with self._client_wrapper.httpx_client.stream( - "prompts/call", - method="POST", - params={ - "version_id": version_id, - "environment": environment, - }, - json={ - "path": path, - "id": id, - "messages": convert_and_respect_annotation_metadata( - object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" - ), - "tool_choice": convert_and_respect_annotation_metadata( - object_=tool_choice, annotation=PromptsCallStreamRequestToolChoiceParams, direction="write" - ), - "prompt": convert_and_respect_annotation_metadata( - object_=prompt, annotation=PromptKernelRequestParams, direction="write" - ), - "inputs": inputs, - "source": source, - "metadata": metadata, - "start_time": start_time, - "end_time": end_time, - "log_status": log_status, - "source_datapoint_id": source_datapoint_id, - "trace_parent_id": trace_parent_id, - "user": user, - "environment": prompts_call_stream_request_environment, - "save": save, - "log_id": log_id, - "provider_api_keys": 
convert_and_respect_annotation_metadata( - object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write" - ), - "num_samples": num_samples, - "return_inputs": return_inputs, - "logprobs": logprobs, - "suffix": suffix, - "stream": True, - }, - headers={ - "content-type": "application/json", - }, + async with self._raw_client.call_stream( + version_id=version_id, + environment=environment, + path=path, + id=id, + messages=messages, + tool_choice=tool_choice, + prompt=prompt, + inputs=inputs, + source=source, + metadata=metadata, + start_time=start_time, + end_time=end_time, + log_status=log_status, + source_datapoint_id=source_datapoint_id, + trace_parent_id=trace_parent_id, + user=user, + prompts_call_stream_request_environment=prompts_call_stream_request_environment, + save=save, + log_id=log_id, + provider_api_keys=provider_api_keys, + num_samples=num_samples, + return_inputs=return_inputs, + logprobs=logprobs, + suffix=suffix, request_options=request_options, - omit=OMIT, - ) as _response: - try: - if 200 <= _response.status_code < 300: - _event_source = httpx_sse.EventSource(_response) - async for _sse in _event_source.aiter_sse(): - try: - yield typing.cast( - PromptCallStreamResponse, - construct_type( - type_=PromptCallStreamResponse, # type: ignore - object_=json.loads(_sse.data), - ), - ) - except: - pass - return - await _response.aread() - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + ) as r: + async for data in r.data: + yield data async def call( self, @@ -2982,75 +2386,34 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - "prompts/call", - method="POST", - params={ - "version_id": version_id, - "environment": environment, - }, - json={ - "path": path, - "id": id, - "messages": convert_and_respect_annotation_metadata( - object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" - ), - "tool_choice": convert_and_respect_annotation_metadata( - object_=tool_choice, annotation=PromptsCallRequestToolChoiceParams, direction="write" - ), - "prompt": convert_and_respect_annotation_metadata( - object_=prompt, annotation=PromptKernelRequestParams, direction="write" - ), - "inputs": inputs, - "source": source, - "metadata": metadata, - "start_time": start_time, - "end_time": end_time, - "log_status": log_status, - "source_datapoint_id": source_datapoint_id, - "trace_parent_id": trace_parent_id, - "user": user, - "environment": prompts_call_request_environment, - "save": save, - "log_id": log_id, - "provider_api_keys": convert_and_respect_annotation_metadata( - object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write" - ), - "num_samples": num_samples, - "return_inputs": return_inputs, - "logprobs": logprobs, - "suffix": suffix, - "stream": False, - }, - headers={ - "content-type": "application/json", - }, + response = await self._raw_client.call( + version_id=version_id, + environment=environment, + path=path, + id=id, + messages=messages, + tool_choice=tool_choice, + prompt=prompt, + inputs=inputs, + source=source, + metadata=metadata, + start_time=start_time, + end_time=end_time, + 
log_status=log_status, + source_datapoint_id=source_datapoint_id, + trace_parent_id=trace_parent_id, + user=user, + prompts_call_request_environment=prompts_call_request_environment, + save=save, + log_id=log_id, + provider_api_keys=provider_api_keys, + num_samples=num_samples, + return_inputs=return_inputs, + logprobs=logprobs, + suffix=suffix, request_options=request_options, - omit=OMIT, ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - PromptCallResponse, - construct_type( - type_=PromptCallResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data async def list( self, @@ -3119,7 +2482,7 @@ async def main() -> None: asyncio.run(main()) """ page = page if page is not None else 1 - _response = await self._client_wrapper.httpx_client.request( + _response = await self._raw_client._client_wrapper.httpx_client.request( "prompts", method="GET", params={ @@ -3191,7 +2554,8 @@ async def upsert( tools: typing.Optional[typing.Sequence[ToolFunctionParams]] = OMIT, linked_tools: typing.Optional[typing.Sequence[str]] = OMIT, attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - commit_message: typing.Optional[str] = OMIT, + version_name: typing.Optional[str] = OMIT, + version_description: typing.Optional[str] = OMIT, description: typing.Optional[str] = OMIT, tags: typing.Optional[typing.Sequence[str]] = OMIT, readme: typing.Optional[str] = OMIT, @@ -3202,9 +2566,9 @@ async def upsert( Prompts are identified by the `ID` or their `path`. The parameters (i.e. the prompt template, temperature, model etc.) determine the versions of the Prompt. - If you provide a commit message, then the new version will be committed; - otherwise it will be uncommitted. If you try to commit an already committed version, - an exception will be raised. + You can provide `version_name` and `version_description` to identify and describe your versions. + Version names must be unique within a Prompt - attempting to create a version with a name + that already exists will result in a 409 Conflict error. Parameters ---------- @@ -3273,8 +2637,11 @@ async def upsert( attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used. - commit_message : typing.Optional[str] - Message describing the changes made. + version_name : typing.Optional[str] + Unique name for the Prompt version. Version names must be unique for a given Prompt. + + version_description : typing.Optional[str] + Description of the version, e.g., the changes made in this version. description : typing.Optional[str] Description of the Prompt. 
@@ -3318,78 +2685,40 @@ async def main() -> None: provider="openai", max_tokens=-1, temperature=0.7, - commit_message="Initial commit", ) asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - "prompts", - method="POST", - json={ - "path": path, - "id": id, - "model": model, - "endpoint": endpoint, - "template": convert_and_respect_annotation_metadata( - object_=template, annotation=PromptRequestTemplateParams, direction="write" - ), - "template_language": template_language, - "provider": provider, - "max_tokens": max_tokens, - "temperature": temperature, - "top_p": top_p, - "stop": convert_and_respect_annotation_metadata( - object_=stop, annotation=PromptRequestStopParams, direction="write" - ), - "presence_penalty": presence_penalty, - "frequency_penalty": frequency_penalty, - "other": other, - "seed": seed, - "response_format": convert_and_respect_annotation_metadata( - object_=response_format, annotation=ResponseFormatParams, direction="write" - ), - "reasoning_effort": reasoning_effort, - "tools": convert_and_respect_annotation_metadata( - object_=tools, annotation=typing.Sequence[ToolFunctionParams], direction="write" - ), - "linked_tools": linked_tools, - "attributes": attributes, - "commit_message": commit_message, - "description": description, - "tags": tags, - "readme": readme, - }, - headers={ - "content-type": "application/json", - }, + response = await self._raw_client.upsert( + model=model, + path=path, + id=id, + endpoint=endpoint, + template=template, + template_language=template_language, + provider=provider, + max_tokens=max_tokens, + temperature=temperature, + top_p=top_p, + stop=stop, + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + other=other, + seed=seed, + response_format=response_format, + reasoning_effort=reasoning_effort, + tools=tools, + linked_tools=linked_tools, + attributes=attributes, + version_name=version_name, + version_description=version_description, + description=description, + tags=tags, + readme=readme, request_options=request_options, - omit=OMIT, ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - PromptResponse, - construct_type( - type_=PromptResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data async def get( self, @@ -3443,38 +2772,10 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"prompts/{jsonable_encoder(id)}", - method="GET", - params={ - "version_id": version_id, - "environment": environment, - }, - request_options=request_options, + response = await self._raw_client.get( + id, version_id=version_id, environment=environment, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - PromptResponse, - construct_type( - type_=PromptResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - 
_response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data async def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: """ @@ -3511,28 +2812,8 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"prompts/{jsonable_encoder(id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = await self._raw_client.delete(id, request_options=request_options) + return response.data async def move( self, @@ -3584,44 +2865,10 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"prompts/{jsonable_encoder(id)}", - method="PATCH", - json={ - "path": path, - "name": name, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - PromptResponse, - construct_type( - type_=PromptResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = await self._raw_client.move(id, path=path, name=name, request_options=request_options) + return response.data - async def populate_template( + async def populate( self, id: str, *, @@ -3669,7 +2916,7 @@ async def populate_template( async def main() -> None: - await client.prompts.populate_template( + await client.prompts.populate( id="id", request={"key": "value"}, ) @@ -3677,46 +2924,15 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"prompts/{jsonable_encoder(id)}/populate", - method="POST", - params={ - "version_id": version_id, - "environment": environment, - }, - json=request, - request_options=request_options, - omit=OMIT, + response = await self._raw_client.populate( + id, request=request, version_id=version_id, environment=environment, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - PopulateTemplateResponse, - construct_type( - type_=PopulateTemplateResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise 
ApiError(status_code=_response.status_code, body=_response_json) + return response.data async def list_versions( self, id: str, *, - status: typing.Optional[VersionStatus] = None, evaluator_aggregates: typing.Optional[bool] = None, request_options: typing.Optional[RequestOptions] = None, ) -> ListPrompts: @@ -3728,9 +2944,6 @@ async def list_versions( id : str Unique identifier for Prompt. - status : typing.Optional[VersionStatus] - Filter versions by status: 'uncommitted', 'committed'. If no status is provided, all versions are returned. - evaluator_aggregates : typing.Optional[bool] Whether to include Evaluator aggregate results for the versions in the response @@ -3756,52 +2969,21 @@ async def list_versions( async def main() -> None: await client.prompts.list_versions( id="pr_30gco7dx6JDq4200GVOHa", - status="committed", ) asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"prompts/{jsonable_encoder(id)}/versions", - method="GET", - params={ - "status": status, - "evaluator_aggregates": evaluator_aggregates, - }, - request_options=request_options, + response = await self._raw_client.list_versions( + id, evaluator_aggregates=evaluator_aggregates, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - ListPrompts, - construct_type( - type_=ListPrompts, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data - async def commit( - self, id: str, version_id: str, *, commit_message: str, request_options: typing.Optional[RequestOptions] = None - ) -> PromptResponse: + async def delete_prompt_version( + self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> None: """ - Commit a version of the Prompt with a commit message. - - If the version is already committed, an exception will be raised. + Delete a version of the Prompt. Parameters ---------- @@ -3811,16 +2993,12 @@ async def commit( version_id : str Unique identifier for the specific version of the Prompt. - commit_message : str - Message describing the changes made. - request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - PromptResponse - Successful Response + None Examples -------- @@ -3834,53 +3012,28 @@ async def commit( async def main() -> None: - await client.prompts.commit( - id="pr_30gco7dx6JDq4200GVOHa", - version_id="prv_F34aba5f3asp0", - commit_message="Reiterated point about not discussing sentience", + await client.prompts.delete_prompt_version( + id="id", + version_id="version_id", ) asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"prompts/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}/commit", - method="POST", - json={ - "commit_message": commit_message, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - PromptResponse, - construct_type( - type_=PromptResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = await self._raw_client.delete_prompt_version(id, version_id, request_options=request_options) + return response.data - async def delete_prompt_version( - self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> None: + async def patch_prompt_version( + self, + id: str, + version_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> PromptResponse: """ - Delete a version of the Prompt. + Update the name or description of the Prompt version. Parameters ---------- @@ -3890,12 +3043,19 @@ async def delete_prompt_version( version_id : str Unique identifier for the specific version of the Prompt. + name : typing.Optional[str] + Name of the version. + + description : typing.Optional[str] + Description of the version. + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - None + PromptResponse + Successful Response Examples -------- @@ -3909,7 +3069,7 @@ async def delete_prompt_version( async def main() -> None: - await client.prompts.delete_prompt_version( + await client.prompts.patch_prompt_version( id="id", version_id="version_id", ) @@ -3917,28 +3077,10 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"prompts/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", - method="DELETE", - request_options=request_options, + response = await self._raw_client.patch_prompt_version( + id, version_id, name=name, description=description, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data async def set_deployment( self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None @@ -3989,37 +3131,10 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"prompts/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", - method="POST", - params={ - "version_id": version_id, - }, - request_options=request_options, + response = await self._raw_client.set_deployment( + id, environment_id, version_id=version_id, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - PromptResponse, - construct_type( - type_=PromptResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data async def remove_deployment( self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None @@ -4065,28 +3180,8 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"prompts/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = await self._raw_client.remove_deployment(id, environment_id, request_options=request_options) + return response.data async def list_environments( self, id: str, *, request_options: typing.Optional[RequestOptions] = None @@ -4126,34 +3221,8 @@ async def 
main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"prompts/{jsonable_encoder(id)}/environments", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - typing.List[FileEnvironmentResponse], - construct_type( - type_=typing.List[FileEnvironmentResponse], # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = await self._raw_client.list_environments(id, request_options=request_options) + return response.data async def update_monitoring( self, @@ -4207,44 +3276,7 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"prompts/{jsonable_encoder(id)}/evaluators", - method="POST", - json={ - "activate": convert_and_respect_annotation_metadata( - object_=activate, - annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams], - direction="write", - ), - "deactivate": convert_and_respect_annotation_metadata( - object_=deactivate, - annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams], - direction="write", - ), - }, - request_options=request_options, - omit=OMIT, + response = await self._raw_client.update_monitoring( + id, activate=activate, deactivate=deactivate, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - PromptResponse, - construct_type( - type_=PromptResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data diff --git a/src/humanloop/prompts/raw_client.py b/src/humanloop/prompts/raw_client.py new file mode 100644 index 00000000..b5334c82 --- /dev/null +++ b/src/humanloop/prompts/raw_client.py @@ -0,0 +1,3441 @@ +# This file was auto-generated by Fern from our API Definition. 
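+ # Editor's illustrative sketch (not generated code): the hunks above rewire the
+ # high-level PromptsClient/AsyncPromptsClient to delegate to the raw clients in
+ # this file and return `response.data`. The workflow below uses only parameters
+ # documented in this diff (version `name`/`description`, deployment identifiers);
+ # the literal IDs and the version name/description are placeholders.
+ def _example_rename_and_deploy() -> None:
+     from humanloop import Humanloop  # public client; imported lazily so this sketch stays self-contained
+
+     client = Humanloop(api_key="YOUR_API_KEY")
+
+     # Rename and describe an existing Prompt version (PATCH prompts/{id}/versions/{version_id}).
+     prompt = client.prompts.patch_prompt_version(
+         id="id",
+         version_id="version_id",
+         name="v1-terse-system-message",
+         description="Tightened the system message.",
+     )
+
+     # Deploy that version to an Environment (POST prompts/{id}/environments/{environment_id}).
+     client.prompts.set_deployment(
+         id=prompt.id,
+         environment_id="environment_id",
+         version_id="version_id",
+     )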
+ +import typing +from ..core.client_wrapper import SyncClientWrapper +from ..requests.chat_message import ChatMessageParams +from .requests.prompt_log_request_tool_choice import PromptLogRequestToolChoiceParams +from ..requests.prompt_kernel_request import PromptKernelRequestParams +import datetime as dt +from ..types.log_status import LogStatus +from ..core.request_options import RequestOptions +from ..core.http_response import HttpResponse +from ..types.create_prompt_log_response import CreatePromptLogResponse +from ..core.serialization import convert_and_respect_annotation_metadata +from ..core.unchecked_base_model import construct_type +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..types.http_validation_error import HttpValidationError +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError +from .requests.prompt_log_update_request_tool_choice import PromptLogUpdateRequestToolChoiceParams +from ..types.log_response import LogResponse +from ..core.jsonable_encoder import jsonable_encoder +from .requests.prompts_call_stream_request_tool_choice import PromptsCallStreamRequestToolChoiceParams +from ..requests.provider_api_keys import ProviderApiKeysParams +from ..types.prompt_call_stream_response import PromptCallStreamResponse +import httpx_sse +import contextlib +from .requests.prompts_call_request_tool_choice import PromptsCallRequestToolChoiceParams +from ..types.prompt_call_response import PromptCallResponse +from ..types.model_endpoints import ModelEndpoints +from .requests.prompt_request_template import PromptRequestTemplateParams +from ..types.template_language import TemplateLanguage +from ..types.model_providers import ModelProviders +from .requests.prompt_request_stop import PromptRequestStopParams +from ..requests.response_format import ResponseFormatParams +from ..types.reasoning_effort import ReasoningEffort +from ..requests.tool_function import ToolFunctionParams +from ..types.prompt_response import PromptResponse +from ..types.populate_template_response import PopulateTemplateResponse +from ..types.list_prompts import ListPrompts +from ..types.file_environment_response import FileEnvironmentResponse +from ..requests.evaluator_activation_deactivation_request_activate_item import ( + EvaluatorActivationDeactivationRequestActivateItemParams, +) +from ..requests.evaluator_activation_deactivation_request_deactivate_item import ( + EvaluatorActivationDeactivationRequestDeactivateItemParams, +) +from ..core.client_wrapper import AsyncClientWrapper +from ..core.http_response import AsyncHttpResponse + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) 
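+ # Editor's illustrative sketch (not generated code): how the raw client's
+ # HttpResponse wrappers are consumed directly. The raw methods defined below
+ # return HttpResponse[...] objects whose `.data` carries the parsed model, and
+ # `call_stream` is a context manager yielding HttpResponse[Iterator[...]].
+ # Obtaining a RawPromptsClient instance is an assumption here (the generated
+ # PromptsClient stores one as `_raw_client`); the Prompt path and inputs are
+ # placeholders.
+ def _raw_prompts_usage(raw: "RawPromptsClient") -> None:
+     # Non-streaming call against the default deployed version of an existing Prompt.
+     http_response = raw.call(path="folder/name", inputs={"question": "What is Humanloop?"})
+     print(http_response.data)  # PromptCallResponse parsed from the JSON body
+
+     # Streaming call: events are parsed into PromptCallStreamResponse chunks.
+     with raw.call_stream(path="folder/name", inputs={"question": "What is Humanloop?"}) as stream_response:
+         for chunk in stream_response.data:
+             print(chunk)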
+ + +class RawPromptsClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def log( + self, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + run_id: typing.Optional[str] = OMIT, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + output_message: typing.Optional[ChatMessageParams] = OMIT, + prompt_tokens: typing.Optional[int] = OMIT, + reasoning_tokens: typing.Optional[int] = OMIT, + output_tokens: typing.Optional[int] = OMIT, + prompt_cost: typing.Optional[float] = OMIT, + output_cost: typing.Optional[float] = OMIT, + finish_reason: typing.Optional[str] = OMIT, + messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, + tool_choice: typing.Optional[PromptLogRequestToolChoiceParams] = OMIT, + prompt: typing.Optional[PromptKernelRequestParams] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + output: typing.Optional[str] = OMIT, + created_at: typing.Optional[dt.datetime] = OMIT, + error: typing.Optional[str] = OMIT, + provider_latency: typing.Optional[float] = OMIT, + stdout: typing.Optional[str] = OMIT, + provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + source_datapoint_id: typing.Optional[str] = OMIT, + trace_parent_id: typing.Optional[str] = OMIT, + user: typing.Optional[str] = OMIT, + prompt_log_request_environment: typing.Optional[str] = OMIT, + save: typing.Optional[bool] = OMIT, + log_id: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[CreatePromptLogResponse]: + """ + Log to a Prompt. + + You can use query parameters `version_id`, or `environment`, to target + an existing version of the Prompt. Otherwise, the default deployed version will be chosen. + + Instead of targeting an existing version explicitly, you can instead pass in + Prompt details in the request body. In this case, we will check if the details correspond + to an existing version of the Prompt. If they do not, we will create a new version. This is helpful + in the case where you are storing or deriving your Prompt details in code. + + Parameters + ---------- + version_id : typing.Optional[str] + A specific Version ID of the Prompt to log to. + + environment : typing.Optional[str] + Name of the Environment identifying a deployed version to log to. + + run_id : typing.Optional[str] + Unique identifier for the Run to associate the Log to. + + path : typing.Optional[str] + Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Prompt. + + output_message : typing.Optional[ChatMessageParams] + The message returned by the provider. + + prompt_tokens : typing.Optional[int] + Number of tokens in the prompt used to generate the output. + + reasoning_tokens : typing.Optional[int] + Number of reasoning tokens used to generate the output. 
+ + output_tokens : typing.Optional[int] + Number of tokens in the output generated by the model. + + prompt_cost : typing.Optional[float] + Cost in dollars associated to the tokens in the prompt. + + output_cost : typing.Optional[float] + Cost in dollars associated to the tokens in the output. + + finish_reason : typing.Optional[str] + Reason the generation finished. + + messages : typing.Optional[typing.Sequence[ChatMessageParams]] + The messages passed to the to provider chat endpoint. + + tool_choice : typing.Optional[PromptLogRequestToolChoiceParams] + Controls how the model uses tools. The following options are supported: + - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. + - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. + - `'required'` means the model must call one or more of the provided tools. + - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. + + prompt : typing.Optional[PromptKernelRequestParams] + Details of your Prompt. A new Prompt version will be created if the provided details are new. + + start_time : typing.Optional[dt.datetime] + When the logged event started. + + end_time : typing.Optional[dt.datetime] + When the logged event ended. + + output : typing.Optional[str] + Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + + created_at : typing.Optional[dt.datetime] + User defined timestamp for when the log was created. + + error : typing.Optional[str] + Error message if the log is an error. + + provider_latency : typing.Optional[float] + Duration of the logged event in seconds. + + stdout : typing.Optional[str] + Captured log and debug statements. + + provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Raw request sent to provider. + + provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Raw response received the provider. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the prompt template. + + source : typing.Optional[str] + Identifies where the model was called from. + + metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Any additional metadata to record. + + log_status : typing.Optional[LogStatus] + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + + source_datapoint_id : typing.Optional[str] + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + + trace_parent_id : typing.Optional[str] + The ID of the parent Log to nest this Log under in a Trace. + + user : typing.Optional[str] + End-user ID related to the Log. + + prompt_log_request_environment : typing.Optional[str] + The name of the Environment the Log is associated to. 
+ + save : typing.Optional[bool] + Whether the request/response payloads will be stored on Humanloop. + + log_id : typing.Optional[str] + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[CreatePromptLogResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + "prompts/log", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + }, + json={ + "run_id": run_id, + "path": path, + "id": id, + "output_message": convert_and_respect_annotation_metadata( + object_=output_message, annotation=ChatMessageParams, direction="write" + ), + "prompt_tokens": prompt_tokens, + "reasoning_tokens": reasoning_tokens, + "output_tokens": output_tokens, + "prompt_cost": prompt_cost, + "output_cost": output_cost, + "finish_reason": finish_reason, + "messages": convert_and_respect_annotation_metadata( + object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" + ), + "tool_choice": convert_and_respect_annotation_metadata( + object_=tool_choice, annotation=PromptLogRequestToolChoiceParams, direction="write" + ), + "prompt": convert_and_respect_annotation_metadata( + object_=prompt, annotation=PromptKernelRequestParams, direction="write" + ), + "start_time": start_time, + "end_time": end_time, + "output": output, + "created_at": created_at, + "error": error, + "provider_latency": provider_latency, + "stdout": stdout, + "provider_request": provider_request, + "provider_response": provider_response, + "inputs": inputs, + "source": source, + "metadata": metadata, + "log_status": log_status, + "source_datapoint_id": source_datapoint_id, + "trace_parent_id": trace_parent_id, + "user": user, + "environment": prompt_log_request_environment, + "save": save, + "log_id": log_id, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + CreatePromptLogResponse, + construct_type( + type_=CreatePromptLogResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def update_log( + self, + id: str, + log_id: str, + *, + output_message: typing.Optional[ChatMessageParams] = OMIT, + prompt_tokens: typing.Optional[int] = OMIT, + reasoning_tokens: typing.Optional[int] = OMIT, + output_tokens: typing.Optional[int] = OMIT, + prompt_cost: typing.Optional[float] = OMIT, + output_cost: typing.Optional[float] = OMIT, + finish_reason: typing.Optional[str] = OMIT, + messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, + tool_choice: typing.Optional[PromptLogUpdateRequestToolChoiceParams] = OMIT, + output: typing.Optional[str] = OMIT, + created_at: typing.Optional[dt.datetime] = OMIT, + error: typing.Optional[str] = OMIT, + provider_latency: typing.Optional[float] = OMIT, + stdout: typing.Optional[str] = OMIT, + provider_request: typing.Optional[typing.Dict[str, 
typing.Optional[typing.Any]]] = OMIT, + provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[LogResponse]: + """ + Update a Log. + + Update the details of a Log with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + log_id : str + Unique identifier for the Log. + + output_message : typing.Optional[ChatMessageParams] + The message returned by the provider. + + prompt_tokens : typing.Optional[int] + Number of tokens in the prompt used to generate the output. + + reasoning_tokens : typing.Optional[int] + Number of reasoning tokens used to generate the output. + + output_tokens : typing.Optional[int] + Number of tokens in the output generated by the model. + + prompt_cost : typing.Optional[float] + Cost in dollars associated to the tokens in the prompt. + + output_cost : typing.Optional[float] + Cost in dollars associated to the tokens in the output. + + finish_reason : typing.Optional[str] + Reason the generation finished. + + messages : typing.Optional[typing.Sequence[ChatMessageParams]] + The messages passed to the to provider chat endpoint. + + tool_choice : typing.Optional[PromptLogUpdateRequestToolChoiceParams] + Controls how the model uses tools. The following options are supported: + - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. + - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. + - `'required'` means the model must call one or more of the provided tools. + - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. + + output : typing.Optional[str] + Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + + created_at : typing.Optional[dt.datetime] + User defined timestamp for when the log was created. + + error : typing.Optional[str] + Error message if the log is an error. + + provider_latency : typing.Optional[float] + Duration of the logged event in seconds. + + stdout : typing.Optional[str] + Captured log and debug statements. + + provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Raw request sent to provider. + + provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Raw response received the provider. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the prompt template. + + source : typing.Optional[str] + Identifies where the model was called from. + + metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Any additional metadata to record. + + start_time : typing.Optional[dt.datetime] + When the logged event started. + + end_time : typing.Optional[dt.datetime] + When the logged event ended. + + log_status : typing.Optional[LogStatus] + Status of a Log. 
Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[LogResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}/log/{jsonable_encoder(log_id)}", + method="PATCH", + json={ + "output_message": convert_and_respect_annotation_metadata( + object_=output_message, annotation=ChatMessageParams, direction="write" + ), + "prompt_tokens": prompt_tokens, + "reasoning_tokens": reasoning_tokens, + "output_tokens": output_tokens, + "prompt_cost": prompt_cost, + "output_cost": output_cost, + "finish_reason": finish_reason, + "messages": convert_and_respect_annotation_metadata( + object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" + ), + "tool_choice": convert_and_respect_annotation_metadata( + object_=tool_choice, annotation=PromptLogUpdateRequestToolChoiceParams, direction="write" + ), + "output": output, + "created_at": created_at, + "error": error, + "provider_latency": provider_latency, + "stdout": stdout, + "provider_request": provider_request, + "provider_response": provider_response, + "inputs": inputs, + "source": source, + "metadata": metadata, + "start_time": start_time, + "end_time": end_time, + "log_status": log_status, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + LogResponse, + construct_type( + type_=LogResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + @contextlib.contextmanager + def call_stream( + self, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, + tool_choice: typing.Optional[PromptsCallStreamRequestToolChoiceParams] = OMIT, + prompt: typing.Optional[PromptKernelRequestParams] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + source_datapoint_id: typing.Optional[str] = OMIT, + trace_parent_id: typing.Optional[str] = OMIT, + user: typing.Optional[str] = OMIT, + prompts_call_stream_request_environment: typing.Optional[str] = OMIT, + save: typing.Optional[bool] = OMIT, + log_id: typing.Optional[str] = OMIT, + provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, + num_samples: 
typing.Optional[int] = OMIT, + return_inputs: typing.Optional[bool] = OMIT, + logprobs: typing.Optional[int] = OMIT, + suffix: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Iterator[HttpResponse[typing.Iterator[PromptCallStreamResponse]]]: + """ + Call a Prompt. + + Calling a Prompt calls the model provider before logging + the request, responses and metadata to Humanloop. + + You can use query parameters `version_id`, or `environment`, to target + an existing version of the Prompt. Otherwise the default deployed version will be chosen. + + Instead of targeting an existing version explicitly, you can instead pass in + Prompt details in the request body. In this case, we will check if the details correspond + to an existing version of the Prompt. If they do not, we will create a new version. This is helpful + in the case where you are storing or deriving your Prompt details in code. + + Parameters + ---------- + version_id : typing.Optional[str] + A specific Version ID of the Prompt to log to. + + environment : typing.Optional[str] + Name of the Environment identifying a deployed version to log to. + + path : typing.Optional[str] + Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Prompt. + + messages : typing.Optional[typing.Sequence[ChatMessageParams]] + The messages passed to the to provider chat endpoint. + + tool_choice : typing.Optional[PromptsCallStreamRequestToolChoiceParams] + Controls how the model uses tools. The following options are supported: + - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. + - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. + - `'required'` means the model must call one or more of the provided tools. + - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. + + prompt : typing.Optional[PromptKernelRequestParams] + Details of your Prompt. A new Prompt version will be created if the provided details are new. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the prompt template. + + source : typing.Optional[str] + Identifies where the model was called from. + + metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Any additional metadata to record. + + start_time : typing.Optional[dt.datetime] + When the logged event started. + + end_time : typing.Optional[dt.datetime] + When the logged event ended. + + log_status : typing.Optional[LogStatus] + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + + source_datapoint_id : typing.Optional[str] + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. 
+ + trace_parent_id : typing.Optional[str] + The ID of the parent Log to nest this Log under in a Trace. + + user : typing.Optional[str] + End-user ID related to the Log. + + prompts_call_stream_request_environment : typing.Optional[str] + The name of the Environment the Log is associated to. + + save : typing.Optional[bool] + Whether the request/response payloads will be stored on Humanloop. + + log_id : typing.Optional[str] + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + + provider_api_keys : typing.Optional[ProviderApiKeysParams] + API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. + + num_samples : typing.Optional[int] + The number of generations. + + return_inputs : typing.Optional[bool] + Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true. + + logprobs : typing.Optional[int] + Include the log probabilities of the top n tokens in the provider_response + + suffix : typing.Optional[str] + The suffix that comes after a completion of inserted text. Useful for completions that act like inserts. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Yields + ------ + typing.Iterator[HttpResponse[typing.Iterator[PromptCallStreamResponse]]] + + """ + with self._client_wrapper.httpx_client.stream( + "prompts/call", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + }, + json={ + "path": path, + "id": id, + "messages": convert_and_respect_annotation_metadata( + object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" + ), + "tool_choice": convert_and_respect_annotation_metadata( + object_=tool_choice, annotation=PromptsCallStreamRequestToolChoiceParams, direction="write" + ), + "prompt": convert_and_respect_annotation_metadata( + object_=prompt, annotation=PromptKernelRequestParams, direction="write" + ), + "inputs": inputs, + "source": source, + "metadata": metadata, + "start_time": start_time, + "end_time": end_time, + "log_status": log_status, + "source_datapoint_id": source_datapoint_id, + "trace_parent_id": trace_parent_id, + "user": user, + "environment": prompts_call_stream_request_environment, + "save": save, + "log_id": log_id, + "provider_api_keys": convert_and_respect_annotation_metadata( + object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write" + ), + "num_samples": num_samples, + "return_inputs": return_inputs, + "logprobs": logprobs, + "suffix": suffix, + "stream": True, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) as _response: + + def stream() -> HttpResponse[typing.Iterator[PromptCallStreamResponse]]: + try: + if 200 <= _response.status_code < 300: + + def _iter(): + _event_source = httpx_sse.EventSource(_response) + for _sse in _event_source.iter_sse(): + if _sse.data is None: + return + try: + # Parse each SSE payload into the declared chunk type rather than calling the raw `data` string. + yield typing.cast( + PromptCallStreamResponse, + construct_type( + type_=PromptCallStreamResponse, # type: ignore + object_=_sse.json(), + ), + ) + except Exception: + pass + return + + return HttpResponse(response=_response, data=_iter()) + _response.read() + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json()
+ except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + yield stream() + + def call( + self, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, + tool_choice: typing.Optional[PromptsCallRequestToolChoiceParams] = OMIT, + prompt: typing.Optional[PromptKernelRequestParams] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + source_datapoint_id: typing.Optional[str] = OMIT, + trace_parent_id: typing.Optional[str] = OMIT, + user: typing.Optional[str] = OMIT, + prompts_call_request_environment: typing.Optional[str] = OMIT, + save: typing.Optional[bool] = OMIT, + log_id: typing.Optional[str] = OMIT, + provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, + num_samples: typing.Optional[int] = OMIT, + return_inputs: typing.Optional[bool] = OMIT, + logprobs: typing.Optional[int] = OMIT, + suffix: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[PromptCallResponse]: + """ + Call a Prompt. + + Calling a Prompt calls the model provider before logging + the request, responses and metadata to Humanloop. + + You can use query parameters `version_id`, or `environment`, to target + an existing version of the Prompt. Otherwise the default deployed version will be chosen. + + Instead of targeting an existing version explicitly, you can instead pass in + Prompt details in the request body. In this case, we will check if the details correspond + to an existing version of the Prompt. If they do not, we will create a new version. This is helpful + in the case where you are storing or deriving your Prompt details in code. + + Parameters + ---------- + version_id : typing.Optional[str] + A specific Version ID of the Prompt to log to. + + environment : typing.Optional[str] + Name of the Environment identifying a deployed version to log to. + + path : typing.Optional[str] + Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Prompt. + + messages : typing.Optional[typing.Sequence[ChatMessageParams]] + The messages passed to the to provider chat endpoint. + + tool_choice : typing.Optional[PromptsCallRequestToolChoiceParams] + Controls how the model uses tools. The following options are supported: + - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. + - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. + - `'required'` means the model must call one or more of the provided tools. + - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. + + prompt : typing.Optional[PromptKernelRequestParams] + Details of your Prompt. 
A new Prompt version will be created if the provided details are new. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the prompt template. + + source : typing.Optional[str] + Identifies where the model was called from. + + metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Any additional metadata to record. + + start_time : typing.Optional[dt.datetime] + When the logged event started. + + end_time : typing.Optional[dt.datetime] + When the logged event ended. + + log_status : typing.Optional[LogStatus] + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + + source_datapoint_id : typing.Optional[str] + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + + trace_parent_id : typing.Optional[str] + The ID of the parent Log to nest this Log under in a Trace. + + user : typing.Optional[str] + End-user ID related to the Log. + + prompts_call_request_environment : typing.Optional[str] + The name of the Environment the Log is associated to. + + save : typing.Optional[bool] + Whether the request/response payloads will be stored on Humanloop. + + log_id : typing.Optional[str] + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + + provider_api_keys : typing.Optional[ProviderApiKeysParams] + API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. + + num_samples : typing.Optional[int] + The number of generations. + + return_inputs : typing.Optional[bool] + Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true. + + logprobs : typing.Optional[int] + Include the log probabilities of the top n tokens in the provider_response + + suffix : typing.Optional[str] + The suffix that comes after a completion of inserted text. Useful for completions that act like inserts. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[PromptCallResponse] + + """ + _response = self._client_wrapper.httpx_client.request( + "prompts/call", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + }, + json={ + "path": path, + "id": id, + "messages": convert_and_respect_annotation_metadata( + object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" + ), + "tool_choice": convert_and_respect_annotation_metadata( + object_=tool_choice, annotation=PromptsCallRequestToolChoiceParams, direction="write" + ), + "prompt": convert_and_respect_annotation_metadata( + object_=prompt, annotation=PromptKernelRequestParams, direction="write" + ), + "inputs": inputs, + "source": source, + "metadata": metadata, + "start_time": start_time, + "end_time": end_time, + "log_status": log_status, + "source_datapoint_id": source_datapoint_id, + "trace_parent_id": trace_parent_id, + "user": user, + "environment": prompts_call_request_environment, + "save": save, + "log_id": log_id, + "provider_api_keys": convert_and_respect_annotation_metadata( + object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write" + ), + "num_samples": num_samples, + "return_inputs": return_inputs, + "logprobs": logprobs, + "suffix": suffix, + "stream": False, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + PromptCallResponse, + construct_type( + type_=PromptCallResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def upsert( + self, + *, + model: str, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + endpoint: typing.Optional[ModelEndpoints] = OMIT, + template: typing.Optional[PromptRequestTemplateParams] = OMIT, + template_language: typing.Optional[TemplateLanguage] = OMIT, + provider: typing.Optional[ModelProviders] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + temperature: typing.Optional[float] = OMIT, + top_p: typing.Optional[float] = OMIT, + stop: typing.Optional[PromptRequestStopParams] = OMIT, + presence_penalty: typing.Optional[float] = OMIT, + frequency_penalty: typing.Optional[float] = OMIT, + other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + seed: typing.Optional[int] = OMIT, + response_format: typing.Optional[ResponseFormatParams] = OMIT, + reasoning_effort: typing.Optional[ReasoningEffort] = OMIT, + tools: typing.Optional[typing.Sequence[ToolFunctionParams]] = OMIT, + linked_tools: typing.Optional[typing.Sequence[str]] = OMIT, + attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + version_name: typing.Optional[str] = OMIT, + version_description: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + tags: typing.Optional[typing.Sequence[str]] = OMIT, + readme: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[PromptResponse]: + """ + Create a Prompt or update it with a new 
version if it already exists. + + Prompts are identified by the `ID` or their `path`. The parameters (i.e. the prompt template, temperature, model etc.) determine the versions of the Prompt. + + You can provide `version_name` and `version_description` to identify and describe your versions. + Version names must be unique within a Prompt - attempting to create a version with a name + that already exists will result in a 409 Conflict error. + + Parameters + ---------- + model : str + The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models) + + path : typing.Optional[str] + Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Prompt. + + endpoint : typing.Optional[ModelEndpoints] + The provider model endpoint used. + + template : typing.Optional[PromptRequestTemplateParams] + The template contains the main structure and instructions for the model, including input variables for dynamic values. + + For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable. + For completion models, provide a prompt template as a string. + + Input variables should be specified with double curly bracket syntax: `{{input_name}}`. + + template_language : typing.Optional[TemplateLanguage] + The template language to use for rendering the template. + + provider : typing.Optional[ModelProviders] + The company providing the underlying model service. + + max_tokens : typing.Optional[int] + The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt + + temperature : typing.Optional[float] + What sampling temperature to use when making a generation. Higher values means the model will be more creative. + + top_p : typing.Optional[float] + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. + + stop : typing.Optional[PromptRequestStopParams] + The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence. + + presence_penalty : typing.Optional[float] + Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far. + + frequency_penalty : typing.Optional[float] + Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far. + + other : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Other parameter values to be passed to the provider call. + + seed : typing.Optional[int] + If specified, model will make a best effort to sample deterministically, but it is not guaranteed. + + response_format : typing.Optional[ResponseFormatParams] + The format of the response. Only `{"type": "json_object"}` is currently supported for chat. + + reasoning_effort : typing.Optional[ReasoningEffort] + Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models. + + tools : typing.Optional[typing.Sequence[ToolFunctionParams]] + The tool specification that the model can choose to call if Tool calling is supported. 
+ + linked_tools : typing.Optional[typing.Sequence[str]] + The IDs of the Tools in your organization that the model can choose to call if Tool calling is supported. The default deployed version of that tool is called. + + attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used. + + version_name : typing.Optional[str] + Unique name for the Prompt version. Version names must be unique for a given Prompt. + + version_description : typing.Optional[str] + Description of the version, e.g., the changes made in this version. + + description : typing.Optional[str] + Description of the Prompt. + + tags : typing.Optional[typing.Sequence[str]] + List of tags associated with this prompt. + + readme : typing.Optional[str] + Long description of the Prompt. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[PromptResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + "prompts", + method="POST", + json={ + "path": path, + "id": id, + "model": model, + "endpoint": endpoint, + "template": convert_and_respect_annotation_metadata( + object_=template, annotation=PromptRequestTemplateParams, direction="write" + ), + "template_language": template_language, + "provider": provider, + "max_tokens": max_tokens, + "temperature": temperature, + "top_p": top_p, + "stop": convert_and_respect_annotation_metadata( + object_=stop, annotation=PromptRequestStopParams, direction="write" + ), + "presence_penalty": presence_penalty, + "frequency_penalty": frequency_penalty, + "other": other, + "seed": seed, + "response_format": convert_and_respect_annotation_metadata( + object_=response_format, annotation=ResponseFormatParams, direction="write" + ), + "reasoning_effort": reasoning_effort, + "tools": convert_and_respect_annotation_metadata( + object_=tools, annotation=typing.Sequence[ToolFunctionParams], direction="write" + ), + "linked_tools": linked_tools, + "attributes": attributes, + "version_name": version_name, + "version_description": version_description, + "description": description, + "tags": tags, + "readme": readme, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + PromptResponse, + construct_type( + type_=PromptResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[PromptResponse]: + """ + Retrieve the Prompt with the given ID. + + By default, the deployed version of the Prompt is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Prompt. 
+ + Parameters + ---------- + id : str + Unique identifier for Prompt. + + version_id : typing.Optional[str] + A specific Version ID of the Prompt to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[PromptResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}", + method="GET", + params={ + "version_id": version_id, + "environment": environment, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + PromptResponse, + construct_type( + type_=PromptResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> HttpResponse[None]: + """ + Delete the Prompt with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[None] + """ + _response = self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return HttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def move( + self, + id: str, + *, + path: typing.Optional[str] = OMIT, + name: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[PromptResponse]: + """ + Move the Prompt to a different path or change the name. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + path : typing.Optional[str] + Path of the Prompt including the Prompt name, which is used as a unique identifier. + + name : typing.Optional[str] + Name of the Prompt. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[PromptResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}", + method="PATCH", + json={ + "path": path, + "name": name, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + PromptResponse, + construct_type( + type_=PromptResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def populate( + self, + id: str, + *, + request: typing.Dict[str, typing.Optional[typing.Any]], + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[PopulateTemplateResponse]: + """ + Retrieve the Prompt with the given ID, including the populated template. + + By default, the deployed version of the Prompt is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Prompt. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + request : typing.Dict[str, typing.Optional[typing.Any]] + + version_id : typing.Optional[str] + A specific Version ID of the Prompt to retrieve to populate the template. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from to populate the template. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[PopulateTemplateResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}/populate", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + }, + json=request, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + PopulateTemplateResponse, + construct_type( + type_=PopulateTemplateResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def list_versions( + self, + id: str, + *, + evaluator_aggregates: typing.Optional[bool] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[ListPrompts]: + """ + Get a list of all the versions of a Prompt. + + Parameters + ---------- + id : str + Unique identifier for Prompt. 
+ + evaluator_aggregates : typing.Optional[bool] + Whether to include Evaluator aggregate results for the versions in the response + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[ListPrompts] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}/versions", + method="GET", + params={ + "evaluator_aggregates": evaluator_aggregates, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + ListPrompts, + construct_type( + type_=ListPrompts, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def delete_prompt_version( + self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[None]: + """ + Delete a version of the Prompt. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + version_id : str + Unique identifier for the specific version of the Prompt. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[None] + """ + _response = self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return HttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def patch_prompt_version( + self, + id: str, + version_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[PromptResponse]: + """ + Update the name or description of the Prompt version. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + version_id : str + Unique identifier for the specific version of the Prompt. + + name : typing.Optional[str] + Name of the version. + + description : typing.Optional[str] + Description of the version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[PromptResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", + method="PATCH", + json={ + "name": name, + "description": description, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + PromptResponse, + construct_type( + type_=PromptResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def set_deployment( + self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[PromptResponse]: + """ + Deploy Prompt to an Environment. + + Set the deployed version for the specified Environment. This Prompt + will be used for calls made to the Prompt in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + environment_id : str + Unique identifier for the Environment to deploy the Version to. + + version_id : str + Unique identifier for the specific version of the Prompt. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[PromptResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="POST", + params={ + "version_id": version_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + PromptResponse, + construct_type( + type_=PromptResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def remove_deployment( + self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[None]: + """ + Remove deployed Prompt from the Environment. + + Remove the deployed version for the specified Environment. This Prompt + will no longer be used for calls made to the Prompt in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + environment_id : str + Unique identifier for the Environment to remove the deployment from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[None] + """ + _response = self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return HttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def list_environments( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[typing.List[FileEnvironmentResponse]]: + """ + List all Environments and their deployed versions for the Prompt. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[typing.List[FileEnvironmentResponse]] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}/environments", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + typing.List[FileEnvironmentResponse], + construct_type( + type_=typing.List[FileEnvironmentResponse], # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def update_monitoring( + self, + id: str, + *, + activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, + deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[PromptResponse]: + """ + Activate and deactivate Evaluators for monitoring the Prompt. + + An activated Evaluator will automatically be run on all new Logs + within the Prompt for monitoring purposes. + + Parameters + ---------- + id : str + + activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] + Evaluators to activate for Monitoring. These will be automatically run on new Logs. + + deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] + Evaluators to deactivate. These will not be run on new Logs. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
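To inspect which version each Environment currently serves, the `list_environments` method shown above returns one `FileEnvironmentResponse` per Environment. A minimal sketch, assuming the high-level wrapper returns the parsed list rather than the raw `HttpResponse`:

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Each entry describes an Environment and the Prompt version
# (if any) currently deployed to it.
environments = client.prompts.list_environments(id="id")
for environment in environments:
    print(environment)
```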
+ + Returns + ------- + HttpResponse[PromptResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}/evaluators", + method="POST", + json={ + "activate": convert_and_respect_annotation_metadata( + object_=activate, + annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams], + direction="write", + ), + "deactivate": convert_and_respect_annotation_metadata( + object_=deactivate, + annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams], + direction="write", + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + PromptResponse, + construct_type( + type_=PromptResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + +class AsyncRawPromptsClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def log( + self, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + run_id: typing.Optional[str] = OMIT, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + output_message: typing.Optional[ChatMessageParams] = OMIT, + prompt_tokens: typing.Optional[int] = OMIT, + reasoning_tokens: typing.Optional[int] = OMIT, + output_tokens: typing.Optional[int] = OMIT, + prompt_cost: typing.Optional[float] = OMIT, + output_cost: typing.Optional[float] = OMIT, + finish_reason: typing.Optional[str] = OMIT, + messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, + tool_choice: typing.Optional[PromptLogRequestToolChoiceParams] = OMIT, + prompt: typing.Optional[PromptKernelRequestParams] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + output: typing.Optional[str] = OMIT, + created_at: typing.Optional[dt.datetime] = OMIT, + error: typing.Optional[str] = OMIT, + provider_latency: typing.Optional[float] = OMIT, + stdout: typing.Optional[str] = OMIT, + provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + source_datapoint_id: typing.Optional[str] = OMIT, + trace_parent_id: typing.Optional[str] = OMIT, + user: typing.Optional[str] = OMIT, + prompt_log_request_environment: typing.Optional[str] = OMIT, + save: typing.Optional[bool] = OMIT, + log_id: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[CreatePromptLogResponse]: + """ + Log to a Prompt. + + You can use query parameters `version_id`, or `environment`, to target + an existing version of the Prompt. Otherwise, the default deployed version will be chosen. 
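The async client mirrors the sync logging surface. A minimal sketch, assuming the package's `AsyncHumanloop` entry point (the async counterpart to `Humanloop`) exposes the same `prompts.log` parameters as the raw client here:

```python
import asyncio

from humanloop import AsyncHumanloop

client = AsyncHumanloop(api_key="YOUR_API_KEY")


async def main() -> None:
    # Log a single request/response pair against the default deployed version.
    await client.prompts.log(
        path="folder/my-prompt",
        messages=[{"role": "user", "content": "What is the capital of France?"}],
        output="Paris.",
    )


asyncio.run(main())
```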
+ + Instead of targeting an existing version explicitly, you can instead pass in + Prompt details in the request body. In this case, we will check if the details correspond + to an existing version of the Prompt. If they do not, we will create a new version. This is helpful + in the case where you are storing or deriving your Prompt details in code. + + Parameters + ---------- + version_id : typing.Optional[str] + A specific Version ID of the Prompt to log to. + + environment : typing.Optional[str] + Name of the Environment identifying a deployed version to log to. + + run_id : typing.Optional[str] + Unique identifier for the Run to associate the Log to. + + path : typing.Optional[str] + Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Prompt. + + output_message : typing.Optional[ChatMessageParams] + The message returned by the provider. + + prompt_tokens : typing.Optional[int] + Number of tokens in the prompt used to generate the output. + + reasoning_tokens : typing.Optional[int] + Number of reasoning tokens used to generate the output. + + output_tokens : typing.Optional[int] + Number of tokens in the output generated by the model. + + prompt_cost : typing.Optional[float] + Cost in dollars associated to the tokens in the prompt. + + output_cost : typing.Optional[float] + Cost in dollars associated to the tokens in the output. + + finish_reason : typing.Optional[str] + Reason the generation finished. + + messages : typing.Optional[typing.Sequence[ChatMessageParams]] + The messages passed to the to provider chat endpoint. + + tool_choice : typing.Optional[PromptLogRequestToolChoiceParams] + Controls how the model uses tools. The following options are supported: + - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. + - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. + - `'required'` means the model must call one or more of the provided tools. + - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. + + prompt : typing.Optional[PromptKernelRequestParams] + Details of your Prompt. A new Prompt version will be created if the provided details are new. + + start_time : typing.Optional[dt.datetime] + When the logged event started. + + end_time : typing.Optional[dt.datetime] + When the logged event ended. + + output : typing.Optional[str] + Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + + created_at : typing.Optional[dt.datetime] + User defined timestamp for when the log was created. + + error : typing.Optional[str] + Error message if the log is an error. + + provider_latency : typing.Optional[float] + Duration of the logged event in seconds. + + stdout : typing.Optional[str] + Captured log and debug statements. + + provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Raw request sent to provider. + + provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Raw response received the provider. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the prompt template. 
+ + source : typing.Optional[str] + Identifies where the model was called from. + + metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Any additional metadata to record. + + log_status : typing.Optional[LogStatus] + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + + source_datapoint_id : typing.Optional[str] + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + + trace_parent_id : typing.Optional[str] + The ID of the parent Log to nest this Log under in a Trace. + + user : typing.Optional[str] + End-user ID related to the Log. + + prompt_log_request_environment : typing.Optional[str] + The name of the Environment the Log is associated to. + + save : typing.Optional[bool] + Whether the request/response payloads will be stored on Humanloop. + + log_id : typing.Optional[str] + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[CreatePromptLogResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + "prompts/log", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + }, + json={ + "run_id": run_id, + "path": path, + "id": id, + "output_message": convert_and_respect_annotation_metadata( + object_=output_message, annotation=ChatMessageParams, direction="write" + ), + "prompt_tokens": prompt_tokens, + "reasoning_tokens": reasoning_tokens, + "output_tokens": output_tokens, + "prompt_cost": prompt_cost, + "output_cost": output_cost, + "finish_reason": finish_reason, + "messages": convert_and_respect_annotation_metadata( + object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" + ), + "tool_choice": convert_and_respect_annotation_metadata( + object_=tool_choice, annotation=PromptLogRequestToolChoiceParams, direction="write" + ), + "prompt": convert_and_respect_annotation_metadata( + object_=prompt, annotation=PromptKernelRequestParams, direction="write" + ), + "start_time": start_time, + "end_time": end_time, + "output": output, + "created_at": created_at, + "error": error, + "provider_latency": provider_latency, + "stdout": stdout, + "provider_request": provider_request, + "provider_response": provider_response, + "inputs": inputs, + "source": source, + "metadata": metadata, + "log_status": log_status, + "source_datapoint_id": source_datapoint_id, + "trace_parent_id": trace_parent_id, + "user": user, + "environment": prompt_log_request_environment, + "save": save, + "log_id": log_id, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + CreatePromptLogResponse, + construct_type( + type_=CreatePromptLogResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + 
typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def update_log( + self, + id: str, + log_id: str, + *, + output_message: typing.Optional[ChatMessageParams] = OMIT, + prompt_tokens: typing.Optional[int] = OMIT, + reasoning_tokens: typing.Optional[int] = OMIT, + output_tokens: typing.Optional[int] = OMIT, + prompt_cost: typing.Optional[float] = OMIT, + output_cost: typing.Optional[float] = OMIT, + finish_reason: typing.Optional[str] = OMIT, + messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, + tool_choice: typing.Optional[PromptLogUpdateRequestToolChoiceParams] = OMIT, + output: typing.Optional[str] = OMIT, + created_at: typing.Optional[dt.datetime] = OMIT, + error: typing.Optional[str] = OMIT, + provider_latency: typing.Optional[float] = OMIT, + stdout: typing.Optional[str] = OMIT, + provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[LogResponse]: + """ + Update a Log. + + Update the details of a Log with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + log_id : str + Unique identifier for the Log. + + output_message : typing.Optional[ChatMessageParams] + The message returned by the provider. + + prompt_tokens : typing.Optional[int] + Number of tokens in the prompt used to generate the output. + + reasoning_tokens : typing.Optional[int] + Number of reasoning tokens used to generate the output. + + output_tokens : typing.Optional[int] + Number of tokens in the output generated by the model. + + prompt_cost : typing.Optional[float] + Cost in dollars associated to the tokens in the prompt. + + output_cost : typing.Optional[float] + Cost in dollars associated to the tokens in the output. + + finish_reason : typing.Optional[str] + Reason the generation finished. + + messages : typing.Optional[typing.Sequence[ChatMessageParams]] + The messages passed to the to provider chat endpoint. + + tool_choice : typing.Optional[PromptLogUpdateRequestToolChoiceParams] + Controls how the model uses tools. The following options are supported: + - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. + - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. + - `'required'` means the model must call one or more of the provided tools. + - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. + + output : typing.Optional[str] + Generated output from your model for the provided inputs. 
Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + + created_at : typing.Optional[dt.datetime] + User defined timestamp for when the log was created. + + error : typing.Optional[str] + Error message if the log is an error. + + provider_latency : typing.Optional[float] + Duration of the logged event in seconds. + + stdout : typing.Optional[str] + Captured log and debug statements. + + provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Raw request sent to provider. + + provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Raw response received the provider. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the prompt template. + + source : typing.Optional[str] + Identifies where the model was called from. + + metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Any additional metadata to record. + + start_time : typing.Optional[dt.datetime] + When the logged event started. + + end_time : typing.Optional[dt.datetime] + When the logged event ended. + + log_status : typing.Optional[LogStatus] + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[LogResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}/log/{jsonable_encoder(log_id)}", + method="PATCH", + json={ + "output_message": convert_and_respect_annotation_metadata( + object_=output_message, annotation=ChatMessageParams, direction="write" + ), + "prompt_tokens": prompt_tokens, + "reasoning_tokens": reasoning_tokens, + "output_tokens": output_tokens, + "prompt_cost": prompt_cost, + "output_cost": output_cost, + "finish_reason": finish_reason, + "messages": convert_and_respect_annotation_metadata( + object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" + ), + "tool_choice": convert_and_respect_annotation_metadata( + object_=tool_choice, annotation=PromptLogUpdateRequestToolChoiceParams, direction="write" + ), + "output": output, + "created_at": created_at, + "error": error, + "provider_latency": provider_latency, + "stdout": stdout, + "provider_request": provider_request, + "provider_response": provider_response, + "inputs": inputs, + "source": source, + "metadata": metadata, + "start_time": start_time, + "end_time": end_time, + "log_status": log_status, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + LogResponse, + construct_type( + type_=LogResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise 
ApiError(status_code=_response.status_code, body=_response_json) + + @contextlib.asynccontextmanager + async def call_stream( + self, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, + tool_choice: typing.Optional[PromptsCallStreamRequestToolChoiceParams] = OMIT, + prompt: typing.Optional[PromptKernelRequestParams] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + source_datapoint_id: typing.Optional[str] = OMIT, + trace_parent_id: typing.Optional[str] = OMIT, + user: typing.Optional[str] = OMIT, + prompts_call_stream_request_environment: typing.Optional[str] = OMIT, + save: typing.Optional[bool] = OMIT, + log_id: typing.Optional[str] = OMIT, + provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, + num_samples: typing.Optional[int] = OMIT, + return_inputs: typing.Optional[bool] = OMIT, + logprobs: typing.Optional[int] = OMIT, + suffix: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[PromptCallStreamResponse]]]: + """ + Call a Prompt. + + Calling a Prompt calls the model provider before logging + the request, responses and metadata to Humanloop. + + You can use query parameters `version_id`, or `environment`, to target + an existing version of the Prompt. Otherwise the default deployed version will be chosen. + + Instead of targeting an existing version explicitly, you can instead pass in + Prompt details in the request body. In this case, we will check if the details correspond + to an existing version of the Prompt. If they do not, we will create a new version. This is helpful + in the case where you are storing or deriving your Prompt details in code. + + Parameters + ---------- + version_id : typing.Optional[str] + A specific Version ID of the Prompt to log to. + + environment : typing.Optional[str] + Name of the Environment identifying a deployed version to log to. + + path : typing.Optional[str] + Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Prompt. + + messages : typing.Optional[typing.Sequence[ChatMessageParams]] + The messages passed to the to provider chat endpoint. + + tool_choice : typing.Optional[PromptsCallStreamRequestToolChoiceParams] + Controls how the model uses tools. The following options are supported: + - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. + - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. + - `'required'` means the model must call one or more of the provided tools. + - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. + + prompt : typing.Optional[PromptKernelRequestParams] + Details of your Prompt. 
A new Prompt version will be created if the provided details are new. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the prompt template. + + source : typing.Optional[str] + Identifies where the model was called from. + + metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Any additional metadata to record. + + start_time : typing.Optional[dt.datetime] + When the logged event started. + + end_time : typing.Optional[dt.datetime] + When the logged event ended. + + log_status : typing.Optional[LogStatus] + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + + source_datapoint_id : typing.Optional[str] + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + + trace_parent_id : typing.Optional[str] + The ID of the parent Log to nest this Log under in a Trace. + + user : typing.Optional[str] + End-user ID related to the Log. + + prompts_call_stream_request_environment : typing.Optional[str] + The name of the Environment the Log is associated to. + + save : typing.Optional[bool] + Whether the request/response payloads will be stored on Humanloop. + + log_id : typing.Optional[str] + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + + provider_api_keys : typing.Optional[ProviderApiKeysParams] + API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. + + num_samples : typing.Optional[int] + The number of generations. + + return_inputs : typing.Optional[bool] + Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true. + + logprobs : typing.Optional[int] + Include the log probabilities of the top n tokens in the provider_response + + suffix : typing.Optional[str] + The suffix that comes after a completion of inserted text. Useful for completions that act like inserts. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
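Because the raw async `call_stream` is an async context manager whose `data` is an async iterator, consumption follows the sketch below. `raw_prompts` is a placeholder for however your code obtains an `AsyncRawPromptsClient` instance; it is not an attribute defined in this file.

```python
async def stream_completion(raw_prompts) -> None:
    # raw_prompts: a hypothetical AsyncRawPromptsClient instance.
    async with raw_prompts.call_stream(
        path="folder/my-prompt",
        messages=[{"role": "user", "content": "Tell me a joke"}],
    ) as response:
        # response.data is an AsyncIterator[PromptCallStreamResponse].
        async for chunk in response.data:
            print(chunk)
```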
+ + Yields + ------ + typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[PromptCallStreamResponse]]] + + """ + async with self._client_wrapper.httpx_client.stream( + "prompts/call", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + }, + json={ + "path": path, + "id": id, + "messages": convert_and_respect_annotation_metadata( + object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" + ), + "tool_choice": convert_and_respect_annotation_metadata( + object_=tool_choice, annotation=PromptsCallStreamRequestToolChoiceParams, direction="write" + ), + "prompt": convert_and_respect_annotation_metadata( + object_=prompt, annotation=PromptKernelRequestParams, direction="write" + ), + "inputs": inputs, + "source": source, + "metadata": metadata, + "start_time": start_time, + "end_time": end_time, + "log_status": log_status, + "source_datapoint_id": source_datapoint_id, + "trace_parent_id": trace_parent_id, + "user": user, + "environment": prompts_call_stream_request_environment, + "save": save, + "log_id": log_id, + "provider_api_keys": convert_and_respect_annotation_metadata( + object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write" + ), + "num_samples": num_samples, + "return_inputs": return_inputs, + "logprobs": logprobs, + "suffix": suffix, + "stream": True, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) as _response: + + async def stream() -> AsyncHttpResponse[typing.AsyncIterator[PromptCallStreamResponse]]: + try: + if 200 <= _response.status_code < 300: + + async def _iter(): + _event_source = httpx_sse.EventSource(_response) + async for _sse in _event_source.aiter_sse(): + if _sse.data == None: + return + try: + yield _sse.data() + except Exception: + pass + return + + return AsyncHttpResponse(response=_response, data=_iter()) + await _response.aread() + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + yield await stream() + + async def call( + self, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT, + tool_choice: typing.Optional[PromptsCallRequestToolChoiceParams] = OMIT, + prompt: typing.Optional[PromptKernelRequestParams] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + source_datapoint_id: typing.Optional[str] = OMIT, + trace_parent_id: typing.Optional[str] = OMIT, + user: typing.Optional[str] = OMIT, + prompts_call_request_environment: typing.Optional[str] = OMIT, + save: typing.Optional[bool] = OMIT, + log_id: typing.Optional[str] = OMIT, + provider_api_keys: typing.Optional[ProviderApiKeysParams] = OMIT, + num_samples: typing.Optional[int] = OMIT, + return_inputs: 
typing.Optional[bool] = OMIT, + logprobs: typing.Optional[int] = OMIT, + suffix: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[PromptCallResponse]: + """ + Call a Prompt. + + Calling a Prompt calls the model provider before logging + the request, responses and metadata to Humanloop. + + You can use query parameters `version_id`, or `environment`, to target + an existing version of the Prompt. Otherwise the default deployed version will be chosen. + + Instead of targeting an existing version explicitly, you can instead pass in + Prompt details in the request body. In this case, we will check if the details correspond + to an existing version of the Prompt. If they do not, we will create a new version. This is helpful + in the case where you are storing or deriving your Prompt details in code. + + Parameters + ---------- + version_id : typing.Optional[str] + A specific Version ID of the Prompt to log to. + + environment : typing.Optional[str] + Name of the Environment identifying a deployed version to log to. + + path : typing.Optional[str] + Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Prompt. + + messages : typing.Optional[typing.Sequence[ChatMessageParams]] + The messages passed to the to provider chat endpoint. + + tool_choice : typing.Optional[PromptsCallRequestToolChoiceParams] + Controls how the model uses tools. The following options are supported: + - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt. + - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt. + - `'required'` means the model must call one or more of the provided tools. + - `{'type': 'function', 'function': {name': }}` forces the model to use the named function. + + prompt : typing.Optional[PromptKernelRequestParams] + Details of your Prompt. A new Prompt version will be created if the provided details are new. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the prompt template. + + source : typing.Optional[str] + Identifies where the model was called from. + + metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Any additional metadata to record. + + start_time : typing.Optional[dt.datetime] + When the logged event started. + + end_time : typing.Optional[dt.datetime] + When the logged event ended. + + log_status : typing.Optional[LogStatus] + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + + source_datapoint_id : typing.Optional[str] + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + + trace_parent_id : typing.Optional[str] + The ID of the parent Log to nest this Log under in a Trace. 
+ + user : typing.Optional[str] + End-user ID related to the Log. + + prompts_call_request_environment : typing.Optional[str] + The name of the Environment the Log is associated to. + + save : typing.Optional[bool] + Whether the request/response payloads will be stored on Humanloop. + + log_id : typing.Optional[str] + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + + provider_api_keys : typing.Optional[ProviderApiKeysParams] + API keys required by each provider to make API calls. The API keys provided here are not stored by Humanloop. If not specified here, Humanloop will fall back to the key saved to your organization. + + num_samples : typing.Optional[int] + The number of generations. + + return_inputs : typing.Optional[bool] + Whether to return the inputs in the response. If false, the response will contain an empty dictionary under inputs. This is useful for reducing the size of the response. Defaults to true. + + logprobs : typing.Optional[int] + Include the log probabilities of the top n tokens in the provider_response + + suffix : typing.Optional[str] + The suffix that comes after a completion of inserted text. Useful for completions that act like inserts. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[PromptCallResponse] + + """ + _response = await self._client_wrapper.httpx_client.request( + "prompts/call", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + }, + json={ + "path": path, + "id": id, + "messages": convert_and_respect_annotation_metadata( + object_=messages, annotation=typing.Sequence[ChatMessageParams], direction="write" + ), + "tool_choice": convert_and_respect_annotation_metadata( + object_=tool_choice, annotation=PromptsCallRequestToolChoiceParams, direction="write" + ), + "prompt": convert_and_respect_annotation_metadata( + object_=prompt, annotation=PromptKernelRequestParams, direction="write" + ), + "inputs": inputs, + "source": source, + "metadata": metadata, + "start_time": start_time, + "end_time": end_time, + "log_status": log_status, + "source_datapoint_id": source_datapoint_id, + "trace_parent_id": trace_parent_id, + "user": user, + "environment": prompts_call_request_environment, + "save": save, + "log_id": log_id, + "provider_api_keys": convert_and_respect_annotation_metadata( + object_=provider_api_keys, annotation=ProviderApiKeysParams, direction="write" + ), + "num_samples": num_samples, + "return_inputs": return_inputs, + "logprobs": logprobs, + "suffix": suffix, + "stream": False, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + PromptCallResponse, + construct_type( + type_=PromptCallResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def upsert( + self, + *, + model: str, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + endpoint: 
typing.Optional[ModelEndpoints] = OMIT, + template: typing.Optional[PromptRequestTemplateParams] = OMIT, + template_language: typing.Optional[TemplateLanguage] = OMIT, + provider: typing.Optional[ModelProviders] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + temperature: typing.Optional[float] = OMIT, + top_p: typing.Optional[float] = OMIT, + stop: typing.Optional[PromptRequestStopParams] = OMIT, + presence_penalty: typing.Optional[float] = OMIT, + frequency_penalty: typing.Optional[float] = OMIT, + other: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + seed: typing.Optional[int] = OMIT, + response_format: typing.Optional[ResponseFormatParams] = OMIT, + reasoning_effort: typing.Optional[ReasoningEffort] = OMIT, + tools: typing.Optional[typing.Sequence[ToolFunctionParams]] = OMIT, + linked_tools: typing.Optional[typing.Sequence[str]] = OMIT, + attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + version_name: typing.Optional[str] = OMIT, + version_description: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + tags: typing.Optional[typing.Sequence[str]] = OMIT, + readme: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[PromptResponse]: + """ + Create a Prompt or update it with a new version if it already exists. + + Prompts are identified by the `ID` or their `path`. The parameters (i.e. the prompt template, temperature, model etc.) determine the versions of the Prompt. + + You can provide `version_name` and `version_description` to identify and describe your versions. + Version names must be unique within a Prompt - attempting to create a version with a name + that already exists will result in a 409 Conflict error. + + Parameters + ---------- + model : str + The model instance used, e.g. `gpt-4`. See [supported models](https://humanloop.com/docs/reference/supported-models) + + path : typing.Optional[str] + Path of the Prompt, including the name. This locates the Prompt in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Prompt. + + endpoint : typing.Optional[ModelEndpoints] + The provider model endpoint used. + + template : typing.Optional[PromptRequestTemplateParams] + The template contains the main structure and instructions for the model, including input variables for dynamic values. + + For chat models, provide the template as a ChatTemplate (a list of messages), e.g. a system message, followed by a user message with an input variable. + For completion models, provide a prompt template as a string. + + Input variables should be specified with double curly bracket syntax: `{{input_name}}`. + + template_language : typing.Optional[TemplateLanguage] + The template language to use for rendering the template. + + provider : typing.Optional[ModelProviders] + The company providing the underlying model service. + + max_tokens : typing.Optional[int] + The maximum number of tokens to generate. Provide max_tokens=-1 to dynamically calculate the maximum number of tokens to generate given the length of the prompt + + temperature : typing.Optional[float] + What sampling temperature to use when making a generation. Higher values means the model will be more creative. 
+ + top_p : typing.Optional[float] + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. + + stop : typing.Optional[PromptRequestStopParams] + The string (or list of strings) after which the model will stop generating. The returned text will not contain the stop sequence. + + presence_penalty : typing.Optional[float] + Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the generation so far. + + frequency_penalty : typing.Optional[float] + Number between -2.0 and 2.0. Positive values penalize new tokens based on how frequently they appear in the generation so far. + + other : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Other parameter values to be passed to the provider call. + + seed : typing.Optional[int] + If specified, model will make a best effort to sample deterministically, but it is not guaranteed. + + response_format : typing.Optional[ResponseFormatParams] + The format of the response. Only `{"type": "json_object"}` is currently supported for chat. + + reasoning_effort : typing.Optional[ReasoningEffort] + Give model guidance on how many reasoning tokens it should generate before creating a response to the prompt. This is only supported for OpenAI reasoning (o1, o3-mini) models. + + tools : typing.Optional[typing.Sequence[ToolFunctionParams]] + The tool specification that the model can choose to call if Tool calling is supported. + + linked_tools : typing.Optional[typing.Sequence[str]] + The IDs of the Tools in your organization that the model can choose to call if Tool calling is supported. The default deployed version of that tool is called. + + attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used. + + version_name : typing.Optional[str] + Unique name for the Prompt version. Version names must be unique for a given Prompt. + + version_description : typing.Optional[str] + Description of the version, e.g., the changes made in this version. + + description : typing.Optional[str] + Description of the Prompt. + + tags : typing.Optional[typing.Sequence[str]] + List of tags associated with this prompt. + + readme : typing.Optional[str] + Long description of the Prompt. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
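With commit messages removed, new versions are distinguished by `version_name` and `version_description`. A sketch of the async upsert, again assuming the `AsyncHumanloop` entry point; reusing a `version_name` that already exists on the Prompt is expected to return a 409 Conflict:

```python
import asyncio

from humanloop import AsyncHumanloop

client = AsyncHumanloop(api_key="YOUR_API_KEY")


async def main() -> None:
    # Creates the Prompt if it does not exist, or adds a new named version.
    await client.prompts.upsert(
        path="folder/my-prompt",
        model="gpt-4o",
        temperature=0.5,
        version_name="low-temperature",
        version_description="Lowered temperature for more deterministic output.",
    )


asyncio.run(main())
```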
+ + Returns + ------- + AsyncHttpResponse[PromptResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + "prompts", + method="POST", + json={ + "path": path, + "id": id, + "model": model, + "endpoint": endpoint, + "template": convert_and_respect_annotation_metadata( + object_=template, annotation=PromptRequestTemplateParams, direction="write" + ), + "template_language": template_language, + "provider": provider, + "max_tokens": max_tokens, + "temperature": temperature, + "top_p": top_p, + "stop": convert_and_respect_annotation_metadata( + object_=stop, annotation=PromptRequestStopParams, direction="write" + ), + "presence_penalty": presence_penalty, + "frequency_penalty": frequency_penalty, + "other": other, + "seed": seed, + "response_format": convert_and_respect_annotation_metadata( + object_=response_format, annotation=ResponseFormatParams, direction="write" + ), + "reasoning_effort": reasoning_effort, + "tools": convert_and_respect_annotation_metadata( + object_=tools, annotation=typing.Sequence[ToolFunctionParams], direction="write" + ), + "linked_tools": linked_tools, + "attributes": attributes, + "version_name": version_name, + "version_description": version_description, + "description": description, + "tags": tags, + "readme": readme, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + PromptResponse, + construct_type( + type_=PromptResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[PromptResponse]: + """ + Retrieve the Prompt with the given ID. + + By default, the deployed version of the Prompt is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Prompt. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + version_id : typing.Optional[str] + A specific Version ID of the Prompt to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
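Retrieval defaults to the deployed version unless `version_id` or `environment` is given. A minimal async sketch under the same `AsyncHumanloop` assumption:

```python
import asyncio

from humanloop import AsyncHumanloop

client = AsyncHumanloop(api_key="YOUR_API_KEY")


async def main() -> None:
    # Fetch the version deployed to a named Environment.
    prompt = await client.prompts.get(id="id", environment="production")
    print(prompt)


asyncio.run(main())
```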
+ + Returns + ------- + AsyncHttpResponse[PromptResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}", + method="GET", + params={ + "version_id": version_id, + "environment": environment, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + PromptResponse, + construct_type( + type_=PromptResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def delete( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[None]: + """ + Delete the Prompt with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[None] + """ + _response = await self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return AsyncHttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def move( + self, + id: str, + *, + path: typing.Optional[str] = OMIT, + name: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[PromptResponse]: + """ + Move the Prompt to a different path or change the name. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + path : typing.Optional[str] + Path of the Prompt including the Prompt name, which is used as a unique identifier. + + name : typing.Optional[str] + Name of the Prompt. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
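`move` renames a Prompt or relocates it within the Humanloop filesystem; the final path segment becomes its name. A minimal async sketch under the same assumptions:

```python
import asyncio

from humanloop import AsyncHumanloop

client = AsyncHumanloop(api_key="YOUR_API_KEY")


async def main() -> None:
    # Relocate the Prompt to a new folder without changing its versions.
    await client.prompts.move(id="id", path="archive/my-prompt")


asyncio.run(main())
```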
+ + Returns + ------- + AsyncHttpResponse[PromptResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}", + method="PATCH", + json={ + "path": path, + "name": name, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + PromptResponse, + construct_type( + type_=PromptResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def populate( + self, + id: str, + *, + request: typing.Dict[str, typing.Optional[typing.Any]], + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[PopulateTemplateResponse]: + """ + Retrieve the Prompt with the given ID, including the populated template. + + By default, the deployed version of the Prompt is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Prompt. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + request : typing.Dict[str, typing.Optional[typing.Any]] + + version_id : typing.Optional[str] + A specific Version ID of the Prompt to retrieve to populate the template. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from to populate the template. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[PopulateTemplateResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}/populate", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + }, + json=request, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + PopulateTemplateResponse, + construct_type( + type_=PopulateTemplateResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def list_versions( + self, + id: str, + *, + evaluator_aggregates: typing.Optional[bool] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[ListPrompts]: + """ + Get a list of all the versions of a Prompt. + + Parameters + ---------- + id : str + Unique identifier for Prompt. 
+ + evaluator_aggregates : typing.Optional[bool] + Whether to include Evaluator aggregate results for the versions in the response + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[ListPrompts] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}/versions", + method="GET", + params={ + "evaluator_aggregates": evaluator_aggregates, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + ListPrompts, + construct_type( + type_=ListPrompts, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def delete_prompt_version( + self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[None]: + """ + Delete a version of the Prompt. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + version_id : str + Unique identifier for the specific version of the Prompt. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[None] + """ + _response = await self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return AsyncHttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def patch_prompt_version( + self, + id: str, + version_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[PromptResponse]: + """ + Update the name or description of the Prompt version. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + version_id : str + Unique identifier for the specific version of the Prompt. + + name : typing.Optional[str] + Name of the version. + + description : typing.Optional[str] + Description of the version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[PromptResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", + method="PATCH", + json={ + "name": name, + "description": description, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + PromptResponse, + construct_type( + type_=PromptResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def set_deployment( + self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[PromptResponse]: + """ + Deploy Prompt to an Environment. + + Set the deployed version for the specified Environment. This Prompt + will be used for calls made to the Prompt in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + environment_id : str + Unique identifier for the Environment to deploy the Version to. + + version_id : str + Unique identifier for the specific version of the Prompt. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[PromptResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="POST", + params={ + "version_id": version_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + PromptResponse, + construct_type( + type_=PromptResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def remove_deployment( + self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[None]: + """ + Remove deployed Prompt from the Environment. + + Remove the deployed version for the specified Environment. This Prompt + will no longer be used for calls made to the Prompt in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + environment_id : str + Unique identifier for the Environment to remove the deployment from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[None] + """ + _response = await self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return AsyncHttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def list_environments( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[typing.List[FileEnvironmentResponse]]: + """ + List all Environments and their deployed versions for the Prompt. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[typing.List[FileEnvironmentResponse]] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}/environments", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + typing.List[FileEnvironmentResponse], + construct_type( + type_=typing.List[FileEnvironmentResponse], # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def update_monitoring( + self, + id: str, + *, + activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, + deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[PromptResponse]: + """ + Activate and deactivate Evaluators for monitoring the Prompt. + + An activated Evaluator will automatically be run on all new Logs + within the Prompt for monitoring purposes. + + Parameters + ---------- + id : str + + activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] + Evaluators to activate for Monitoring. These will be automatically run on new Logs. + + deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] + Evaluators to deactivate. These will not be run on new Logs. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
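A short sketch of monitoring activation for a Prompt, assuming the synchronous prompts client exposes the same `update_monitoring` signature as the tools client shown later in this diff; IDs are placeholders.

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Activate one Evaluator version for monitoring and deactivate another;
# activated Evaluators run automatically on new Logs for this Prompt.
client.prompts.update_monitoring(
    id="pr_30gco7dx6JDq4200GVOHa",  # placeholder Prompt ID
    activate=[{"evaluator_version_id": "evv_1abc4308abd"}],
    deactivate=[{"evaluator_version_id": "evv_9def6543cba"}],  # placeholder ID
)
```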
+ + Returns + ------- + AsyncHttpResponse[PromptResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"prompts/{jsonable_encoder(id)}/evaluators", + method="POST", + json={ + "activate": convert_and_respect_annotation_metadata( + object_=activate, + annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams], + direction="write", + ), + "deactivate": convert_and_respect_annotation_metadata( + object_=deactivate, + annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams], + direction="write", + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + PromptResponse, + construct_type( + type_=PromptResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/humanloop/requests/__init__.py b/src/humanloop/requests/__init__.py index e0e9304e..bd9458ba 100644 --- a/src/humanloop/requests/__init__.py +++ b/src/humanloop/requests/__init__.py @@ -6,7 +6,6 @@ from .chat_message_content import ChatMessageContentParams from .chat_message_content_item import ChatMessageContentItemParams from .code_evaluator_request import CodeEvaluatorRequestParams -from .commit_request import CommitRequestParams from .create_datapoint_request import CreateDatapointRequestParams from .create_datapoint_request_target_value import CreateDatapointRequestTargetValueParams from .create_evaluator_log_response import CreateEvaluatorLogResponseParams @@ -123,6 +122,7 @@ from .tool_kernel_request import ToolKernelRequestParams from .tool_log_response import ToolLogResponseParams from .tool_response import ToolResponseParams +from .update_version_request import UpdateVersionRequestParams from .validation_error import ValidationErrorParams from .validation_error_loc_item import ValidationErrorLocItemParams from .version_deployment_response import VersionDeploymentResponseParams @@ -141,7 +141,6 @@ "ChatMessageContentParams", "ChatMessageParams", "CodeEvaluatorRequestParams", - "CommitRequestParams", "CreateDatapointRequestParams", "CreateDatapointRequestTargetValueParams", "CreateEvaluatorLogResponseParams", @@ -248,6 +247,7 @@ "ToolKernelRequestParams", "ToolLogResponseParams", "ToolResponseParams", + "UpdateVersionRequestParams", "ValidationErrorLocItemParams", "ValidationErrorParams", "VersionDeploymentResponseFileParams", diff --git a/src/humanloop/requests/commit_request.py b/src/humanloop/requests/commit_request.py deleted file mode 100644 index 7d8134bd..00000000 --- a/src/humanloop/requests/commit_request.py +++ /dev/null @@ -1,10 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing_extensions - - -class CommitRequestParams(typing_extensions.TypedDict): - commit_message: str - """ - Message describing the changes made. 
- """ diff --git a/src/humanloop/requests/dataset_response.py b/src/humanloop/requests/dataset_response.py index 88f95068..1d59ed4b 100644 --- a/src/humanloop/requests/dataset_response.py +++ b/src/humanloop/requests/dataset_response.py @@ -6,7 +6,6 @@ from .environment_response import EnvironmentResponseParams import datetime as dt from ..types.user_response import UserResponse -from ..types.version_status import VersionStatus from .datapoint_response import DatapointResponseParams @@ -71,25 +70,15 @@ class DatasetResponseParams(typing_extensions.TypedDict): The user who created the Dataset. """ - committed_by: typing_extensions.NotRequired[UserResponse] - """ - The user who committed the Dataset Version. - """ - - committed_at: typing_extensions.NotRequired[dt.datetime] - """ - The date and time the Dataset Version was committed. - """ - - status: VersionStatus + last_used_at: dt.datetime + version_name: typing_extensions.NotRequired[str] """ - The status of the Dataset Version. + Unique name for the Dataset version. Version names must be unique for a given Dataset. """ - last_used_at: dt.datetime - commit_message: typing_extensions.NotRequired[str] + version_description: typing_extensions.NotRequired[str] """ - Message describing the changes made. If provided, a committed version of the Dataset is created. Otherwise, an uncommitted version is created. + Description of the version, e.g., the changes made in this version. """ datapoints_count: int diff --git a/src/humanloop/requests/evaluator_response.py b/src/humanloop/requests/evaluator_response.py index fe1230b6..908eeb2d 100644 --- a/src/humanloop/requests/evaluator_response.py +++ b/src/humanloop/requests/evaluator_response.py @@ -8,7 +8,6 @@ from .environment_response import EnvironmentResponseParams import datetime as dt from ..types.user_response import UserResponse -from ..types.version_status import VersionStatus from .input_response import InputResponseParams from .evaluator_aggregate import EvaluatorAggregateParams import typing @@ -37,9 +36,14 @@ class EvaluatorResponseParams(typing_extensions.TypedDict): ID of the directory that the file is in on Humanloop. """ - commit_message: typing_extensions.NotRequired[str] + version_name: typing_extensions.NotRequired[str] """ - Message describing the changes made. + Unique name for the Evaluator version. Version names must be unique for a given Evaluator. + """ + + version_description: typing_extensions.NotRequired[str] + """ + Description of the version, e.g., the changes made in this version. """ spec: EvaluatorResponseSpecParams @@ -81,17 +85,6 @@ class EvaluatorResponseParams(typing_extensions.TypedDict): The user who created the Evaluator. """ - committed_by: typing_extensions.NotRequired[UserResponse] - """ - The user who committed the Evaluator Version. - """ - - committed_at: typing_extensions.NotRequired[dt.datetime] - """ - The date and time the Evaluator Version was committed. 
- """ - - status: VersionStatus last_used_at: dt.datetime version_logs_count: int """ diff --git a/src/humanloop/requests/flow_response.py b/src/humanloop/requests/flow_response.py index 0adbc54c..18a26d10 100644 --- a/src/humanloop/requests/flow_response.py +++ b/src/humanloop/requests/flow_response.py @@ -7,7 +7,6 @@ from .environment_response import EnvironmentResponseParams import datetime as dt from ..types.user_response import UserResponse -from ..types.version_status import VersionStatus from .evaluator_aggregate import EvaluatorAggregateParams import typing @@ -40,9 +39,14 @@ class FlowResponseParams(typing_extensions.TypedDict): A key-value object identifying the Flow Version. """ - commit_message: typing_extensions.NotRequired[str] + version_name: typing_extensions.NotRequired[str] """ - Message describing the changes made. If provided, a committed version of the Flow is created. Otherwise, an uncommitted version is created. + Unique name for the Flow version. Version names must be unique for a given Flow. + """ + + version_description: typing_extensions.NotRequired[str] + """ + Description of the Version. """ name: str @@ -83,21 +87,6 @@ class FlowResponseParams(typing_extensions.TypedDict): The user who created the Flow. """ - committed_by: typing_extensions.NotRequired[UserResponse] - """ - The user who committed the Flow Version. - """ - - committed_at: typing_extensions.NotRequired[dt.datetime] - """ - The date and time the Flow Version was committed. - """ - - status: VersionStatus - """ - The status of the Flow Version. - """ - last_used_at: dt.datetime version_logs_count: int """ diff --git a/src/humanloop/requests/populate_template_response.py b/src/humanloop/requests/populate_template_response.py index 9faac4f7..190341b0 100644 --- a/src/humanloop/requests/populate_template_response.py +++ b/src/humanloop/requests/populate_template_response.py @@ -15,7 +15,6 @@ from .environment_response import EnvironmentResponseParams import datetime as dt from ..types.user_response import UserResponse -from ..types.version_status import VersionStatus from .input_response import InputResponseParams from .monitoring_evaluator_response import MonitoringEvaluatorResponseParams from .evaluator_aggregate import EvaluatorAggregateParams @@ -140,9 +139,14 @@ class PopulateTemplateResponseParams(typing_extensions.TypedDict): Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used. """ - commit_message: typing_extensions.NotRequired[str] + version_name: typing_extensions.NotRequired[str] """ - Message describing the changes made. + Unique name for the Prompt version. Version names must be unique for a given Prompt. + """ + + version_description: typing_extensions.NotRequired[str] + """ + Description of the version, e.g., the changes made in this version. """ description: typing_extensions.NotRequired[str] @@ -183,21 +187,6 @@ class PopulateTemplateResponseParams(typing_extensions.TypedDict): The user who created the Prompt. """ - committed_by: typing_extensions.NotRequired[UserResponse] - """ - The user who committed the Prompt Version. - """ - - committed_at: typing_extensions.NotRequired[dt.datetime] - """ - The date and time the Prompt Version was committed. - """ - - status: VersionStatus - """ - The status of the Prompt Version. 
- """ - last_used_at: dt.datetime version_logs_count: int """ diff --git a/src/humanloop/requests/prompt_response.py b/src/humanloop/requests/prompt_response.py index 15c447b4..912866c5 100644 --- a/src/humanloop/requests/prompt_response.py +++ b/src/humanloop/requests/prompt_response.py @@ -16,7 +16,6 @@ from .environment_response import EnvironmentResponseParams import datetime as dt from ..types.user_response import UserResponse -from ..types.version_status import VersionStatus from .input_response import InputResponseParams from .evaluator_aggregate import EvaluatorAggregateParams import typing @@ -143,9 +142,14 @@ class PromptResponseParams(typing_extensions.TypedDict): Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used. """ - commit_message: typing_extensions.NotRequired[str] + version_name: typing_extensions.NotRequired[str] """ - Message describing the changes made. + Unique name for the Prompt version. Version names must be unique for a given Prompt. + """ + + version_description: typing_extensions.NotRequired[str] + """ + Description of the version, e.g., the changes made in this version. """ description: typing_extensions.NotRequired[str] @@ -186,21 +190,6 @@ class PromptResponseParams(typing_extensions.TypedDict): The user who created the Prompt. """ - committed_by: typing_extensions.NotRequired[UserResponse] - """ - The user who committed the Prompt Version. - """ - - committed_at: typing_extensions.NotRequired[dt.datetime] - """ - The date and time the Prompt Version was committed. - """ - - status: VersionStatus - """ - The status of the Prompt Version. - """ - last_used_at: dt.datetime version_logs_count: int """ diff --git a/src/humanloop/requests/tool_response.py b/src/humanloop/requests/tool_response.py index c09ceb2e..0261405e 100644 --- a/src/humanloop/requests/tool_response.py +++ b/src/humanloop/requests/tool_response.py @@ -9,7 +9,6 @@ from .environment_response import EnvironmentResponseParams import datetime as dt from ..types.user_response import UserResponse -from ..types.version_status import VersionStatus from .input_response import InputResponseParams from .evaluator_aggregate import EvaluatorAggregateParams import typing @@ -66,9 +65,14 @@ class ToolResponseParams(typing_extensions.TypedDict): Type of Tool. """ - commit_message: typing_extensions.NotRequired[str] + version_name: typing_extensions.NotRequired[str] """ - Message describing the changes made. + Unique identifier for this Tool version. Each Tool can only have one version with a given name. + """ + + version_description: typing_extensions.NotRequired[str] + """ + Description of the Version. """ name: str @@ -109,21 +113,6 @@ class ToolResponseParams(typing_extensions.TypedDict): The user who created the Tool. """ - committed_by: typing_extensions.NotRequired[UserResponse] - """ - The user who committed the Tool Version. - """ - - committed_at: typing_extensions.NotRequired[dt.datetime] - """ - The date and time the Tool Version was committed. - """ - - status: VersionStatus - """ - The status of the Tool Version. - """ - last_used_at: dt.datetime version_logs_count: int """ diff --git a/src/humanloop/requests/update_version_request.py b/src/humanloop/requests/update_version_request.py new file mode 100644 index 00000000..cbdbd26b --- /dev/null +++ b/src/humanloop/requests/update_version_request.py @@ -0,0 +1,16 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing_extensions +import typing_extensions + + +class UpdateVersionRequestParams(typing_extensions.TypedDict): + name: typing_extensions.NotRequired[str] + """ + Name of the version. + """ + + description: typing_extensions.NotRequired[str] + """ + Description of the version. + """ diff --git a/src/humanloop/tools/client.py b/src/humanloop/tools/client.py index a3e54db4..f6c91e47 100644 --- a/src/humanloop/tools/client.py +++ b/src/humanloop/tools/client.py @@ -2,27 +2,25 @@ import typing from ..core.client_wrapper import SyncClientWrapper +from .raw_client import RawToolsClient import datetime as dt from ..types.log_status import LogStatus from ..requests.tool_kernel_request import ToolKernelRequestParams from ..core.request_options import RequestOptions from ..types.create_tool_log_response import CreateToolLogResponse -from ..core.serialization import convert_and_respect_annotation_metadata -from ..core.unchecked_base_model import construct_type -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.http_validation_error import HttpValidationError -from json.decoder import JSONDecodeError -from ..core.api_error import ApiError from ..types.log_response import LogResponse -from ..core.jsonable_encoder import jsonable_encoder from ..types.project_sort_by import ProjectSortBy from ..types.sort_order import SortOrder from ..core.pagination import SyncPager from ..types.tool_response import ToolResponse from ..types.paginated_data_tool_response import PaginatedDataToolResponse +from ..core.unchecked_base_model import construct_type +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..types.http_validation_error import HttpValidationError +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError from ..requests.tool_function import ToolFunctionParams from ..types.files_tool_type import FilesToolType -from ..types.version_status import VersionStatus from ..types.list_tools import ListTools from ..types.file_environment_response import FileEnvironmentResponse from ..requests.evaluator_activation_deactivation_request_activate_item import ( @@ -32,6 +30,7 @@ EvaluatorActivationDeactivationRequestDeactivateItemParams, ) from ..core.client_wrapper import AsyncClientWrapper +from .raw_client import AsyncRawToolsClient from ..core.pagination import AsyncPager # this is used as the default value for optional parameters @@ -40,7 +39,18 @@ class ToolsClient: def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper + self._raw_client = RawToolsClient(client_wrapper=client_wrapper) + + @property + def with_raw_response(self) -> RawToolsClient: + """ + Retrieves a raw implementation of this client that returns raw responses. 
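A brief sketch of the new `with_raw_response` accessor, using only what the wrapper shown here guarantees (the parsed model is available on `.data`); the Tool ID is a placeholder.

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# The high-level method returns the parsed ToolResponse directly...
tool = client.tools.get(id="tl_789ghi")  # placeholder Tool ID

# ...while the raw client returns an HttpResponse wrapper whose `.data`
# attribute carries the same parsed ToolResponse.
raw = client.tools.with_raw_response.get(id="tl_789ghi")
assert raw.data.id == tool.id
```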
+ + Returns + ------- + RawToolsClient + """ + return self._raw_client def log( self, @@ -191,68 +201,34 @@ def log( output="35", ) """ - _response = self._client_wrapper.httpx_client.request( - "tools/log", - method="POST", - params={ - "version_id": version_id, - "environment": environment, - }, - json={ - "path": path, - "id": id, - "start_time": start_time, - "end_time": end_time, - "output": output, - "created_at": created_at, - "error": error, - "provider_latency": provider_latency, - "stdout": stdout, - "provider_request": provider_request, - "provider_response": provider_response, - "inputs": inputs, - "source": source, - "metadata": metadata, - "log_status": log_status, - "source_datapoint_id": source_datapoint_id, - "trace_parent_id": trace_parent_id, - "user": user, - "environment": tool_log_request_environment, - "save": save, - "log_id": log_id, - "tool": convert_and_respect_annotation_metadata( - object_=tool, annotation=ToolKernelRequestParams, direction="write" - ), - }, - headers={ - "content-type": "application/json", - }, + response = self._raw_client.log( + version_id=version_id, + environment=environment, + path=path, + id=id, + start_time=start_time, + end_time=end_time, + output=output, + created_at=created_at, + error=error, + provider_latency=provider_latency, + stdout=stdout, + provider_request=provider_request, + provider_response=provider_response, + inputs=inputs, + source=source, + metadata=metadata, + log_status=log_status, + source_datapoint_id=source_datapoint_id, + trace_parent_id=trace_parent_id, + user=user, + tool_log_request_environment=tool_log_request_environment, + save=save, + log_id=log_id, + tool=tool, request_options=request_options, - omit=OMIT, ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - CreateToolLogResponse, - construct_type( - type_=CreateToolLogResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data def update( self, @@ -346,53 +322,25 @@ def update( log_id="log_id", ) """ - _response = self._client_wrapper.httpx_client.request( - f"tools/{jsonable_encoder(id)}/log/{jsonable_encoder(log_id)}", - method="PATCH", - json={ - "output": output, - "created_at": created_at, - "error": error, - "provider_latency": provider_latency, - "stdout": stdout, - "provider_request": provider_request, - "provider_response": provider_response, - "inputs": inputs, - "source": source, - "metadata": metadata, - "start_time": start_time, - "end_time": end_time, - "log_status": log_status, - }, - headers={ - "content-type": "application/json", - }, + response = self._raw_client.update( + id, + log_id, + output=output, + created_at=created_at, + error=error, + provider_latency=provider_latency, + stdout=stdout, + provider_request=provider_request, + provider_response=provider_response, + inputs=inputs, + source=source, + metadata=metadata, + start_time=start_time, + end_time=end_time, + log_status=log_status, request_options=request_options, - omit=OMIT, ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - LogResponse, - construct_type( - type_=LogResponse, # type: ignore - 
object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data def list( self, @@ -453,7 +401,7 @@ def list( yield page """ page = page if page is not None else 1 - _response = self._client_wrapper.httpx_client.request( + _response = self._raw_client._client_wrapper.httpx_client.request( "tools", method="GET", params={ @@ -512,7 +460,8 @@ def upsert( setup_values: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, tool_type: typing.Optional[FilesToolType] = OMIT, - commit_message: typing.Optional[str] = OMIT, + version_name: typing.Optional[str] = OMIT, + version_description: typing.Optional[str] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> ToolResponse: """ @@ -520,9 +469,9 @@ def upsert( Tools are identified by the `ID` or their `path`. The name, description and parameters determine the versions of the Tool. - If you provide a commit message, then the new version will be committed; - otherwise it will be uncommitted. If you try to commit an already committed version, - an exception will be raised. + You can provide `version_name` and `version_description` to identify and describe your versions. + Version names must be unique within a Tool - attempting to create a version with a name + that already exists will result in a 409 Conflict error. Parameters ---------- @@ -547,8 +496,11 @@ def upsert( tool_type : typing.Optional[FilesToolType] Type of Tool. - commit_message : typing.Optional[str] - Message describing the changes made. + version_name : typing.Optional[str] + Unique identifier for this Tool version. Each Tool can only have one version with a given name. + + version_description : typing.Optional[str] + Description of the Version. request_options : typing.Optional[RequestOptions] Request-specific configuration. 
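Since upsert now versions Tools by `version_name` rather than `commit_message`, a minimal sketch of the new call follows; the path and names are hypothetical, and reusing a `version_name` with different parameters would surface the 409 Conflict described above.

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

tool = client.tools.upsert(
    path="math-tool",  # hypothetical Tool path
    function={
        "name": "multiply",
        "description": "Multiply two numbers.",
        "parameters": {
            "type": "object",
            "properties": {"a": {"type": "number"}, "b": {"type": "number"}},
            "required": ["a", "b"],
        },
    },
    version_name="multiply-v1",                        # must be unique per Tool
    version_description="Initial multiply function.",  # replaces commit_message
)
print(tool.version_name)
```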
@@ -576,53 +528,21 @@ def upsert( "required": ["a", "b"], }, }, - commit_message="Initial commit", ) """ - _response = self._client_wrapper.httpx_client.request( - "tools", - method="POST", - json={ - "path": path, - "id": id, - "function": convert_and_respect_annotation_metadata( - object_=function, annotation=ToolFunctionParams, direction="write" - ), - "source_code": source_code, - "setup_values": setup_values, - "attributes": attributes, - "tool_type": tool_type, - "commit_message": commit_message, - }, - headers={ - "content-type": "application/json", - }, + response = self._raw_client.upsert( + path=path, + id=id, + function=function, + source_code=source_code, + setup_values=setup_values, + attributes=attributes, + tool_type=tool_type, + version_name=version_name, + version_description=version_description, request_options=request_options, - omit=OMIT, ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - ToolResponse, - construct_type( - type_=ToolResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data def get( self, @@ -668,38 +588,10 @@ def get( id="tl_789ghi", ) """ - _response = self._client_wrapper.httpx_client.request( - f"tools/{jsonable_encoder(id)}", - method="GET", - params={ - "version_id": version_id, - "environment": environment, - }, - request_options=request_options, + response = self._raw_client.get( + id, version_id=version_id, environment=environment, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - ToolResponse, - construct_type( - type_=ToolResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: """ @@ -728,28 +620,8 @@ def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = id="tl_789ghi", ) """ - _response = self._client_wrapper.httpx_client.request( - f"tools/{jsonable_encoder(id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = self._raw_client.delete(id, request_options=request_options) + return response.data def move( self, @@ -793,48 +665,13 @@ def move( path="new directory/new name", ) """ - 
_response = self._client_wrapper.httpx_client.request( - f"tools/{jsonable_encoder(id)}", - method="PATCH", - json={ - "path": path, - "name": name, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - ToolResponse, - construct_type( - type_=ToolResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = self._raw_client.move(id, path=path, name=name, request_options=request_options) + return response.data def list_versions( self, id: str, *, - status: typing.Optional[VersionStatus] = None, evaluator_aggregates: typing.Optional[bool] = None, request_options: typing.Optional[RequestOptions] = None, ) -> ListTools: @@ -846,9 +683,6 @@ def list_versions( id : str Unique identifier for the Tool. - status : typing.Optional[VersionStatus] - Filter versions by status: 'uncommitted', 'committed'. If no status is provided, all versions are returned. - evaluator_aggregates : typing.Optional[bool] Whether to include Evaluator aggregate results for the versions in the response @@ -869,49 +703,18 @@ def list_versions( ) client.tools.list_versions( id="tl_789ghi", - status="committed", ) """ - _response = self._client_wrapper.httpx_client.request( - f"tools/{jsonable_encoder(id)}/versions", - method="GET", - params={ - "status": status, - "evaluator_aggregates": evaluator_aggregates, - }, - request_options=request_options, + response = self._raw_client.list_versions( + id, evaluator_aggregates=evaluator_aggregates, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - ListTools, - construct_type( - type_=ListTools, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data - def commit( - self, id: str, version_id: str, *, commit_message: str, request_options: typing.Optional[RequestOptions] = None - ) -> ToolResponse: + def delete_tool_version( + self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> None: """ - Commit a version of the Tool with a commit message. - - If the version is already committed, an exception will be raised. + Delete a version of the Tool. Parameters ---------- @@ -921,16 +724,12 @@ def commit( version_id : str Unique identifier for the specific version of the Tool. - commit_message : str - Message describing the changes made. - request_options : typing.Optional[RequestOptions] Request-specific configuration. 
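A hedged sketch of pruning Tool versions with the new endpoint: list the versions, then delete one. It assumes `ListTools` exposes the returned versions as `.records`; the IDs are the placeholder values used elsewhere in this diff.

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

versions = client.tools.list_versions(id="tl_789ghi")  # placeholder Tool ID

# Assumption: ListTools exposes the versions as a `.records` list.
for version in versions.records:
    print(version.version_name)

# Remove a version that is no longer needed; the call returns None on success.
client.tools.delete_tool_version(id="tl_789ghi", version_id="tv_012jkl")
```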
Returns ------- - ToolResponse - Successful Response + None Examples -------- @@ -939,50 +738,25 @@ def commit( client = Humanloop( api_key="YOUR_API_KEY", ) - client.tools.commit( - id="tl_789ghi", - version_id="tv_012jkl", - commit_message="Initial commit", + client.tools.delete_tool_version( + id="id", + version_id="version_id", ) """ - _response = self._client_wrapper.httpx_client.request( - f"tools/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}/commit", - method="POST", - json={ - "commit_message": commit_message, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - ToolResponse, - construct_type( - type_=ToolResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = self._raw_client.delete_tool_version(id, version_id, request_options=request_options) + return response.data - def delete_tool_version( - self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> None: + def update_tool_version( + self, + id: str, + version_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> ToolResponse: """ - Delete a version of the Tool. + Update the name or description of the Tool version. Parameters ---------- @@ -992,12 +766,19 @@ def delete_tool_version( version_id : str Unique identifier for the specific version of the Tool. + name : typing.Optional[str] + Name of the version. + + description : typing.Optional[str] + Description of the version. + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
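A minimal sketch of the new `update_tool_version` method, which renames and describes an existing Tool version in place of the removed `commit` flow; IDs and names are placeholders.

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

updated = client.tools.update_tool_version(
    id="tl_789ghi",          # placeholder Tool ID
    version_id="tv_012jkl",  # placeholder version ID
    name="multiply-v2",      # new unique name for this version
    description="Handles negative inputs correctly.",
)
print(updated.version_name, updated.version_description)
```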
Returns ------- - None + ToolResponse + Successful Response Examples -------- @@ -1006,33 +787,15 @@ def delete_tool_version( client = Humanloop( api_key="YOUR_API_KEY", ) - client.tools.delete_tool_version( + client.tools.update_tool_version( id="id", version_id="version_id", ) """ - _response = self._client_wrapper.httpx_client.request( - f"tools/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", - method="DELETE", - request_options=request_options, + response = self._raw_client.update_tool_version( + id, version_id, name=name, description=description, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data def set_deployment( self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None @@ -1075,37 +838,10 @@ def set_deployment( version_id="tv_012jkl", ) """ - _response = self._client_wrapper.httpx_client.request( - f"tools/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", - method="POST", - params={ - "version_id": version_id, - }, - request_options=request_options, + response = self._raw_client.set_deployment( + id, environment_id, version_id=version_id, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - ToolResponse, - construct_type( - type_=ToolResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data def remove_deployment( self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None @@ -1143,28 +879,8 @@ def remove_deployment( environment_id="staging", ) """ - _response = self._client_wrapper.httpx_client.request( - f"tools/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = self._raw_client.remove_deployment(id, environment_id, request_options=request_options) + return response.data def list_environments( self, id: str, *, request_options: typing.Optional[RequestOptions] = None @@ -1196,34 +912,8 @@ def list_environments( id="tl_789ghi", ) """ - _response = self._client_wrapper.httpx_client.request( - f"tools/{jsonable_encoder(id)}/environments", - 
method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - typing.List[FileEnvironmentResponse], - construct_type( - type_=typing.List[FileEnvironmentResponse], # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = self._raw_client.list_environments(id, request_options=request_options) + return response.data def update_monitoring( self, @@ -1269,52 +959,26 @@ def update_monitoring( activate=[{"evaluator_version_id": "evv_1abc4308abd"}], ) """ - _response = self._client_wrapper.httpx_client.request( - f"tools/{jsonable_encoder(id)}/evaluators", - method="POST", - json={ - "activate": convert_and_respect_annotation_metadata( - object_=activate, - annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams], - direction="write", - ), - "deactivate": convert_and_respect_annotation_metadata( - object_=deactivate, - annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams], - direction="write", - ), - }, - request_options=request_options, - omit=OMIT, + response = self._raw_client.update_monitoring( + id, activate=activate, deactivate=deactivate, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - ToolResponse, - construct_type( - type_=ToolResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data class AsyncToolsClient: def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper + self._raw_client = AsyncRawToolsClient(client_wrapper=client_wrapper) + + @property + def with_raw_response(self) -> AsyncRawToolsClient: + """ + Retrieves a raw implementation of this client that returns raw responses. 
+ + Returns + ------- + AsyncRawToolsClient + """ + return self._raw_client async def log( self, @@ -1473,68 +1137,34 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - "tools/log", - method="POST", - params={ - "version_id": version_id, - "environment": environment, - }, - json={ - "path": path, - "id": id, - "start_time": start_time, - "end_time": end_time, - "output": output, - "created_at": created_at, - "error": error, - "provider_latency": provider_latency, - "stdout": stdout, - "provider_request": provider_request, - "provider_response": provider_response, - "inputs": inputs, - "source": source, - "metadata": metadata, - "log_status": log_status, - "source_datapoint_id": source_datapoint_id, - "trace_parent_id": trace_parent_id, - "user": user, - "environment": tool_log_request_environment, - "save": save, - "log_id": log_id, - "tool": convert_and_respect_annotation_metadata( - object_=tool, annotation=ToolKernelRequestParams, direction="write" - ), - }, - headers={ - "content-type": "application/json", - }, + response = await self._raw_client.log( + version_id=version_id, + environment=environment, + path=path, + id=id, + start_time=start_time, + end_time=end_time, + output=output, + created_at=created_at, + error=error, + provider_latency=provider_latency, + stdout=stdout, + provider_request=provider_request, + provider_response=provider_response, + inputs=inputs, + source=source, + metadata=metadata, + log_status=log_status, + source_datapoint_id=source_datapoint_id, + trace_parent_id=trace_parent_id, + user=user, + tool_log_request_environment=tool_log_request_environment, + save=save, + log_id=log_id, + tool=tool, request_options=request_options, - omit=OMIT, ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - CreateToolLogResponse, - construct_type( - type_=CreateToolLogResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data async def update( self, @@ -1636,53 +1266,25 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"tools/{jsonable_encoder(id)}/log/{jsonable_encoder(log_id)}", - method="PATCH", - json={ - "output": output, - "created_at": created_at, - "error": error, - "provider_latency": provider_latency, - "stdout": stdout, - "provider_request": provider_request, - "provider_response": provider_response, - "inputs": inputs, - "source": source, - "metadata": metadata, - "start_time": start_time, - "end_time": end_time, - "log_status": log_status, - }, - headers={ - "content-type": "application/json", - }, + response = await self._raw_client.update( + id, + log_id, + output=output, + created_at=created_at, + error=error, + provider_latency=provider_latency, + stdout=stdout, + provider_request=provider_request, + provider_response=provider_response, + inputs=inputs, + source=source, + metadata=metadata, + start_time=start_time, + end_time=end_time, + log_status=log_status, request_options=request_options, - omit=OMIT, ) - try: - if 200 <= _response.status_code < 300: - return 
typing.cast( - LogResponse, - construct_type( - type_=LogResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data async def list( self, @@ -1751,7 +1353,7 @@ async def main() -> None: asyncio.run(main()) """ page = page if page is not None else 1 - _response = await self._client_wrapper.httpx_client.request( + _response = await self._raw_client._client_wrapper.httpx_client.request( "tools", method="GET", params={ @@ -1810,7 +1412,8 @@ async def upsert( setup_values: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, tool_type: typing.Optional[FilesToolType] = OMIT, - commit_message: typing.Optional[str] = OMIT, + version_name: typing.Optional[str] = OMIT, + version_description: typing.Optional[str] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> ToolResponse: """ @@ -1818,9 +1421,9 @@ async def upsert( Tools are identified by the `ID` or their `path`. The name, description and parameters determine the versions of the Tool. - If you provide a commit message, then the new version will be committed; - otherwise it will be uncommitted. If you try to commit an already committed version, - an exception will be raised. + You can provide `version_name` and `version_description` to identify and describe your versions. + Version names must be unique within a Tool - attempting to create a version with a name + that already exists will result in a 409 Conflict error. Parameters ---------- @@ -1845,8 +1448,11 @@ async def upsert( tool_type : typing.Optional[FilesToolType] Type of Tool. - commit_message : typing.Optional[str] - Message describing the changes made. + version_name : typing.Optional[str] + Unique identifier for this Tool version. Each Tool can only have one version with a given name. + + version_description : typing.Optional[str] + Description of the Version. request_options : typing.Optional[RequestOptions] Request-specific configuration. 
@@ -1882,56 +1488,24 @@ async def main() -> None: "required": ["a", "b"], }, }, - commit_message="Initial commit", ) asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - "tools", - method="POST", - json={ - "path": path, - "id": id, - "function": convert_and_respect_annotation_metadata( - object_=function, annotation=ToolFunctionParams, direction="write" - ), - "source_code": source_code, - "setup_values": setup_values, - "attributes": attributes, - "tool_type": tool_type, - "commit_message": commit_message, - }, - headers={ - "content-type": "application/json", - }, + response = await self._raw_client.upsert( + path=path, + id=id, + function=function, + source_code=source_code, + setup_values=setup_values, + attributes=attributes, + tool_type=tool_type, + version_name=version_name, + version_description=version_description, request_options=request_options, - omit=OMIT, ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - ToolResponse, - construct_type( - type_=ToolResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data async def get( self, @@ -1985,38 +1559,10 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"tools/{jsonable_encoder(id)}", - method="GET", - params={ - "version_id": version_id, - "environment": environment, - }, - request_options=request_options, + response = await self._raw_client.get( + id, version_id=version_id, environment=environment, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - ToolResponse, - construct_type( - type_=ToolResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data async def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: """ @@ -2053,28 +1599,8 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"tools/{jsonable_encoder(id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = await self._raw_client.delete(id, request_options=request_options) + return response.data async def move( self, @@ -2126,48 
+1652,13 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"tools/{jsonable_encoder(id)}", - method="PATCH", - json={ - "path": path, - "name": name, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - ToolResponse, - construct_type( - type_=ToolResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = await self._raw_client.move(id, path=path, name=name, request_options=request_options) + return response.data async def list_versions( self, id: str, *, - status: typing.Optional[VersionStatus] = None, evaluator_aggregates: typing.Optional[bool] = None, request_options: typing.Optional[RequestOptions] = None, ) -> ListTools: @@ -2179,9 +1670,6 @@ async def list_versions( id : str Unique identifier for the Tool. - status : typing.Optional[VersionStatus] - Filter versions by status: 'uncommitted', 'committed'. If no status is provided, all versions are returned. - evaluator_aggregates : typing.Optional[bool] Whether to include Evaluator aggregate results for the versions in the response @@ -2207,52 +1695,21 @@ async def list_versions( async def main() -> None: await client.tools.list_versions( id="tl_789ghi", - status="committed", ) asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"tools/{jsonable_encoder(id)}/versions", - method="GET", - params={ - "status": status, - "evaluator_aggregates": evaluator_aggregates, - }, - request_options=request_options, + response = await self._raw_client.list_versions( + id, evaluator_aggregates=evaluator_aggregates, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - ListTools, - construct_type( - type_=ListTools, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data - async def commit( - self, id: str, version_id: str, *, commit_message: str, request_options: typing.Optional[RequestOptions] = None - ) -> ToolResponse: + async def delete_tool_version( + self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> None: """ - Commit a version of the Tool with a commit message. - - If the version is already committed, an exception will be raised. + Delete a version of the Tool. Parameters ---------- @@ -2262,16 +1719,12 @@ async def commit( version_id : str Unique identifier for the specific version of the Tool. - commit_message : str - Message describing the changes made. - request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - ToolResponse - Successful Response + None Examples -------- @@ -2285,53 +1738,28 @@ async def commit( async def main() -> None: - await client.tools.commit( - id="tl_789ghi", - version_id="tv_012jkl", - commit_message="Initial commit", + await client.tools.delete_tool_version( + id="id", + version_id="version_id", ) asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"tools/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}/commit", - method="POST", - json={ - "commit_message": commit_message, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - ToolResponse, - construct_type( - type_=ToolResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = await self._raw_client.delete_tool_version(id, version_id, request_options=request_options) + return response.data - async def delete_tool_version( - self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> None: + async def update_tool_version( + self, + id: str, + version_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> ToolResponse: """ - Delete a version of the Tool. + Update the name or description of the Tool version. Parameters ---------- @@ -2341,12 +1769,19 @@ async def delete_tool_version( version_id : str Unique identifier for the specific version of the Tool. + name : typing.Optional[str] + Name of the version. + + description : typing.Optional[str] + Description of the version. + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - None + ToolResponse + Successful Response Examples -------- @@ -2360,7 +1795,7 @@ async def delete_tool_version( async def main() -> None: - await client.tools.delete_tool_version( + await client.tools.update_tool_version( id="id", version_id="version_id", ) @@ -2368,28 +1803,10 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"tools/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", - method="DELETE", - request_options=request_options, + response = await self._raw_client.update_tool_version( + id, version_id, name=name, description=description, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data async def set_deployment( self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None @@ -2440,37 +1857,10 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"tools/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", - method="POST", - params={ - "version_id": version_id, - }, - request_options=request_options, + response = await self._raw_client.set_deployment( + id, environment_id, version_id=version_id, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - ToolResponse, - construct_type( - type_=ToolResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data async def remove_deployment( self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None @@ -2516,28 +1906,8 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"tools/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", - method="DELETE", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = await self._raw_client.remove_deployment(id, environment_id, request_options=request_options) + return response.data async def list_environments( self, id: str, *, request_options: typing.Optional[RequestOptions] = None @@ -2577,34 +1947,8 @@ async def main() -> None: 
asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"tools/{jsonable_encoder(id)}/environments", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - typing.List[FileEnvironmentResponse], - construct_type( - type_=typing.List[FileEnvironmentResponse], # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + response = await self._raw_client.list_environments(id, request_options=request_options) + return response.data async def update_monitoring( self, @@ -2658,44 +2002,7 @@ async def main() -> None: asyncio.run(main()) """ - _response = await self._client_wrapper.httpx_client.request( - f"tools/{jsonable_encoder(id)}/evaluators", - method="POST", - json={ - "activate": convert_and_respect_annotation_metadata( - object_=activate, - annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams], - direction="write", - ), - "deactivate": convert_and_respect_annotation_metadata( - object_=deactivate, - annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams], - direction="write", - ), - }, - request_options=request_options, - omit=OMIT, + response = await self._raw_client.update_monitoring( + id, activate=activate, deactivate=deactivate, request_options=request_options ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - ToolResponse, - construct_type( - type_=ToolResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + return response.data diff --git a/src/humanloop/tools/raw_client.py b/src/humanloop/tools/raw_client.py new file mode 100644 index 00000000..4a1f29e9 --- /dev/null +++ b/src/humanloop/tools/raw_client.py @@ -0,0 +1,2046 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing +from ..core.client_wrapper import SyncClientWrapper +import datetime as dt +from ..types.log_status import LogStatus +from ..requests.tool_kernel_request import ToolKernelRequestParams +from ..core.request_options import RequestOptions +from ..core.http_response import HttpResponse +from ..types.create_tool_log_response import CreateToolLogResponse +from ..core.serialization import convert_and_respect_annotation_metadata +from ..core.unchecked_base_model import construct_type +from ..errors.unprocessable_entity_error import UnprocessableEntityError +from ..types.http_validation_error import HttpValidationError +from json.decoder import JSONDecodeError +from ..core.api_error import ApiError +from ..types.log_response import LogResponse +from ..core.jsonable_encoder import jsonable_encoder +from ..requests.tool_function import ToolFunctionParams +from ..types.files_tool_type import FilesToolType +from ..types.tool_response import ToolResponse +from ..types.list_tools import ListTools +from ..types.file_environment_response import FileEnvironmentResponse +from ..requests.evaluator_activation_deactivation_request_activate_item import ( + EvaluatorActivationDeactivationRequestActivateItemParams, +) +from ..requests.evaluator_activation_deactivation_request_deactivate_item import ( + EvaluatorActivationDeactivationRequestDeactivateItemParams, +) +from ..core.client_wrapper import AsyncClientWrapper +from ..core.http_response import AsyncHttpResponse + +# this is used as the default value for optional parameters +OMIT = typing.cast(typing.Any, ...) + + +class RawToolsClient: + def __init__(self, *, client_wrapper: SyncClientWrapper): + self._client_wrapper = client_wrapper + + def log( + self, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + output: typing.Optional[str] = OMIT, + created_at: typing.Optional[dt.datetime] = OMIT, + error: typing.Optional[str] = OMIT, + provider_latency: typing.Optional[float] = OMIT, + stdout: typing.Optional[str] = OMIT, + provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + source_datapoint_id: typing.Optional[str] = OMIT, + trace_parent_id: typing.Optional[str] = OMIT, + user: typing.Optional[str] = OMIT, + tool_log_request_environment: typing.Optional[str] = OMIT, + save: typing.Optional[bool] = OMIT, + log_id: typing.Optional[str] = OMIT, + tool: typing.Optional[ToolKernelRequestParams] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[CreateToolLogResponse]: + """ + Log to a Tool. + + You can use query parameters `version_id`, or `environment`, to target + an existing version of the Tool. Otherwise the default deployed version will be chosen. + + Instead of targeting an existing version explicitly, you can instead pass in + Tool details in the request body. In this case, we will check if the details correspond + to an existing version of the Tool, if not we will create a new version. 
This is helpful + in the case where you are storing or deriving your Tool details in code. + + Parameters + ---------- + version_id : typing.Optional[str] + A specific Version ID of the Tool to log to. + + environment : typing.Optional[str] + Name of the Environment identifying a deployed version to log to. + + path : typing.Optional[str] + Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Tool. + + start_time : typing.Optional[dt.datetime] + When the logged event started. + + end_time : typing.Optional[dt.datetime] + When the logged event ended. + + output : typing.Optional[str] + Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + + created_at : typing.Optional[dt.datetime] + User defined timestamp for when the log was created. + + error : typing.Optional[str] + Error message if the log is an error. + + provider_latency : typing.Optional[float] + Duration of the logged event in seconds. + + stdout : typing.Optional[str] + Captured log and debug statements. + + provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Raw request sent to provider. + + provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Raw response received the provider. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the prompt template. + + source : typing.Optional[str] + Identifies where the model was called from. + + metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Any additional metadata to record. + + log_status : typing.Optional[LogStatus] + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + + source_datapoint_id : typing.Optional[str] + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + + trace_parent_id : typing.Optional[str] + The ID of the parent Log to nest this Log under in a Trace. + + user : typing.Optional[str] + End-user ID related to the Log. + + tool_log_request_environment : typing.Optional[str] + The name of the Environment the Log is associated to. + + save : typing.Optional[bool] + Whether the request/response payloads will be stored on Humanloop. + + log_id : typing.Optional[str] + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + + tool : typing.Optional[ToolKernelRequestParams] + Details of your Tool. A new Tool version will be created if the provided details are new. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[CreateToolLogResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + "tools/log", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + }, + json={ + "path": path, + "id": id, + "start_time": start_time, + "end_time": end_time, + "output": output, + "created_at": created_at, + "error": error, + "provider_latency": provider_latency, + "stdout": stdout, + "provider_request": provider_request, + "provider_response": provider_response, + "inputs": inputs, + "source": source, + "metadata": metadata, + "log_status": log_status, + "source_datapoint_id": source_datapoint_id, + "trace_parent_id": trace_parent_id, + "user": user, + "environment": tool_log_request_environment, + "save": save, + "log_id": log_id, + "tool": convert_and_respect_annotation_metadata( + object_=tool, annotation=ToolKernelRequestParams, direction="write" + ), + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + CreateToolLogResponse, + construct_type( + type_=CreateToolLogResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def update( + self, + id: str, + log_id: str, + *, + output: typing.Optional[str] = OMIT, + created_at: typing.Optional[dt.datetime] = OMIT, + error: typing.Optional[str] = OMIT, + provider_latency: typing.Optional[float] = OMIT, + stdout: typing.Optional[str] = OMIT, + provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[LogResponse]: + """ + Update a Log. + + Update the details of a Log with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + log_id : str + Unique identifier for the Log. + + output : typing.Optional[str] + Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + + created_at : typing.Optional[dt.datetime] + User defined timestamp for when the log was created. + + error : typing.Optional[str] + Error message if the log is an error. + + provider_latency : typing.Optional[float] + Duration of the logged event in seconds. + + stdout : typing.Optional[str] + Captured log and debug statements. + + provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Raw request sent to provider. 
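For callers that need both the parsed body and access to the HTTP layer, the raw `log` above returns an `HttpResponse[CreateToolLogResponse]` whose payload sits on `.data`. A sketch, assuming the high-level sync `ToolsClient` keeps its raw counterpart on the private `_raw_client` attribute (as the async wrappers in this diff do) and that the kernel/function keys below match `ToolKernelRequestParams`/`ToolFunctionParams`:

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Hypothetical tool kernel; the exact keys are defined by ToolKernelRequestParams.
tool_kernel = {
    "function": {
        "name": "get_weather",
        "description": "Look up the weather for a city.",
        "parameters": {"type": "object", "properties": {"city": {"type": "string"}}},
    },
}

# _raw_client is private in the generated code; used here only to illustrate the
# HttpResponse wrapper. Targets the version deployed to the "production" Environment.
response = client.tools._raw_client.log(
    path="utils/get-weather",  # hypothetical path
    environment="production",
    inputs={"city": "Paris"},
    output='{"temperature_c": 21}',
    tool=tool_kernel,
)
print(response.data)  # CreateToolLogResponse
```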
+ + provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Raw response received the provider. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the prompt template. + + source : typing.Optional[str] + Identifies where the model was called from. + + metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Any additional metadata to record. + + start_time : typing.Optional[dt.datetime] + When the logged event started. + + end_time : typing.Optional[dt.datetime] + When the logged event ended. + + log_status : typing.Optional[LogStatus] + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[LogResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}/log/{jsonable_encoder(log_id)}", + method="PATCH", + json={ + "output": output, + "created_at": created_at, + "error": error, + "provider_latency": provider_latency, + "stdout": stdout, + "provider_request": provider_request, + "provider_response": provider_response, + "inputs": inputs, + "source": source, + "metadata": metadata, + "start_time": start_time, + "end_time": end_time, + "log_status": log_status, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + LogResponse, + construct_type( + type_=LogResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def upsert( + self, + *, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + function: typing.Optional[ToolFunctionParams] = OMIT, + source_code: typing.Optional[str] = OMIT, + setup_values: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + tool_type: typing.Optional[FilesToolType] = OMIT, + version_name: typing.Optional[str] = OMIT, + version_description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[ToolResponse]: + """ + Create a Tool or update it with a new version if it already exists. + + Tools are identified by the `ID` or their `path`. The name, description and parameters determine the versions of the Tool. + + You can provide `version_name` and `version_description` to identify and describe your versions. + Version names must be unique within a Tool - attempting to create a version with a name + that already exists will result in a 409 Conflict error. 
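`update` above is what closes out a Log that was opened with `log_status="incomplete"`; monitoring Evaluators hold off until the Log is marked complete. A short sketch, assuming the high-level client mirrors the raw `update` signature and that `"complete"` is the `LogStatus` literal implied by the docstring:

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Fill in the output on a previously "incomplete" Tool Log and mark it complete
# so monitoring Evaluators run. IDs are placeholders.
client.tools.update(
    id="tl_789ghi",       # hypothetical Tool ID
    log_id="log_123abc",  # hypothetical Log ID
    output='{"temperature_c": 21}',
    log_status="complete",
)
```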
+ + Parameters + ---------- + path : typing.Optional[str] + Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Tool. + + function : typing.Optional[ToolFunctionParams] + Callable function specification of the Tool shown to the model for tool calling. + + source_code : typing.Optional[str] + Code source of the Tool. + + setup_values : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Values needed to setup the Tool, defined in JSON Schema format: https://json-schema.org/ + + attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Additional fields to describe the Tool. Helpful to separate Tool versions from each other with details on how they were created or used. + + tool_type : typing.Optional[FilesToolType] + Type of Tool. + + version_name : typing.Optional[str] + Unique identifier for this Tool version. Each Tool can only have one version with a given name. + + version_description : typing.Optional[str] + Description of the Version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[ToolResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + "tools", + method="POST", + json={ + "path": path, + "id": id, + "function": convert_and_respect_annotation_metadata( + object_=function, annotation=ToolFunctionParams, direction="write" + ), + "source_code": source_code, + "setup_values": setup_values, + "attributes": attributes, + "tool_type": tool_type, + "version_name": version_name, + "version_description": version_description, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + ToolResponse, + construct_type( + type_=ToolResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[ToolResponse]: + """ + Retrieve the Tool with the given ID. + + By default, the deployed version of the Tool is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Tool. + + Parameters + ---------- + id : str + Unique identifier for Tool. + + version_id : typing.Optional[str] + A specific Version ID of the Tool to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
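The upsert docstring above replaces the old commit-message flow: versions are now named, and reusing a `version_name` yields a 409. In the generated error handling, any non-2xx status other than 422 surfaces as `ApiError`, so the conflict can be caught by status code. A sketch with hypothetical values, assuming the high-level `client.tools.upsert` mirrors the raw signature:

```python
from humanloop import Humanloop
from humanloop.core.api_error import ApiError

client = Humanloop(api_key="YOUR_API_KEY")

try:
    tool = client.tools.upsert(
        path="utils/get-weather",  # hypothetical path
        function={
            "name": "get_weather",
            "description": "Look up the weather for a city.",
            "parameters": {"type": "object", "properties": {"city": {"type": "string"}}},
        },
        version_name="v2-stricter-schema",
        version_description="Tightened the parameter schema.",
    )
except ApiError as exc:
    # Non-422 failures are raised as ApiError with the HTTP status code attached.
    if exc.status_code == 409:
        print("A version with this name already exists for this Tool.")
    else:
        raise
```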
+ + Returns + ------- + HttpResponse[ToolResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}", + method="GET", + params={ + "version_id": version_id, + "environment": environment, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + ToolResponse, + construct_type( + type_=ToolResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] = None) -> HttpResponse[None]: + """ + Delete the Tool with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Tool. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[None] + """ + _response = self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return HttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def move( + self, + id: str, + *, + path: typing.Optional[str] = OMIT, + name: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[ToolResponse]: + """ + Move the Tool to a different path or change the name. + + Parameters + ---------- + id : str + Unique identifier for Tool. + + path : typing.Optional[str] + Path of the Tool including the Tool name, which is used as a unique identifier. + + name : typing.Optional[str] + Name of the Tool, which is used as a unique identifier. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
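`get` resolves the deployed version unless `version_id` or `environment` narrows it down. A quick sketch with placeholder IDs, again assuming the high-level sync client exposes the same method:

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

deployed = client.tools.get(id="tl_789ghi")                           # default deployment
pinned = client.tools.get(id="tl_789ghi", version_id="tv_012jkl")     # specific version
staging = client.tools.get(id="tl_789ghi", environment="staging")     # Environment's deployment
```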
+ + Returns + ------- + HttpResponse[ToolResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}", + method="PATCH", + json={ + "path": path, + "name": name, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + ToolResponse, + construct_type( + type_=ToolResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def list_versions( + self, + id: str, + *, + evaluator_aggregates: typing.Optional[bool] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[ListTools]: + """ + Get a list of all the versions of a Tool. + + Parameters + ---------- + id : str + Unique identifier for the Tool. + + evaluator_aggregates : typing.Optional[bool] + Whether to include Evaluator aggregate results for the versions in the response + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[ListTools] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}/versions", + method="GET", + params={ + "evaluator_aggregates": evaluator_aggregates, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + ListTools, + construct_type( + type_=ListTools, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def delete_tool_version( + self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[None]: + """ + Delete a version of the Tool. + + Parameters + ---------- + id : str + Unique identifier for Tool. + + version_id : str + Unique identifier for the specific version of the Tool. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[None] + """ + _response = self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return HttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def update_tool_version( + self, + id: str, + version_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[ToolResponse]: + """ + Update the name or description of the Tool version. + + Parameters + ---------- + id : str + Unique identifier for Tool. + + version_id : str + Unique identifier for the specific version of the Tool. + + name : typing.Optional[str] + Name of the version. + + description : typing.Optional[str] + Description of the version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[ToolResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", + method="PATCH", + json={ + "name": name, + "description": description, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + ToolResponse, + construct_type( + type_=ToolResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def set_deployment( + self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[ToolResponse]: + """ + Deploy Tool to an Environment. + + Set the deployed version for the specified Environment. This Prompt + will be used for calls made to the Tool in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Tool. + + environment_id : str + Unique identifier for the Environment to deploy the Version to. + + version_id : str + Unique identifier for the specific version of the Tool. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
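With `commit` gone, renaming or describing an existing version goes through `update_tool_version` above (a PATCH on the version). A sketch with placeholder IDs, assuming the sync `ToolsClient` wraps this the same way the async client in this diff does:

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

tool = client.tools.update_tool_version(
    id="tl_789ghi",          # hypothetical Tool ID
    version_id="tv_012jkl",  # hypothetical Version ID
    name="v2-stricter-schema",
    description="Promoted after the latest eval run.",
)
```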
+ + Returns + ------- + HttpResponse[ToolResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="POST", + params={ + "version_id": version_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + ToolResponse, + construct_type( + type_=ToolResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def remove_deployment( + self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[None]: + """ + Remove deployed Tool from the Environment. + + Remove the deployed version for the specified Environment. This Tool + will no longer be used for calls made to the Tool in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Tool. + + environment_id : str + Unique identifier for the Environment to remove the deployment from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + HttpResponse[None] + """ + _response = self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return HttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def list_environments( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> HttpResponse[typing.List[FileEnvironmentResponse]]: + """ + List all Environments and their deployed versions for the Tool. + + Parameters + ---------- + id : str + Unique identifier for Tool. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
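Deployments above are keyed by Environment ID, with `version_id` sent as a query parameter; removing the deployment is the matching DELETE. A sketch with placeholder IDs:

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Pin a specific Tool version to an Environment, then roll the deployment back.
client.tools.set_deployment(
    id="tl_789ghi",
    environment_id="env_456def",  # hypothetical Environment ID
    version_id="tv_012jkl",
)
client.tools.remove_deployment(id="tl_789ghi", environment_id="env_456def")
```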
+ + Returns + ------- + HttpResponse[typing.List[FileEnvironmentResponse]] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}/environments", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + typing.List[FileEnvironmentResponse], + construct_type( + type_=typing.List[FileEnvironmentResponse], # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def update_monitoring( + self, + id: str, + *, + activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, + deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> HttpResponse[ToolResponse]: + """ + Activate and deactivate Evaluators for monitoring the Tool. + + An activated Evaluator will automatically be run on all new Logs + within the Tool for monitoring purposes. + + Parameters + ---------- + id : str + + activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] + Evaluators to activate for Monitoring. These will be automatically run on new Logs. + + deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] + Evaluators to deactivate. These will not be run on new Logs. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + HttpResponse[ToolResponse] + Successful Response + """ + _response = self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}/evaluators", + method="POST", + json={ + "activate": convert_and_respect_annotation_metadata( + object_=activate, + annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams], + direction="write", + ), + "deactivate": convert_and_respect_annotation_metadata( + object_=deactivate, + annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams], + direction="write", + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + ToolResponse, + construct_type( + type_=ToolResponse, # type: ignore + object_=_response.json(), + ), + ) + return HttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + +class AsyncRawToolsClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def log( + self, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + output: typing.Optional[str] = OMIT, + created_at: typing.Optional[dt.datetime] = OMIT, + error: typing.Optional[str] = OMIT, + provider_latency: typing.Optional[float] = OMIT, + stdout: typing.Optional[str] = OMIT, + provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + source_datapoint_id: typing.Optional[str] = OMIT, + trace_parent_id: typing.Optional[str] = OMIT, + user: typing.Optional[str] = OMIT, + tool_log_request_environment: typing.Optional[str] = OMIT, + save: typing.Optional[bool] = OMIT, + log_id: typing.Optional[str] = OMIT, + tool: typing.Optional[ToolKernelRequestParams] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[CreateToolLogResponse]: + """ + Log to a Tool. + + You can use query parameters `version_id`, or `environment`, to target + an existing version of the Tool. Otherwise the default deployed version will be chosen. + + Instead of targeting an existing version explicitly, you can instead pass in + Tool details in the request body. In this case, we will check if the details correspond + to an existing version of the Tool, if not we will create a new version. This is helpful + in the case where you are storing or deriving your Tool details in code. + + Parameters + ---------- + version_id : typing.Optional[str] + A specific Version ID of the Tool to log to. 
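`update_monitoring` above toggles monitoring Evaluators on the Tool; the payload shapes come from the `EvaluatorActivationDeactivationRequest*ItemParams` request types. In the sketch below the `evaluator_version_id` key is an assumption about those shapes, not something shown in this diff:

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Turn one monitoring Evaluator on and another off for this Tool. The dict key is a
# guess at EvaluatorActivationDeactivationRequestActivateItemParams; check the
# generated request types for the exact fields.
client.tools.update_monitoring(
    id="tl_789ghi",
    activate=[{"evaluator_version_id": "evv_abc123"}],
    deactivate=[{"evaluator_version_id": "evv_def456"}],
)
```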
+ + environment : typing.Optional[str] + Name of the Environment identifying a deployed version to log to. + + path : typing.Optional[str] + Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Tool. + + start_time : typing.Optional[dt.datetime] + When the logged event started. + + end_time : typing.Optional[dt.datetime] + When the logged event ended. + + output : typing.Optional[str] + Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + + created_at : typing.Optional[dt.datetime] + User defined timestamp for when the log was created. + + error : typing.Optional[str] + Error message if the log is an error. + + provider_latency : typing.Optional[float] + Duration of the logged event in seconds. + + stdout : typing.Optional[str] + Captured log and debug statements. + + provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Raw request sent to provider. + + provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Raw response received the provider. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the prompt template. + + source : typing.Optional[str] + Identifies where the model was called from. + + metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Any additional metadata to record. + + log_status : typing.Optional[LogStatus] + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + + source_datapoint_id : typing.Optional[str] + Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair. + + trace_parent_id : typing.Optional[str] + The ID of the parent Log to nest this Log under in a Trace. + + user : typing.Optional[str] + End-user ID related to the Log. + + tool_log_request_environment : typing.Optional[str] + The name of the Environment the Log is associated to. + + save : typing.Optional[bool] + Whether the request/response payloads will be stored on Humanloop. + + log_id : typing.Optional[str] + This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you. + + tool : typing.Optional[ToolKernelRequestParams] + Details of your Tool. A new Tool version will be created if the provided details are new. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[CreateToolLogResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + "tools/log", + method="POST", + params={ + "version_id": version_id, + "environment": environment, + }, + json={ + "path": path, + "id": id, + "start_time": start_time, + "end_time": end_time, + "output": output, + "created_at": created_at, + "error": error, + "provider_latency": provider_latency, + "stdout": stdout, + "provider_request": provider_request, + "provider_response": provider_response, + "inputs": inputs, + "source": source, + "metadata": metadata, + "log_status": log_status, + "source_datapoint_id": source_datapoint_id, + "trace_parent_id": trace_parent_id, + "user": user, + "environment": tool_log_request_environment, + "save": save, + "log_id": log_id, + "tool": convert_and_respect_annotation_metadata( + object_=tool, annotation=ToolKernelRequestParams, direction="write" + ), + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + CreateToolLogResponse, + construct_type( + type_=CreateToolLogResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def update( + self, + id: str, + log_id: str, + *, + output: typing.Optional[str] = OMIT, + created_at: typing.Optional[dt.datetime] = OMIT, + error: typing.Optional[str] = OMIT, + provider_latency: typing.Optional[float] = OMIT, + stdout: typing.Optional[str] = OMIT, + provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + source: typing.Optional[str] = OMIT, + metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + start_time: typing.Optional[dt.datetime] = OMIT, + end_time: typing.Optional[dt.datetime] = OMIT, + log_status: typing.Optional[LogStatus] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[LogResponse]: + """ + Update a Log. + + Update the details of a Log with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Prompt. + + log_id : str + Unique identifier for the Log. + + output : typing.Optional[str] + Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later. + + created_at : typing.Optional[dt.datetime] + User defined timestamp for when the log was created. + + error : typing.Optional[str] + Error message if the log is an error. + + provider_latency : typing.Optional[float] + Duration of the logged event in seconds. + + stdout : typing.Optional[str] + Captured log and debug statements. + + provider_request : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Raw request sent to provider. 
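On the async side, the raw methods return `AsyncHttpResponse` wrappers and raise `UnprocessableEntityError` for 422 validation failures, mirroring the sync client. A sketch, again assuming the `_raw_client` attribute and an `AsyncHumanloop` export:

```python
import asyncio

from humanloop import AsyncHumanloop  # assumed export name
from humanloop.errors.unprocessable_entity_error import UnprocessableEntityError

client = AsyncHumanloop(api_key="YOUR_API_KEY")


async def main() -> None:
    try:
        response = await client.tools._raw_client.log(
            path="utils/get-weather",  # hypothetical path
            inputs={"city": "Paris"},
            output='{"temperature_c": 21}',
        )
        print(response.data)  # CreateToolLogResponse
    except UnprocessableEntityError as exc:
        # 422 from the API; assumes the generated error keeps the parsed
        # HttpValidationError on `.body`.
        print(exc.body)


asyncio.run(main())
```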
+ + provider_response : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Raw response received the provider. + + inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + The inputs passed to the prompt template. + + source : typing.Optional[str] + Identifies where the model was called from. + + metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Any additional metadata to record. + + start_time : typing.Optional[dt.datetime] + When the logged event started. + + end_time : typing.Optional[dt.datetime] + When the logged event ended. + + log_status : typing.Optional[LogStatus] + Status of a Log. Set to `incomplete` if you intend to update and eventually complete the Log and want the File's monitoring Evaluators to wait until you mark it as `complete`. If log_status is not provided, observability will pick up the Log as soon as possible. Updating this from specified to unspecified is undefined behavior. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[LogResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}/log/{jsonable_encoder(log_id)}", + method="PATCH", + json={ + "output": output, + "created_at": created_at, + "error": error, + "provider_latency": provider_latency, + "stdout": stdout, + "provider_request": provider_request, + "provider_response": provider_response, + "inputs": inputs, + "source": source, + "metadata": metadata, + "start_time": start_time, + "end_time": end_time, + "log_status": log_status, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + LogResponse, + construct_type( + type_=LogResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def upsert( + self, + *, + path: typing.Optional[str] = OMIT, + id: typing.Optional[str] = OMIT, + function: typing.Optional[ToolFunctionParams] = OMIT, + source_code: typing.Optional[str] = OMIT, + setup_values: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + attributes: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + tool_type: typing.Optional[FilesToolType] = OMIT, + version_name: typing.Optional[str] = OMIT, + version_description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[ToolResponse]: + """ + Create a Tool or update it with a new version if it already exists. + + Tools are identified by the `ID` or their `path`. The name, description and parameters determine the versions of the Tool. + + You can provide `version_name` and `version_description` to identify and describe your versions. + Version names must be unique within a Tool - attempting to create a version with a name + that already exists will result in a 409 Conflict error. 
+ + Parameters + ---------- + path : typing.Optional[str] + Path of the Tool, including the name. This locates the Tool in the Humanloop filesystem and is used as as a unique identifier. For example: `folder/name` or just `name`. + + id : typing.Optional[str] + ID for an existing Tool. + + function : typing.Optional[ToolFunctionParams] + Callable function specification of the Tool shown to the model for tool calling. + + source_code : typing.Optional[str] + Code source of the Tool. + + setup_values : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Values needed to setup the Tool, defined in JSON Schema format: https://json-schema.org/ + + attributes : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Additional fields to describe the Tool. Helpful to separate Tool versions from each other with details on how they were created or used. + + tool_type : typing.Optional[FilesToolType] + Type of Tool. + + version_name : typing.Optional[str] + Unique identifier for this Tool version. Each Tool can only have one version with a given name. + + version_description : typing.Optional[str] + Description of the Version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[ToolResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + "tools", + method="POST", + json={ + "path": path, + "id": id, + "function": convert_and_respect_annotation_metadata( + object_=function, annotation=ToolFunctionParams, direction="write" + ), + "source_code": source_code, + "setup_values": setup_values, + "attributes": attributes, + "tool_type": tool_type, + "version_name": version_name, + "version_description": version_description, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + ToolResponse, + construct_type( + type_=ToolResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def get( + self, + id: str, + *, + version_id: typing.Optional[str] = None, + environment: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[ToolResponse]: + """ + Retrieve the Tool with the given ID. + + By default, the deployed version of the Tool is returned. Use the query parameters + `version_id` or `environment` to target a specific version of the Tool. + + Parameters + ---------- + id : str + Unique identifier for Tool. + + version_id : typing.Optional[str] + A specific Version ID of the Tool to retrieve. + + environment : typing.Optional[str] + Name of the Environment to retrieve a deployed Version from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[ToolResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}", + method="GET", + params={ + "version_id": version_id, + "environment": environment, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + ToolResponse, + construct_type( + type_=ToolResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def delete( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[None]: + """ + Delete the Tool with the given ID. + + Parameters + ---------- + id : str + Unique identifier for Tool. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[None] + """ + _response = await self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return AsyncHttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def move( + self, + id: str, + *, + path: typing.Optional[str] = OMIT, + name: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[ToolResponse]: + """ + Move the Tool to a different path or change the name. + + Parameters + ---------- + id : str + Unique identifier for Tool. + + path : typing.Optional[str] + Path of the Tool including the Tool name, which is used as a unique identifier. + + name : typing.Optional[str] + Name of the Tool, which is used as a unique identifier. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[ToolResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}", + method="PATCH", + json={ + "path": path, + "name": name, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + ToolResponse, + construct_type( + type_=ToolResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def list_versions( + self, + id: str, + *, + evaluator_aggregates: typing.Optional[bool] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[ListTools]: + """ + Get a list of all the versions of a Tool. + + Parameters + ---------- + id : str + Unique identifier for the Tool. + + evaluator_aggregates : typing.Optional[bool] + Whether to include Evaluator aggregate results for the versions in the response + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[ListTools] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}/versions", + method="GET", + params={ + "evaluator_aggregates": evaluator_aggregates, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + ListTools, + construct_type( + type_=ListTools, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def delete_tool_version( + self, id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[None]: + """ + Delete a version of the Tool. + + Parameters + ---------- + id : str + Unique identifier for Tool. + + version_id : str + Unique identifier for the specific version of the Tool. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[None] + """ + _response = await self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return AsyncHttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def update_tool_version( + self, + id: str, + version_id: str, + *, + name: typing.Optional[str] = OMIT, + description: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[ToolResponse]: + """ + Update the name or description of the Tool version. + + Parameters + ---------- + id : str + Unique identifier for Tool. + + version_id : str + Unique identifier for the specific version of the Tool. + + name : typing.Optional[str] + Name of the version. + + description : typing.Optional[str] + Description of the version. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[ToolResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}", + method="PATCH", + json={ + "name": name, + "description": description, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + ToolResponse, + construct_type( + type_=ToolResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def set_deployment( + self, id: str, environment_id: str, *, version_id: str, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[ToolResponse]: + """ + Deploy Tool to an Environment. + + Set the deployed version for the specified Environment. This Prompt + will be used for calls made to the Tool in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Tool. + + environment_id : str + Unique identifier for the Environment to deploy the Version to. + + version_id : str + Unique identifier for the specific version of the Tool. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
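The async raw `delete_tool_version` above returns `AsyncHttpResponse[None]`, so there is no payload to read on success; errors still raise as usual. A cleanup-style sketch with placeholder IDs and the same `_raw_client` assumption as before:

```python
import asyncio

from humanloop import AsyncHumanloop  # assumed export name

client = AsyncHumanloop(api_key="YOUR_API_KEY")


async def main() -> None:
    raw = client.tools._raw_client
    # Nothing to read from the delete response; a failure would raise instead.
    await raw.delete_tool_version(id="tl_789ghi", version_id="tv_old001")
    environments = (await raw.list_environments(id="tl_789ghi")).data
    for env in environments:  # List[FileEnvironmentResponse]
        print(env)


asyncio.run(main())
```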
+ + Returns + ------- + AsyncHttpResponse[ToolResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="POST", + params={ + "version_id": version_id, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + ToolResponse, + construct_type( + type_=ToolResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def remove_deployment( + self, id: str, environment_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[None]: + """ + Remove deployed Tool from the Environment. + + Remove the deployed version for the specified Environment. This Tool + will no longer be used for calls made to the Tool in this Environment. + + Parameters + ---------- + id : str + Unique identifier for Tool. + + environment_id : str + Unique identifier for the Environment to remove the deployment from. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + AsyncHttpResponse[None] + """ + _response = await self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}/environments/{jsonable_encoder(environment_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return AsyncHttpResponse(response=_response, data=None) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def list_environments( + self, id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsyncHttpResponse[typing.List[FileEnvironmentResponse]]: + """ + List all Environments and their deployed versions for the Tool. + + Parameters + ---------- + id : str + Unique identifier for Tool. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[typing.List[FileEnvironmentResponse]] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}/environments", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + typing.List[FileEnvironmentResponse], + construct_type( + type_=typing.List[FileEnvironmentResponse], # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def update_monitoring( + self, + id: str, + *, + activate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] = OMIT, + deactivate: typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AsyncHttpResponse[ToolResponse]: + """ + Activate and deactivate Evaluators for monitoring the Tool. + + An activated Evaluator will automatically be run on all new Logs + within the Tool for monitoring purposes. + + Parameters + ---------- + id : str + + activate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams]] + Evaluators to activate for Monitoring. These will be automatically run on new Logs. + + deactivate : typing.Optional[typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams]] + Evaluators to deactivate. These will not be run on new Logs. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + AsyncHttpResponse[ToolResponse] + Successful Response + """ + _response = await self._client_wrapper.httpx_client.request( + f"tools/{jsonable_encoder(id)}/evaluators", + method="POST", + json={ + "activate": convert_and_respect_annotation_metadata( + object_=activate, + annotation=typing.Sequence[EvaluatorActivationDeactivationRequestActivateItemParams], + direction="write", + ), + "deactivate": convert_and_respect_annotation_metadata( + object_=deactivate, + annotation=typing.Sequence[EvaluatorActivationDeactivationRequestDeactivateItemParams], + direction="write", + ), + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + _data = typing.cast( + ToolResponse, + construct_type( + type_=ToolResponse, # type: ignore + object_=_response.json(), + ), + ) + return AsyncHttpResponse(response=_response, data=_data) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/humanloop/types/__init__.py b/src/humanloop/types/__init__.py index 99475cde..156f4e9a 100644 --- a/src/humanloop/types/__init__.py +++ b/src/humanloop/types/__init__.py @@ -9,7 +9,6 @@ from .chat_role import ChatRole from .chat_tool_type import ChatToolType from .code_evaluator_request import CodeEvaluatorRequest -from .commit_request import CommitRequest from .config_tool_response import ConfigToolResponse from .create_datapoint_request import CreateDatapointRequest from .create_datapoint_request_target_value import CreateDatapointRequestTargetValue @@ -152,6 +151,7 @@ from .tool_response import ToolResponse from .update_dateset_action import UpdateDatesetAction from .update_evaluation_status_request import UpdateEvaluationStatusRequest +from .update_version_request import UpdateVersionRequest from .user_response import UserResponse from .valence import Valence from .validation_error import ValidationError @@ -176,7 +176,6 @@ "ChatRole", "ChatToolType", "CodeEvaluatorRequest", - "CommitRequest", "ConfigToolResponse", "CreateDatapointRequest", "CreateDatapointRequestTargetValue", @@ -313,6 +312,7 @@ "ToolResponse", "UpdateDatesetAction", "UpdateEvaluationStatusRequest", + "UpdateVersionRequest", "UserResponse", "Valence", "ValidationError", diff --git a/src/humanloop/types/dataset_response.py b/src/humanloop/types/dataset_response.py index 41b44cba..af79f597 100644 --- a/src/humanloop/types/dataset_response.py +++ b/src/humanloop/types/dataset_response.py @@ -6,7 +6,6 @@ from .environment_response import EnvironmentResponse import datetime as dt from .user_response import UserResponse -from .version_status import VersionStatus from .datapoint_response import DatapointResponse from ..core.pydantic_utilities import IS_PYDANTIC_V2 @@ -72,25 +71,15 @@ class DatasetResponse(UncheckedBaseModel): The user who created the Dataset. """ - committed_by: typing.Optional[UserResponse] = pydantic.Field(default=None) - """ - The user who committed the Dataset Version. - """ - - committed_at: typing.Optional[dt.datetime] = pydantic.Field(default=None) - """ - The date and time the Dataset Version was committed. 
- """ - - status: VersionStatus = pydantic.Field() + last_used_at: dt.datetime + version_name: typing.Optional[str] = pydantic.Field(default=None) """ - The status of the Dataset Version. + Unique name for the Dataset version. Version names must be unique for a given Dataset. """ - last_used_at: dt.datetime - commit_message: typing.Optional[str] = pydantic.Field(default=None) + version_description: typing.Optional[str] = pydantic.Field(default=None) """ - Message describing the changes made. If provided, a committed version of the Dataset is created. Otherwise, an uncommitted version is created. + Description of the version, e.g., the changes made in this version. """ datapoints_count: int = pydantic.Field() diff --git a/src/humanloop/types/directory_with_parents_and_children_response.py b/src/humanloop/types/directory_with_parents_and_children_response.py index 090b5c98..5828a678 100644 --- a/src/humanloop/types/directory_with_parents_and_children_response.py +++ b/src/humanloop/types/directory_with_parents_and_children_response.py @@ -1,6 +1,5 @@ # This file was auto-generated by Fern from our API Definition. -from __future__ import annotations from ..core.unchecked_base_model import UncheckedBaseModel from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse @@ -15,7 +14,6 @@ from .directory_response import DirectoryResponse from .directory_with_parents_and_children_response_files_item import DirectoryWithParentsAndChildrenResponseFilesItem from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.pydantic_utilities import update_forward_refs class DirectoryWithParentsAndChildrenResponse(UncheckedBaseModel): @@ -79,16 +77,3 @@ class Config: frozen = True smart_union = True extra = pydantic.Extra.allow - - -update_forward_refs(EvaluatorResponse, DirectoryWithParentsAndChildrenResponse=DirectoryWithParentsAndChildrenResponse) -update_forward_refs(FlowResponse, DirectoryWithParentsAndChildrenResponse=DirectoryWithParentsAndChildrenResponse) -update_forward_refs( - MonitoringEvaluatorResponse, DirectoryWithParentsAndChildrenResponse=DirectoryWithParentsAndChildrenResponse -) -update_forward_refs(PromptResponse, DirectoryWithParentsAndChildrenResponse=DirectoryWithParentsAndChildrenResponse) -update_forward_refs(ToolResponse, DirectoryWithParentsAndChildrenResponse=DirectoryWithParentsAndChildrenResponse) -update_forward_refs( - VersionDeploymentResponse, DirectoryWithParentsAndChildrenResponse=DirectoryWithParentsAndChildrenResponse -) -update_forward_refs(VersionIdResponse, DirectoryWithParentsAndChildrenResponse=DirectoryWithParentsAndChildrenResponse) diff --git a/src/humanloop/types/evaluatee_response.py b/src/humanloop/types/evaluatee_response.py index 97007485..9ba9fe4d 100644 --- a/src/humanloop/types/evaluatee_response.py +++ b/src/humanloop/types/evaluatee_response.py @@ -1,6 +1,5 @@ # This file was auto-generated by Fern from our API Definition. 
-from __future__ import annotations from ..core.unchecked_base_model import UncheckedBaseModel from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse @@ -14,7 +13,6 @@ import pydantic import datetime as dt from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.pydantic_utilities import update_forward_refs class EvaluateeResponse(UncheckedBaseModel): @@ -51,12 +49,3 @@ class Config: frozen = True smart_union = True extra = pydantic.Extra.allow - - -update_forward_refs(EvaluatorResponse, EvaluateeResponse=EvaluateeResponse) -update_forward_refs(FlowResponse, EvaluateeResponse=EvaluateeResponse) -update_forward_refs(MonitoringEvaluatorResponse, EvaluateeResponse=EvaluateeResponse) -update_forward_refs(PromptResponse, EvaluateeResponse=EvaluateeResponse) -update_forward_refs(ToolResponse, EvaluateeResponse=EvaluateeResponse) -update_forward_refs(VersionDeploymentResponse, EvaluateeResponse=EvaluateeResponse) -update_forward_refs(VersionIdResponse, EvaluateeResponse=EvaluateeResponse) diff --git a/src/humanloop/types/evaluation_evaluator_response.py b/src/humanloop/types/evaluation_evaluator_response.py index 84b3c746..413081c6 100644 --- a/src/humanloop/types/evaluation_evaluator_response.py +++ b/src/humanloop/types/evaluation_evaluator_response.py @@ -1,6 +1,5 @@ # This file was auto-generated by Fern from our API Definition. -from __future__ import annotations from ..core.unchecked_base_model import UncheckedBaseModel from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse @@ -13,7 +12,6 @@ import datetime as dt from ..core.pydantic_utilities import IS_PYDANTIC_V2 import typing -from ..core.pydantic_utilities import update_forward_refs class EvaluationEvaluatorResponse(UncheckedBaseModel): @@ -36,12 +34,3 @@ class Config: frozen = True smart_union = True extra = pydantic.Extra.allow - - -update_forward_refs(EvaluatorResponse, EvaluationEvaluatorResponse=EvaluationEvaluatorResponse) -update_forward_refs(FlowResponse, EvaluationEvaluatorResponse=EvaluationEvaluatorResponse) -update_forward_refs(MonitoringEvaluatorResponse, EvaluationEvaluatorResponse=EvaluationEvaluatorResponse) -update_forward_refs(PromptResponse, EvaluationEvaluatorResponse=EvaluationEvaluatorResponse) -update_forward_refs(ToolResponse, EvaluationEvaluatorResponse=EvaluationEvaluatorResponse) -update_forward_refs(VersionDeploymentResponse, EvaluationEvaluatorResponse=EvaluationEvaluatorResponse) -update_forward_refs(VersionIdResponse, EvaluationEvaluatorResponse=EvaluationEvaluatorResponse) diff --git a/src/humanloop/types/evaluation_log_response.py b/src/humanloop/types/evaluation_log_response.py index 32ff5b40..6c931db0 100644 --- a/src/humanloop/types/evaluation_log_response.py +++ b/src/humanloop/types/evaluation_log_response.py @@ -1,6 +1,5 @@ # This file was auto-generated by Fern from our API Definition. 
-from __future__ import annotations from ..core.unchecked_base_model import UncheckedBaseModel from .evaluator_log_response import EvaluatorLogResponse from .evaluator_response import EvaluatorResponse @@ -18,7 +17,6 @@ from .datapoint_response import DatapointResponse from .log_response import LogResponse from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.pydantic_utilities import update_forward_refs class EvaluationLogResponse(UncheckedBaseModel): @@ -50,16 +48,3 @@ class Config: frozen = True smart_union = True extra = pydantic.Extra.allow - - -update_forward_refs(EvaluatorLogResponse, EvaluationLogResponse=EvaluationLogResponse) -update_forward_refs(EvaluatorResponse, EvaluationLogResponse=EvaluationLogResponse) -update_forward_refs(FlowLogResponse, EvaluationLogResponse=EvaluationLogResponse) -update_forward_refs(FlowResponse, EvaluationLogResponse=EvaluationLogResponse) -update_forward_refs(MonitoringEvaluatorResponse, EvaluationLogResponse=EvaluationLogResponse) -update_forward_refs(PromptLogResponse, EvaluationLogResponse=EvaluationLogResponse) -update_forward_refs(PromptResponse, EvaluationLogResponse=EvaluationLogResponse) -update_forward_refs(ToolLogResponse, EvaluationLogResponse=EvaluationLogResponse) -update_forward_refs(ToolResponse, EvaluationLogResponse=EvaluationLogResponse) -update_forward_refs(VersionDeploymentResponse, EvaluationLogResponse=EvaluationLogResponse) -update_forward_refs(VersionIdResponse, EvaluationLogResponse=EvaluationLogResponse) diff --git a/src/humanloop/types/evaluation_response.py b/src/humanloop/types/evaluation_response.py index a4c2336a..f113fff5 100644 --- a/src/humanloop/types/evaluation_response.py +++ b/src/humanloop/types/evaluation_response.py @@ -1,6 +1,5 @@ # This file was auto-generated by Fern from our API Definition. -from __future__ import annotations from ..core.unchecked_base_model import UncheckedBaseModel from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse @@ -15,7 +14,6 @@ import datetime as dt from .user_response import UserResponse from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.pydantic_utilities import update_forward_refs class EvaluationResponse(UncheckedBaseModel): @@ -60,12 +58,3 @@ class Config: frozen = True smart_union = True extra = pydantic.Extra.allow - - -update_forward_refs(EvaluatorResponse, EvaluationResponse=EvaluationResponse) -update_forward_refs(FlowResponse, EvaluationResponse=EvaluationResponse) -update_forward_refs(MonitoringEvaluatorResponse, EvaluationResponse=EvaluationResponse) -update_forward_refs(PromptResponse, EvaluationResponse=EvaluationResponse) -update_forward_refs(ToolResponse, EvaluationResponse=EvaluationResponse) -update_forward_refs(VersionDeploymentResponse, EvaluationResponse=EvaluationResponse) -update_forward_refs(VersionIdResponse, EvaluationResponse=EvaluationResponse) diff --git a/src/humanloop/types/evaluation_run_response.py b/src/humanloop/types/evaluation_run_response.py index 46f9308d..1203ce2c 100644 --- a/src/humanloop/types/evaluation_run_response.py +++ b/src/humanloop/types/evaluation_run_response.py @@ -1,6 +1,5 @@ # This file was auto-generated by Fern from our API Definition. 
-from __future__ import annotations from ..core.unchecked_base_model import UncheckedBaseModel from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse @@ -17,7 +16,6 @@ from .user_response import UserResponse from .evaluation_status import EvaluationStatus from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.pydantic_utilities import update_forward_refs class EvaluationRunResponse(UncheckedBaseModel): @@ -74,12 +72,3 @@ class Config: frozen = True smart_union = True extra = pydantic.Extra.allow - - -update_forward_refs(EvaluatorResponse, EvaluationRunResponse=EvaluationRunResponse) -update_forward_refs(FlowResponse, EvaluationRunResponse=EvaluationRunResponse) -update_forward_refs(MonitoringEvaluatorResponse, EvaluationRunResponse=EvaluationRunResponse) -update_forward_refs(PromptResponse, EvaluationRunResponse=EvaluationRunResponse) -update_forward_refs(ToolResponse, EvaluationRunResponse=EvaluationRunResponse) -update_forward_refs(VersionDeploymentResponse, EvaluationRunResponse=EvaluationRunResponse) -update_forward_refs(VersionIdResponse, EvaluationRunResponse=EvaluationRunResponse) diff --git a/src/humanloop/types/evaluation_runs_response.py b/src/humanloop/types/evaluation_runs_response.py index 208a7529..d91e1ee9 100644 --- a/src/humanloop/types/evaluation_runs_response.py +++ b/src/humanloop/types/evaluation_runs_response.py @@ -1,6 +1,5 @@ # This file was auto-generated by Fern from our API Definition. -from __future__ import annotations from ..core.unchecked_base_model import UncheckedBaseModel from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse @@ -13,7 +12,6 @@ from .evaluation_run_response import EvaluationRunResponse import pydantic from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.pydantic_utilities import update_forward_refs class EvaluationRunsResponse(UncheckedBaseModel): @@ -30,12 +28,3 @@ class Config: frozen = True smart_union = True extra = pydantic.Extra.allow - - -update_forward_refs(EvaluatorResponse, EvaluationRunsResponse=EvaluationRunsResponse) -update_forward_refs(FlowResponse, EvaluationRunsResponse=EvaluationRunsResponse) -update_forward_refs(MonitoringEvaluatorResponse, EvaluationRunsResponse=EvaluationRunsResponse) -update_forward_refs(PromptResponse, EvaluationRunsResponse=EvaluationRunsResponse) -update_forward_refs(ToolResponse, EvaluationRunsResponse=EvaluationRunsResponse) -update_forward_refs(VersionDeploymentResponse, EvaluationRunsResponse=EvaluationRunsResponse) -update_forward_refs(VersionIdResponse, EvaluationRunsResponse=EvaluationRunsResponse) diff --git a/src/humanloop/types/evaluator_log_response.py b/src/humanloop/types/evaluator_log_response.py index 967217c5..e457d580 100644 --- a/src/humanloop/types/evaluator_log_response.py +++ b/src/humanloop/types/evaluator_log_response.py @@ -194,14 +194,4 @@ class Config: from .tool_log_response import ToolLogResponse # noqa: E402 from .log_response import LogResponse # noqa: E402 -update_forward_refs(EvaluatorResponse, EvaluatorLogResponse=EvaluatorLogResponse) -update_forward_refs(FlowLogResponse, EvaluatorLogResponse=EvaluatorLogResponse) -update_forward_refs(FlowResponse, EvaluatorLogResponse=EvaluatorLogResponse) -update_forward_refs(MonitoringEvaluatorResponse, EvaluatorLogResponse=EvaluatorLogResponse) -update_forward_refs(PromptLogResponse, EvaluatorLogResponse=EvaluatorLogResponse) -update_forward_refs(PromptResponse, EvaluatorLogResponse=EvaluatorLogResponse) 
-update_forward_refs(ToolLogResponse, EvaluatorLogResponse=EvaluatorLogResponse) -update_forward_refs(ToolResponse, EvaluatorLogResponse=EvaluatorLogResponse) -update_forward_refs(VersionDeploymentResponse, EvaluatorLogResponse=EvaluatorLogResponse) -update_forward_refs(VersionIdResponse, EvaluatorLogResponse=EvaluatorLogResponse) update_forward_refs(EvaluatorLogResponse) diff --git a/src/humanloop/types/evaluator_response.py b/src/humanloop/types/evaluator_response.py index 0743086c..175f456d 100644 --- a/src/humanloop/types/evaluator_response.py +++ b/src/humanloop/types/evaluator_response.py @@ -8,7 +8,6 @@ from .environment_response import EnvironmentResponse import datetime as dt from .user_response import UserResponse -from .version_status import VersionStatus from .input_response import InputResponse from .evaluator_aggregate import EvaluatorAggregate from ..core.pydantic_utilities import IS_PYDANTIC_V2 @@ -35,9 +34,14 @@ class EvaluatorResponse(UncheckedBaseModel): ID of the directory that the file is in on Humanloop. """ - commit_message: typing.Optional[str] = pydantic.Field(default=None) + version_name: typing.Optional[str] = pydantic.Field(default=None) """ - Message describing the changes made. + Unique name for the Evaluator version. Version names must be unique for a given Evaluator. + """ + + version_description: typing.Optional[str] = pydantic.Field(default=None) + """ + Description of the version, e.g., the changes made in this version. """ spec: EvaluatorResponseSpec @@ -79,17 +83,6 @@ class EvaluatorResponse(UncheckedBaseModel): The user who created the Evaluator. """ - committed_by: typing.Optional[UserResponse] = pydantic.Field(default=None) - """ - The user who committed the Evaluator Version. - """ - - committed_at: typing.Optional[dt.datetime] = pydantic.Field(default=None) - """ - The date and time the Evaluator Version was committed. - """ - - status: VersionStatus last_used_at: dt.datetime version_logs_count: int = pydantic.Field() """ @@ -138,10 +131,4 @@ class Config: from .version_deployment_response import VersionDeploymentResponse # noqa: E402 from .version_id_response import VersionIdResponse # noqa: E402 -update_forward_refs(FlowResponse, EvaluatorResponse=EvaluatorResponse) -update_forward_refs(MonitoringEvaluatorResponse, EvaluatorResponse=EvaluatorResponse) -update_forward_refs(PromptResponse, EvaluatorResponse=EvaluatorResponse) -update_forward_refs(ToolResponse, EvaluatorResponse=EvaluatorResponse) -update_forward_refs(VersionDeploymentResponse, EvaluatorResponse=EvaluatorResponse) -update_forward_refs(VersionIdResponse, EvaluatorResponse=EvaluatorResponse) update_forward_refs(EvaluatorResponse) diff --git a/src/humanloop/types/file_environment_response.py b/src/humanloop/types/file_environment_response.py index 8f0dc1a8..70ed322f 100644 --- a/src/humanloop/types/file_environment_response.py +++ b/src/humanloop/types/file_environment_response.py @@ -1,6 +1,5 @@ # This file was auto-generated by Fern from our API Definition. 
-from __future__ import annotations from ..core.unchecked_base_model import UncheckedBaseModel from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse @@ -15,7 +14,6 @@ from .file_environment_response_file import FileEnvironmentResponseFile import pydantic from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.pydantic_utilities import update_forward_refs class FileEnvironmentResponse(UncheckedBaseModel): @@ -42,12 +40,3 @@ class Config: frozen = True smart_union = True extra = pydantic.Extra.allow - - -update_forward_refs(EvaluatorResponse, FileEnvironmentResponse=FileEnvironmentResponse) -update_forward_refs(FlowResponse, FileEnvironmentResponse=FileEnvironmentResponse) -update_forward_refs(MonitoringEvaluatorResponse, FileEnvironmentResponse=FileEnvironmentResponse) -update_forward_refs(PromptResponse, FileEnvironmentResponse=FileEnvironmentResponse) -update_forward_refs(ToolResponse, FileEnvironmentResponse=FileEnvironmentResponse) -update_forward_refs(VersionDeploymentResponse, FileEnvironmentResponse=FileEnvironmentResponse) -update_forward_refs(VersionIdResponse, FileEnvironmentResponse=FileEnvironmentResponse) diff --git a/src/humanloop/types/flow_log_response.py b/src/humanloop/types/flow_log_response.py index 01ae2cb0..ba1e1cf6 100644 --- a/src/humanloop/types/flow_log_response.py +++ b/src/humanloop/types/flow_log_response.py @@ -178,14 +178,4 @@ class Config: from .tool_log_response import ToolLogResponse # noqa: E402 from .log_response import LogResponse # noqa: E402 -update_forward_refs(EvaluatorLogResponse, FlowLogResponse=FlowLogResponse) -update_forward_refs(EvaluatorResponse, FlowLogResponse=FlowLogResponse) -update_forward_refs(FlowResponse, FlowLogResponse=FlowLogResponse) -update_forward_refs(MonitoringEvaluatorResponse, FlowLogResponse=FlowLogResponse) -update_forward_refs(PromptLogResponse, FlowLogResponse=FlowLogResponse) -update_forward_refs(PromptResponse, FlowLogResponse=FlowLogResponse) -update_forward_refs(ToolLogResponse, FlowLogResponse=FlowLogResponse) -update_forward_refs(ToolResponse, FlowLogResponse=FlowLogResponse) -update_forward_refs(VersionDeploymentResponse, FlowLogResponse=FlowLogResponse) -update_forward_refs(VersionIdResponse, FlowLogResponse=FlowLogResponse) update_forward_refs(FlowLogResponse) diff --git a/src/humanloop/types/flow_response.py b/src/humanloop/types/flow_response.py index f90dcca2..4017b3b7 100644 --- a/src/humanloop/types/flow_response.py +++ b/src/humanloop/types/flow_response.py @@ -7,7 +7,6 @@ from .environment_response import EnvironmentResponse import datetime as dt from .user_response import UserResponse -from .version_status import VersionStatus from .evaluator_aggregate import EvaluatorAggregate from ..core.pydantic_utilities import IS_PYDANTIC_V2 from ..core.pydantic_utilities import update_forward_refs @@ -38,9 +37,14 @@ class FlowResponse(UncheckedBaseModel): A key-value object identifying the Flow Version. """ - commit_message: typing.Optional[str] = pydantic.Field(default=None) + version_name: typing.Optional[str] = pydantic.Field(default=None) """ - Message describing the changes made. If provided, a committed version of the Flow is created. Otherwise, an uncommitted version is created. + Unique name for the Flow version. Version names must be unique for a given Flow. + """ + + version_description: typing.Optional[str] = pydantic.Field(default=None) + """ + Description of the Version. 
""" name: str = pydantic.Field() @@ -81,21 +85,6 @@ class FlowResponse(UncheckedBaseModel): The user who created the Flow. """ - committed_by: typing.Optional[UserResponse] = pydantic.Field(default=None) - """ - The user who committed the Flow Version. - """ - - committed_at: typing.Optional[dt.datetime] = pydantic.Field(default=None) - """ - The date and time the Flow Version was committed. - """ - - status: VersionStatus = pydantic.Field() - """ - The status of the Flow Version. - """ - last_used_at: dt.datetime version_logs_count: int = pydantic.Field() """ @@ -129,10 +118,4 @@ class Config: from .version_deployment_response import VersionDeploymentResponse # noqa: E402 from .version_id_response import VersionIdResponse # noqa: E402 -update_forward_refs(EvaluatorResponse, FlowResponse=FlowResponse) -update_forward_refs(MonitoringEvaluatorResponse, FlowResponse=FlowResponse) -update_forward_refs(PromptResponse, FlowResponse=FlowResponse) -update_forward_refs(ToolResponse, FlowResponse=FlowResponse) -update_forward_refs(VersionDeploymentResponse, FlowResponse=FlowResponse) -update_forward_refs(VersionIdResponse, FlowResponse=FlowResponse) update_forward_refs(FlowResponse) diff --git a/src/humanloop/types/list_evaluators.py b/src/humanloop/types/list_evaluators.py index 341ec7ba..61edbec5 100644 --- a/src/humanloop/types/list_evaluators.py +++ b/src/humanloop/types/list_evaluators.py @@ -1,6 +1,5 @@ # This file was auto-generated by Fern from our API Definition. -from __future__ import annotations from ..core.unchecked_base_model import UncheckedBaseModel from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse @@ -12,7 +11,6 @@ import typing import pydantic from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.pydantic_utilities import update_forward_refs class ListEvaluators(UncheckedBaseModel): @@ -29,12 +27,3 @@ class Config: frozen = True smart_union = True extra = pydantic.Extra.allow - - -update_forward_refs(EvaluatorResponse, ListEvaluators=ListEvaluators) -update_forward_refs(FlowResponse, ListEvaluators=ListEvaluators) -update_forward_refs(MonitoringEvaluatorResponse, ListEvaluators=ListEvaluators) -update_forward_refs(PromptResponse, ListEvaluators=ListEvaluators) -update_forward_refs(ToolResponse, ListEvaluators=ListEvaluators) -update_forward_refs(VersionDeploymentResponse, ListEvaluators=ListEvaluators) -update_forward_refs(VersionIdResponse, ListEvaluators=ListEvaluators) diff --git a/src/humanloop/types/list_flows.py b/src/humanloop/types/list_flows.py index bf593240..686dab26 100644 --- a/src/humanloop/types/list_flows.py +++ b/src/humanloop/types/list_flows.py @@ -1,6 +1,5 @@ # This file was auto-generated by Fern from our API Definition. 
-from __future__ import annotations from ..core.unchecked_base_model import UncheckedBaseModel from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse @@ -12,7 +11,6 @@ import typing import pydantic from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.pydantic_utilities import update_forward_refs class ListFlows(UncheckedBaseModel): @@ -29,12 +27,3 @@ class Config: frozen = True smart_union = True extra = pydantic.Extra.allow - - -update_forward_refs(EvaluatorResponse, ListFlows=ListFlows) -update_forward_refs(FlowResponse, ListFlows=ListFlows) -update_forward_refs(MonitoringEvaluatorResponse, ListFlows=ListFlows) -update_forward_refs(PromptResponse, ListFlows=ListFlows) -update_forward_refs(ToolResponse, ListFlows=ListFlows) -update_forward_refs(VersionDeploymentResponse, ListFlows=ListFlows) -update_forward_refs(VersionIdResponse, ListFlows=ListFlows) diff --git a/src/humanloop/types/list_prompts.py b/src/humanloop/types/list_prompts.py index 34253841..94cda05e 100644 --- a/src/humanloop/types/list_prompts.py +++ b/src/humanloop/types/list_prompts.py @@ -1,6 +1,5 @@ # This file was auto-generated by Fern from our API Definition. -from __future__ import annotations from ..core.unchecked_base_model import UncheckedBaseModel from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse @@ -12,7 +11,6 @@ import typing import pydantic from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.pydantic_utilities import update_forward_refs class ListPrompts(UncheckedBaseModel): @@ -29,12 +27,3 @@ class Config: frozen = True smart_union = True extra = pydantic.Extra.allow - - -update_forward_refs(EvaluatorResponse, ListPrompts=ListPrompts) -update_forward_refs(FlowResponse, ListPrompts=ListPrompts) -update_forward_refs(MonitoringEvaluatorResponse, ListPrompts=ListPrompts) -update_forward_refs(PromptResponse, ListPrompts=ListPrompts) -update_forward_refs(ToolResponse, ListPrompts=ListPrompts) -update_forward_refs(VersionDeploymentResponse, ListPrompts=ListPrompts) -update_forward_refs(VersionIdResponse, ListPrompts=ListPrompts) diff --git a/src/humanloop/types/list_tools.py b/src/humanloop/types/list_tools.py index bf170eb9..4080a6a1 100644 --- a/src/humanloop/types/list_tools.py +++ b/src/humanloop/types/list_tools.py @@ -1,6 +1,5 @@ # This file was auto-generated by Fern from our API Definition. 
-from __future__ import annotations from ..core.unchecked_base_model import UncheckedBaseModel from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse @@ -12,7 +11,6 @@ import typing import pydantic from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.pydantic_utilities import update_forward_refs class ListTools(UncheckedBaseModel): @@ -29,12 +27,3 @@ class Config: frozen = True smart_union = True extra = pydantic.Extra.allow - - -update_forward_refs(EvaluatorResponse, ListTools=ListTools) -update_forward_refs(FlowResponse, ListTools=ListTools) -update_forward_refs(MonitoringEvaluatorResponse, ListTools=ListTools) -update_forward_refs(PromptResponse, ListTools=ListTools) -update_forward_refs(ToolResponse, ListTools=ListTools) -update_forward_refs(VersionDeploymentResponse, ListTools=ListTools) -update_forward_refs(VersionIdResponse, ListTools=ListTools) diff --git a/src/humanloop/types/monitoring_evaluator_response.py b/src/humanloop/types/monitoring_evaluator_response.py index 4d79fcdc..e70dc4fb 100644 --- a/src/humanloop/types/monitoring_evaluator_response.py +++ b/src/humanloop/types/monitoring_evaluator_response.py @@ -47,10 +47,4 @@ class Config: from .version_id_response import VersionIdResponse # noqa: E402 from .version_reference_response import VersionReferenceResponse # noqa: E402 -update_forward_refs(EvaluatorResponse, MonitoringEvaluatorResponse=MonitoringEvaluatorResponse) -update_forward_refs(FlowResponse, MonitoringEvaluatorResponse=MonitoringEvaluatorResponse) -update_forward_refs(PromptResponse, MonitoringEvaluatorResponse=MonitoringEvaluatorResponse) -update_forward_refs(ToolResponse, MonitoringEvaluatorResponse=MonitoringEvaluatorResponse) -update_forward_refs(VersionDeploymentResponse, MonitoringEvaluatorResponse=MonitoringEvaluatorResponse) -update_forward_refs(VersionIdResponse, MonitoringEvaluatorResponse=MonitoringEvaluatorResponse) update_forward_refs(MonitoringEvaluatorResponse) diff --git a/src/humanloop/types/paginated_data_evaluation_log_response.py b/src/humanloop/types/paginated_data_evaluation_log_response.py index c6e19791..9e3c568e 100644 --- a/src/humanloop/types/paginated_data_evaluation_log_response.py +++ b/src/humanloop/types/paginated_data_evaluation_log_response.py @@ -1,6 +1,5 @@ # This file was auto-generated by Fern from our API Definition. 
-from __future__ import annotations from ..core.unchecked_base_model import UncheckedBaseModel from .evaluator_log_response import EvaluatorLogResponse from .evaluator_response import EvaluatorResponse @@ -17,7 +16,6 @@ from .evaluation_log_response import EvaluationLogResponse from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic -from ..core.pydantic_utilities import update_forward_refs class PaginatedDataEvaluationLogResponse(UncheckedBaseModel): @@ -34,16 +32,3 @@ class Config: frozen = True smart_union = True extra = pydantic.Extra.allow - - -update_forward_refs(EvaluatorLogResponse, PaginatedDataEvaluationLogResponse=PaginatedDataEvaluationLogResponse) -update_forward_refs(EvaluatorResponse, PaginatedDataEvaluationLogResponse=PaginatedDataEvaluationLogResponse) -update_forward_refs(FlowLogResponse, PaginatedDataEvaluationLogResponse=PaginatedDataEvaluationLogResponse) -update_forward_refs(FlowResponse, PaginatedDataEvaluationLogResponse=PaginatedDataEvaluationLogResponse) -update_forward_refs(MonitoringEvaluatorResponse, PaginatedDataEvaluationLogResponse=PaginatedDataEvaluationLogResponse) -update_forward_refs(PromptLogResponse, PaginatedDataEvaluationLogResponse=PaginatedDataEvaluationLogResponse) -update_forward_refs(PromptResponse, PaginatedDataEvaluationLogResponse=PaginatedDataEvaluationLogResponse) -update_forward_refs(ToolLogResponse, PaginatedDataEvaluationLogResponse=PaginatedDataEvaluationLogResponse) -update_forward_refs(ToolResponse, PaginatedDataEvaluationLogResponse=PaginatedDataEvaluationLogResponse) -update_forward_refs(VersionDeploymentResponse, PaginatedDataEvaluationLogResponse=PaginatedDataEvaluationLogResponse) -update_forward_refs(VersionIdResponse, PaginatedDataEvaluationLogResponse=PaginatedDataEvaluationLogResponse) diff --git a/src/humanloop/types/paginated_data_evaluator_response.py b/src/humanloop/types/paginated_data_evaluator_response.py index 86ee982a..275f0528 100644 --- a/src/humanloop/types/paginated_data_evaluator_response.py +++ b/src/humanloop/types/paginated_data_evaluator_response.py @@ -1,6 +1,5 @@ # This file was auto-generated by Fern from our API Definition. 
-from __future__ import annotations from ..core.unchecked_base_model import UncheckedBaseModel from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse @@ -12,7 +11,6 @@ import typing from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic -from ..core.pydantic_utilities import update_forward_refs class PaginatedDataEvaluatorResponse(UncheckedBaseModel): @@ -29,12 +27,3 @@ class Config: frozen = True smart_union = True extra = pydantic.Extra.allow - - -update_forward_refs(EvaluatorResponse, PaginatedDataEvaluatorResponse=PaginatedDataEvaluatorResponse) -update_forward_refs(FlowResponse, PaginatedDataEvaluatorResponse=PaginatedDataEvaluatorResponse) -update_forward_refs(MonitoringEvaluatorResponse, PaginatedDataEvaluatorResponse=PaginatedDataEvaluatorResponse) -update_forward_refs(PromptResponse, PaginatedDataEvaluatorResponse=PaginatedDataEvaluatorResponse) -update_forward_refs(ToolResponse, PaginatedDataEvaluatorResponse=PaginatedDataEvaluatorResponse) -update_forward_refs(VersionDeploymentResponse, PaginatedDataEvaluatorResponse=PaginatedDataEvaluatorResponse) -update_forward_refs(VersionIdResponse, PaginatedDataEvaluatorResponse=PaginatedDataEvaluatorResponse) diff --git a/src/humanloop/types/paginated_data_flow_response.py b/src/humanloop/types/paginated_data_flow_response.py index 9ffc2eb6..990d58be 100644 --- a/src/humanloop/types/paginated_data_flow_response.py +++ b/src/humanloop/types/paginated_data_flow_response.py @@ -1,6 +1,5 @@ # This file was auto-generated by Fern from our API Definition. -from __future__ import annotations from ..core.unchecked_base_model import UncheckedBaseModel from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse @@ -12,7 +11,6 @@ import typing from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic -from ..core.pydantic_utilities import update_forward_refs class PaginatedDataFlowResponse(UncheckedBaseModel): @@ -29,12 +27,3 @@ class Config: frozen = True smart_union = True extra = pydantic.Extra.allow - - -update_forward_refs(EvaluatorResponse, PaginatedDataFlowResponse=PaginatedDataFlowResponse) -update_forward_refs(FlowResponse, PaginatedDataFlowResponse=PaginatedDataFlowResponse) -update_forward_refs(MonitoringEvaluatorResponse, PaginatedDataFlowResponse=PaginatedDataFlowResponse) -update_forward_refs(PromptResponse, PaginatedDataFlowResponse=PaginatedDataFlowResponse) -update_forward_refs(ToolResponse, PaginatedDataFlowResponse=PaginatedDataFlowResponse) -update_forward_refs(VersionDeploymentResponse, PaginatedDataFlowResponse=PaginatedDataFlowResponse) -update_forward_refs(VersionIdResponse, PaginatedDataFlowResponse=PaginatedDataFlowResponse) diff --git a/src/humanloop/types/paginated_data_log_response.py b/src/humanloop/types/paginated_data_log_response.py index 7352f17e..57bae587 100644 --- a/src/humanloop/types/paginated_data_log_response.py +++ b/src/humanloop/types/paginated_data_log_response.py @@ -1,6 +1,5 @@ # This file was auto-generated by Fern from our API Definition. 
-from __future__ import annotations from ..core.unchecked_base_model import UncheckedBaseModel from .evaluator_log_response import EvaluatorLogResponse from .evaluator_response import EvaluatorResponse @@ -17,7 +16,6 @@ from .log_response import LogResponse from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic -from ..core.pydantic_utilities import update_forward_refs class PaginatedDataLogResponse(UncheckedBaseModel): @@ -34,16 +32,3 @@ class Config: frozen = True smart_union = True extra = pydantic.Extra.allow - - -update_forward_refs(EvaluatorLogResponse, PaginatedDataLogResponse=PaginatedDataLogResponse) -update_forward_refs(EvaluatorResponse, PaginatedDataLogResponse=PaginatedDataLogResponse) -update_forward_refs(FlowLogResponse, PaginatedDataLogResponse=PaginatedDataLogResponse) -update_forward_refs(FlowResponse, PaginatedDataLogResponse=PaginatedDataLogResponse) -update_forward_refs(MonitoringEvaluatorResponse, PaginatedDataLogResponse=PaginatedDataLogResponse) -update_forward_refs(PromptLogResponse, PaginatedDataLogResponse=PaginatedDataLogResponse) -update_forward_refs(PromptResponse, PaginatedDataLogResponse=PaginatedDataLogResponse) -update_forward_refs(ToolLogResponse, PaginatedDataLogResponse=PaginatedDataLogResponse) -update_forward_refs(ToolResponse, PaginatedDataLogResponse=PaginatedDataLogResponse) -update_forward_refs(VersionDeploymentResponse, PaginatedDataLogResponse=PaginatedDataLogResponse) -update_forward_refs(VersionIdResponse, PaginatedDataLogResponse=PaginatedDataLogResponse) diff --git a/src/humanloop/types/paginated_data_prompt_response.py b/src/humanloop/types/paginated_data_prompt_response.py index db64dd96..ff71e584 100644 --- a/src/humanloop/types/paginated_data_prompt_response.py +++ b/src/humanloop/types/paginated_data_prompt_response.py @@ -1,6 +1,5 @@ # This file was auto-generated by Fern from our API Definition. -from __future__ import annotations from ..core.unchecked_base_model import UncheckedBaseModel from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse @@ -12,7 +11,6 @@ import typing from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic -from ..core.pydantic_utilities import update_forward_refs class PaginatedDataPromptResponse(UncheckedBaseModel): @@ -29,12 +27,3 @@ class Config: frozen = True smart_union = True extra = pydantic.Extra.allow - - -update_forward_refs(EvaluatorResponse, PaginatedDataPromptResponse=PaginatedDataPromptResponse) -update_forward_refs(FlowResponse, PaginatedDataPromptResponse=PaginatedDataPromptResponse) -update_forward_refs(MonitoringEvaluatorResponse, PaginatedDataPromptResponse=PaginatedDataPromptResponse) -update_forward_refs(PromptResponse, PaginatedDataPromptResponse=PaginatedDataPromptResponse) -update_forward_refs(ToolResponse, PaginatedDataPromptResponse=PaginatedDataPromptResponse) -update_forward_refs(VersionDeploymentResponse, PaginatedDataPromptResponse=PaginatedDataPromptResponse) -update_forward_refs(VersionIdResponse, PaginatedDataPromptResponse=PaginatedDataPromptResponse) diff --git a/src/humanloop/types/paginated_data_tool_response.py b/src/humanloop/types/paginated_data_tool_response.py index e7ae59a9..0e52b361 100644 --- a/src/humanloop/types/paginated_data_tool_response.py +++ b/src/humanloop/types/paginated_data_tool_response.py @@ -1,6 +1,5 @@ # This file was auto-generated by Fern from our API Definition. 
-from __future__ import annotations from ..core.unchecked_base_model import UncheckedBaseModel from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse @@ -12,7 +11,6 @@ import typing from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic -from ..core.pydantic_utilities import update_forward_refs class PaginatedDataToolResponse(UncheckedBaseModel): @@ -29,12 +27,3 @@ class Config: frozen = True smart_union = True extra = pydantic.Extra.allow - - -update_forward_refs(EvaluatorResponse, PaginatedDataToolResponse=PaginatedDataToolResponse) -update_forward_refs(FlowResponse, PaginatedDataToolResponse=PaginatedDataToolResponse) -update_forward_refs(MonitoringEvaluatorResponse, PaginatedDataToolResponse=PaginatedDataToolResponse) -update_forward_refs(PromptResponse, PaginatedDataToolResponse=PaginatedDataToolResponse) -update_forward_refs(ToolResponse, PaginatedDataToolResponse=PaginatedDataToolResponse) -update_forward_refs(VersionDeploymentResponse, PaginatedDataToolResponse=PaginatedDataToolResponse) -update_forward_refs(VersionIdResponse, PaginatedDataToolResponse=PaginatedDataToolResponse) diff --git a/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py b/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py index 0e982fbc..bd7082b3 100644 --- a/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py +++ b/src/humanloop/types/paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response.py @@ -1,6 +1,5 @@ # This file was auto-generated by Fern from our API Definition. 
-from __future__ import annotations from ..core.unchecked_base_model import UncheckedBaseModel from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse @@ -15,7 +14,6 @@ ) from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic -from ..core.pydantic_utilities import update_forward_refs class PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse(UncheckedBaseModel): @@ -34,33 +32,3 @@ class Config: frozen = True smart_union = True extra = pydantic.Extra.allow - - -update_forward_refs( - EvaluatorResponse, - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse, -) -update_forward_refs( - FlowResponse, - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse, -) -update_forward_refs( - MonitoringEvaluatorResponse, - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse, -) -update_forward_refs( - PromptResponse, - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse, -) -update_forward_refs( - ToolResponse, - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse, -) -update_forward_refs( - VersionDeploymentResponse, - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse, -) -update_forward_refs( - VersionIdResponse, - PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse=PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponse, -) diff --git a/src/humanloop/types/paginated_evaluation_response.py b/src/humanloop/types/paginated_evaluation_response.py index 59ec4400..78e177e8 100644 --- a/src/humanloop/types/paginated_evaluation_response.py +++ b/src/humanloop/types/paginated_evaluation_response.py @@ -1,6 +1,5 @@ # This file was auto-generated by Fern from our API Definition. 
-from __future__ import annotations from ..core.unchecked_base_model import UncheckedBaseModel from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse @@ -13,7 +12,6 @@ from .evaluation_response import EvaluationResponse from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic -from ..core.pydantic_utilities import update_forward_refs class PaginatedEvaluationResponse(UncheckedBaseModel): @@ -30,12 +28,3 @@ class Config: frozen = True smart_union = True extra = pydantic.Extra.allow - - -update_forward_refs(EvaluatorResponse, PaginatedEvaluationResponse=PaginatedEvaluationResponse) -update_forward_refs(FlowResponse, PaginatedEvaluationResponse=PaginatedEvaluationResponse) -update_forward_refs(MonitoringEvaluatorResponse, PaginatedEvaluationResponse=PaginatedEvaluationResponse) -update_forward_refs(PromptResponse, PaginatedEvaluationResponse=PaginatedEvaluationResponse) -update_forward_refs(ToolResponse, PaginatedEvaluationResponse=PaginatedEvaluationResponse) -update_forward_refs(VersionDeploymentResponse, PaginatedEvaluationResponse=PaginatedEvaluationResponse) -update_forward_refs(VersionIdResponse, PaginatedEvaluationResponse=PaginatedEvaluationResponse) diff --git a/src/humanloop/types/populate_template_response.py b/src/humanloop/types/populate_template_response.py index f1674347..d587d175 100644 --- a/src/humanloop/types/populate_template_response.py +++ b/src/humanloop/types/populate_template_response.py @@ -1,6 +1,5 @@ # This file was auto-generated by Fern from our API Definition. -from __future__ import annotations from ..core.unchecked_base_model import UncheckedBaseModel from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse @@ -23,12 +22,10 @@ from .environment_response import EnvironmentResponse import datetime as dt from .user_response import UserResponse -from .version_status import VersionStatus from .input_response import InputResponse from .evaluator_aggregate import EvaluatorAggregate from .populate_template_response_populated_template import PopulateTemplateResponsePopulatedTemplate from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.pydantic_utilities import update_forward_refs class PopulateTemplateResponse(UncheckedBaseModel): @@ -149,9 +146,14 @@ class PopulateTemplateResponse(UncheckedBaseModel): Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used. """ - commit_message: typing.Optional[str] = pydantic.Field(default=None) + version_name: typing.Optional[str] = pydantic.Field(default=None) """ - Message describing the changes made. + Unique name for the Prompt version. Version names must be unique for a given Prompt. + """ + + version_description: typing.Optional[str] = pydantic.Field(default=None) + """ + Description of the version, e.g., the changes made in this version. """ description: typing.Optional[str] = pydantic.Field(default=None) @@ -192,21 +194,6 @@ class PopulateTemplateResponse(UncheckedBaseModel): The user who created the Prompt. """ - committed_by: typing.Optional[UserResponse] = pydantic.Field(default=None) - """ - The user who committed the Prompt Version. - """ - - committed_at: typing.Optional[dt.datetime] = pydantic.Field(default=None) - """ - The date and time the Prompt Version was committed. - """ - - status: VersionStatus = pydantic.Field() - """ - The status of the Prompt Version. 
- """ - last_used_at: dt.datetime version_logs_count: int = pydantic.Field() """ @@ -246,12 +233,3 @@ class Config: frozen = True smart_union = True extra = pydantic.Extra.allow - - -update_forward_refs(EvaluatorResponse, PopulateTemplateResponse=PopulateTemplateResponse) -update_forward_refs(FlowResponse, PopulateTemplateResponse=PopulateTemplateResponse) -update_forward_refs(MonitoringEvaluatorResponse, PopulateTemplateResponse=PopulateTemplateResponse) -update_forward_refs(PromptResponse, PopulateTemplateResponse=PopulateTemplateResponse) -update_forward_refs(ToolResponse, PopulateTemplateResponse=PopulateTemplateResponse) -update_forward_refs(VersionDeploymentResponse, PopulateTemplateResponse=PopulateTemplateResponse) -update_forward_refs(VersionIdResponse, PopulateTemplateResponse=PopulateTemplateResponse) diff --git a/src/humanloop/types/prompt_call_response.py b/src/humanloop/types/prompt_call_response.py index dc643472..4e1ae69c 100644 --- a/src/humanloop/types/prompt_call_response.py +++ b/src/humanloop/types/prompt_call_response.py @@ -1,6 +1,5 @@ # This file was auto-generated by Fern from our API Definition. -from __future__ import annotations from ..core.unchecked_base_model import UncheckedBaseModel from .evaluator_response import EvaluatorResponse from .flow_response import FlowResponse @@ -17,7 +16,6 @@ from .log_status import LogStatus from .prompt_call_log_response import PromptCallLogResponse from ..core.pydantic_utilities import IS_PYDANTIC_V2 -from ..core.pydantic_utilities import update_forward_refs class PromptCallResponse(UncheckedBaseModel): @@ -127,12 +125,3 @@ class Config: frozen = True smart_union = True extra = pydantic.Extra.allow - - -update_forward_refs(EvaluatorResponse, PromptCallResponse=PromptCallResponse) -update_forward_refs(FlowResponse, PromptCallResponse=PromptCallResponse) -update_forward_refs(MonitoringEvaluatorResponse, PromptCallResponse=PromptCallResponse) -update_forward_refs(PromptResponse, PromptCallResponse=PromptCallResponse) -update_forward_refs(ToolResponse, PromptCallResponse=PromptCallResponse) -update_forward_refs(VersionDeploymentResponse, PromptCallResponse=PromptCallResponse) -update_forward_refs(VersionIdResponse, PromptCallResponse=PromptCallResponse) diff --git a/src/humanloop/types/prompt_log_response.py b/src/humanloop/types/prompt_log_response.py index a88f7471..2a1bad11 100644 --- a/src/humanloop/types/prompt_log_response.py +++ b/src/humanloop/types/prompt_log_response.py @@ -218,14 +218,4 @@ class Config: from .tool_log_response import ToolLogResponse # noqa: E402 from .log_response import LogResponse # noqa: E402 -update_forward_refs(EvaluatorResponse, PromptLogResponse=PromptLogResponse) -update_forward_refs(FlowResponse, PromptLogResponse=PromptLogResponse) -update_forward_refs(MonitoringEvaluatorResponse, PromptLogResponse=PromptLogResponse) -update_forward_refs(PromptResponse, PromptLogResponse=PromptLogResponse) -update_forward_refs(ToolResponse, PromptLogResponse=PromptLogResponse) -update_forward_refs(VersionDeploymentResponse, PromptLogResponse=PromptLogResponse) -update_forward_refs(VersionIdResponse, PromptLogResponse=PromptLogResponse) -update_forward_refs(EvaluatorLogResponse, PromptLogResponse=PromptLogResponse) -update_forward_refs(FlowLogResponse, PromptLogResponse=PromptLogResponse) -update_forward_refs(ToolLogResponse, PromptLogResponse=PromptLogResponse) update_forward_refs(PromptLogResponse) diff --git a/src/humanloop/types/prompt_response.py b/src/humanloop/types/prompt_response.py index 
384a295a..07f4755d 100644 --- a/src/humanloop/types/prompt_response.py +++ b/src/humanloop/types/prompt_response.py @@ -16,7 +16,6 @@ from .environment_response import EnvironmentResponse import datetime as dt from .user_response import UserResponse -from .version_status import VersionStatus from .input_response import InputResponse from .evaluator_aggregate import EvaluatorAggregate from ..core.pydantic_utilities import IS_PYDANTIC_V2 @@ -141,9 +140,14 @@ class PromptResponse(UncheckedBaseModel): Additional fields to describe the Prompt. Helpful to separate Prompt versions from each other with details on how they were created or used. """ - commit_message: typing.Optional[str] = pydantic.Field(default=None) + version_name: typing.Optional[str] = pydantic.Field(default=None) """ - Message describing the changes made. + Unique name for the Prompt version. Version names must be unique for a given Prompt. + """ + + version_description: typing.Optional[str] = pydantic.Field(default=None) + """ + Description of the version, e.g., the changes made in this version. """ description: typing.Optional[str] = pydantic.Field(default=None) @@ -184,21 +188,6 @@ class PromptResponse(UncheckedBaseModel): The user who created the Prompt. """ - committed_by: typing.Optional[UserResponse] = pydantic.Field(default=None) - """ - The user who committed the Prompt Version. - """ - - committed_at: typing.Optional[dt.datetime] = pydantic.Field(default=None) - """ - The date and time the Prompt Version was committed. - """ - - status: VersionStatus = pydantic.Field() - """ - The status of the Prompt Version. - """ - last_used_at: dt.datetime version_logs_count: int = pydantic.Field() """ @@ -242,10 +231,4 @@ class Config: from .version_deployment_response import VersionDeploymentResponse # noqa: E402 from .version_id_response import VersionIdResponse # noqa: E402 -update_forward_refs(EvaluatorResponse, PromptResponse=PromptResponse) -update_forward_refs(FlowResponse, PromptResponse=PromptResponse) -update_forward_refs(MonitoringEvaluatorResponse, PromptResponse=PromptResponse) -update_forward_refs(ToolResponse, PromptResponse=PromptResponse) -update_forward_refs(VersionDeploymentResponse, PromptResponse=PromptResponse) -update_forward_refs(VersionIdResponse, PromptResponse=PromptResponse) update_forward_refs(PromptResponse) diff --git a/src/humanloop/types/tool_log_response.py b/src/humanloop/types/tool_log_response.py index 3680ef38..1b6081c3 100644 --- a/src/humanloop/types/tool_log_response.py +++ b/src/humanloop/types/tool_log_response.py @@ -167,14 +167,4 @@ class Config: from .prompt_log_response import PromptLogResponse # noqa: E402 from .log_response import LogResponse # noqa: E402 -update_forward_refs(EvaluatorLogResponse, ToolLogResponse=ToolLogResponse) -update_forward_refs(EvaluatorResponse, ToolLogResponse=ToolLogResponse) -update_forward_refs(FlowLogResponse, ToolLogResponse=ToolLogResponse) -update_forward_refs(FlowResponse, ToolLogResponse=ToolLogResponse) -update_forward_refs(MonitoringEvaluatorResponse, ToolLogResponse=ToolLogResponse) -update_forward_refs(PromptLogResponse, ToolLogResponse=ToolLogResponse) -update_forward_refs(PromptResponse, ToolLogResponse=ToolLogResponse) -update_forward_refs(ToolResponse, ToolLogResponse=ToolLogResponse) -update_forward_refs(VersionDeploymentResponse, ToolLogResponse=ToolLogResponse) -update_forward_refs(VersionIdResponse, ToolLogResponse=ToolLogResponse) update_forward_refs(ToolLogResponse) diff --git a/src/humanloop/types/tool_response.py 
b/src/humanloop/types/tool_response.py index 41f84766..0b835918 100644 --- a/src/humanloop/types/tool_response.py +++ b/src/humanloop/types/tool_response.py @@ -9,7 +9,6 @@ from .environment_response import EnvironmentResponse import datetime as dt from .user_response import UserResponse -from .version_status import VersionStatus from .input_response import InputResponse from .evaluator_aggregate import EvaluatorAggregate from ..core.pydantic_utilities import IS_PYDANTIC_V2 @@ -64,9 +63,14 @@ class ToolResponse(UncheckedBaseModel): Type of Tool. """ - commit_message: typing.Optional[str] = pydantic.Field(default=None) + version_name: typing.Optional[str] = pydantic.Field(default=None) """ - Message describing the changes made. + Unique identifier for this Tool version. Each Tool can only have one version with a given name. + """ + + version_description: typing.Optional[str] = pydantic.Field(default=None) + """ + Description of the Version. """ name: str = pydantic.Field() @@ -107,21 +111,6 @@ class ToolResponse(UncheckedBaseModel): The user who created the Tool. """ - committed_by: typing.Optional[UserResponse] = pydantic.Field(default=None) - """ - The user who committed the Tool Version. - """ - - committed_at: typing.Optional[dt.datetime] = pydantic.Field(default=None) - """ - The date and time the Tool Version was committed. - """ - - status: VersionStatus = pydantic.Field() - """ - The status of the Tool Version. - """ - last_used_at: dt.datetime version_logs_count: int = pydantic.Field() """ @@ -170,10 +159,4 @@ class Config: from .version_deployment_response import VersionDeploymentResponse # noqa: E402 from .version_id_response import VersionIdResponse # noqa: E402 -update_forward_refs(EvaluatorResponse, ToolResponse=ToolResponse) -update_forward_refs(FlowResponse, ToolResponse=ToolResponse) -update_forward_refs(MonitoringEvaluatorResponse, ToolResponse=ToolResponse) -update_forward_refs(PromptResponse, ToolResponse=ToolResponse) -update_forward_refs(VersionDeploymentResponse, ToolResponse=ToolResponse) -update_forward_refs(VersionIdResponse, ToolResponse=ToolResponse) update_forward_refs(ToolResponse) diff --git a/src/humanloop/types/commit_request.py b/src/humanloop/types/update_version_request.py similarity index 67% rename from src/humanloop/types/commit_request.py rename to src/humanloop/types/update_version_request.py index 9eca370c..90f4f488 100644 --- a/src/humanloop/types/commit_request.py +++ b/src/humanloop/types/update_version_request.py @@ -1,15 +1,20 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel +import typing import pydantic from ..core.pydantic_utilities import IS_PYDANTIC_V2 -import typing -class CommitRequest(UncheckedBaseModel): - commit_message: str = pydantic.Field() +class UpdateVersionRequest(UncheckedBaseModel): + name: typing.Optional[str] = pydantic.Field(default=None) + """ + Name of the version. + """ + + description: typing.Optional[str] = pydantic.Field(default=None) """ - Message describing the changes made. + Description of the version. 
""" if IS_PYDANTIC_V2: diff --git a/src/humanloop/types/version_deployment_response.py b/src/humanloop/types/version_deployment_response.py index 012b0583..e2e82d9f 100644 --- a/src/humanloop/types/version_deployment_response.py +++ b/src/humanloop/types/version_deployment_response.py @@ -44,10 +44,4 @@ class Config: from .version_id_response import VersionIdResponse # noqa: E402 from .version_deployment_response_file import VersionDeploymentResponseFile # noqa: E402 -update_forward_refs(EvaluatorResponse, VersionDeploymentResponse=VersionDeploymentResponse) -update_forward_refs(FlowResponse, VersionDeploymentResponse=VersionDeploymentResponse) -update_forward_refs(MonitoringEvaluatorResponse, VersionDeploymentResponse=VersionDeploymentResponse) -update_forward_refs(PromptResponse, VersionDeploymentResponse=VersionDeploymentResponse) -update_forward_refs(ToolResponse, VersionDeploymentResponse=VersionDeploymentResponse) -update_forward_refs(VersionIdResponse, VersionDeploymentResponse=VersionDeploymentResponse) update_forward_refs(VersionDeploymentResponse) diff --git a/src/humanloop/types/version_id_response.py b/src/humanloop/types/version_id_response.py index 8670d853..877851a9 100644 --- a/src/humanloop/types/version_id_response.py +++ b/src/humanloop/types/version_id_response.py @@ -38,10 +38,4 @@ class Config: from .version_deployment_response import VersionDeploymentResponse # noqa: E402 from .version_id_response_version import VersionIdResponseVersion # noqa: E402 -update_forward_refs(EvaluatorResponse, VersionIdResponse=VersionIdResponse) -update_forward_refs(FlowResponse, VersionIdResponse=VersionIdResponse) -update_forward_refs(MonitoringEvaluatorResponse, VersionIdResponse=VersionIdResponse) -update_forward_refs(PromptResponse, VersionIdResponse=VersionIdResponse) -update_forward_refs(ToolResponse, VersionIdResponse=VersionIdResponse) -update_forward_refs(VersionDeploymentResponse, VersionIdResponse=VersionIdResponse) update_forward_refs(VersionIdResponse)